diff --git "a/2565.jsonl" "b/2565.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2565.jsonl"
@@ -0,0 +1,671 @@
+{"seq_id":"654144262","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Dec 3 13:25:52 2017\n\n@author: Daniel\n\"\"\"\n\ndef chng_date(date, simple = False):\n    months = {\"sty\": \"01\", \"lut\": \"02\", \"mar\": \"03\", \"kwi\": \"04\", \"maj\": \"05\", \n              \"cze\": \"06\", \"lip\": \"07\", \"sie\": \"08\", \"wrz\": \"09\", \"paź\": \"10\",\n              \"lis\": \"11\", \"gru\": \"12\"}\n    \n    if not simple:\n        date = date.split()\n        date[1] = date[1].replace(date[1], months[date[1]])\n        date = [date[2], date[1], date[0]]\n        date = '-'.join(date)\n        return date\n    else:\n        return months[date]\n    \ndef slice_dataframe(df, window):\n    \"\"\"Returns a list of slices of a dataframe (rolling window of all columns)\"\"\"\n    dfs = []\n    i = window\n    if window < len(df):\n        while i<=len(df):\n            start = i-window\n            end = i\n            # from start to end and all columns\n            df_slice = df.iloc[start:end,:]\n            dfs.append(df_slice)\n            i+=1\n    return dfs\n    \n    ","sub_path":"data/formatting.py","file_name":"formatting.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"561834215","text":"\"\"\"\nIndependent component analysis of resting-state fMRI\n=====================================================\n\nAn example applying ICA to resting-state data.\n\"\"\"\n\nimport numpy as np\n\n### Load nyu_rest dataset #####################################################\nfrom nisl import datasets\n# Here we use only 3 subjects to get faster-running code. For better\n# results, simply increase this number\ndataset = datasets.fetch_nyu_rest(n_subjects=3)\n\n### Preprocess ################################################################\n\n# Concatenate all the subjects\nfmri_data = np.concatenate(dataset.func, axis=3)\n\n# Apply a small amount of Gaussian smoothing: in the case of ICA it is\n# important as it introduces a spatial model that ICA lacks and greatly\n# reduces the high-frequency signal\nfrom scipy import ndimage\nfor image in fmri_data.T:\n    # This works efficiently because image is a view on fmri_data\n    image[...] 
= ndimage.gaussian_filter(image, 1.5)\n\n# Take the mean along axis 3: the direction of time\nmean_img = np.mean(fmri_data, axis=3)\n\n# Mask non brain areas\nfrom nisl import masking\nmask = masking.compute_mask(mean_img)\ndata_masked = fmri_data[mask]\n\n\n### Apply ICA #################################################################\n\nfrom sklearn.decomposition import FastICA\nn_components = 20\nica = FastICA(n_components=n_components, random_state=42)\ncomponents_masked = ica.fit(data_masked).transform(data_masked)\n\n# We normalize the estimated components, for thresholding to make sens\ncomponents_masked -= components_masked.mean(axis=0)\ncomponents_masked /= components_masked.std(axis=0)\n# Threshold\ncomponents_masked[np.abs(components_masked) < .5] = 0\n\n# Now we inverting the masking operation, to go back to a full 3D\n# representation\n(x, y, z) = mean_img.shape\ncomponents = np.zeros((x, y, z, n_components))\ncomponents[mask] = components_masked\n\n# Using a masked array is important to have transparency in the figures\ncomponents = np.ma.masked_equal(components, 0, copy=False)\n\n### Visualize the results #####################################################\n# Show some interesting components\nimport pylab as pl\npl.figure()\npl.axis('off')\nvmax = np.max(np.abs(components[:, :, 20, 16]))\npl.imshow(np.rot90(mean_img[:, :, 20]), interpolation='nearest',\n cmap=pl.cm.gray)\npl.imshow(np.rot90(components[:, :, 20, 16]), interpolation='nearest',\n cmap=pl.cm.jet, vmax=vmax, vmin=-vmax)\n\npl.figure()\npl.axis('off')\nvmax = np.max(np.abs(components[:, :, 25, 19]))\npl.imshow(np.rot90(mean_img[:, :, 25]), interpolation='nearest',\n cmap=pl.cm.gray)\npl.imshow(np.rot90(components[:, :, 25, 19]), interpolation='nearest',\n cmap=pl.cm.jet, vmax=vmax, vmin=-vmax)\npl.show()\n","sub_path":"_downloads/plot_ica_resting_state.py","file_name":"plot_ica_resting_state.py","file_ext":"py","file_size_in_byte":2721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"294541757","text":"\"\"'''\n82. 
確率的勾配降下法による学習\n'''\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport torch\nfrom knock80_ import df2id\nfrom torch.utils.data import TensorDataset, DataLoader\nfrom torch.utils.tensorboard import SummaryWriter\nwriter = SummaryWriter()\n\nbase1 = '../chapter06/'\n\ntrain = pd.read_csv(base1 + 'train.txt', header=None, sep='\\t')\nvalid = pd.read_csv(base1 + 'valid.txt', header=None, sep='\\t')\ntest = pd.read_csv(base1 + 'test.txt', header=None, sep='\\t')\n\nvectorizer = CountVectorizer(min_df=2) # TFを計算。ただし、出現頻度が2回以上の単語だけを登録\ntrain_title = train.iloc[:, 0].str.lower()\ncnt = vectorizer.fit_transform(train_title).toarray() # title corpusを入力とし、TF array(スパース行列)を得る\n\nsm = cnt.sum(axis=0) # 列ごとに累加して、.get_feature_names()の単語ごとに、各docに出現頻度を数える\nidx = np.argsort(sm)[::-1] # 出現頻度の降順で、対応するindexを返す(.argsort返回数组值从小到大的对应索引值)\nwords = np.array(vectorizer.get_feature_names())[idx] # ['w1',...,'wn'][index] indexで単語を索引し返す。最も出現した単語が先頭に\n\nmax_len = 10\ndw = 300\ndh = 50\nn_vocab = len(words) + 2\nPAD = len(words) + 1\n\nclass RNN(torch.nn.Module):\n def __init__(self):\n super().__init__()\n self.emb = torch.nn.Embedding(n_vocab, dw, padding_idx=PAD)\n self.rnn = torch.nn.RNN(dw, dh, batch_first=True)\n self.linear = torch.nn.Linear(dh, 4)\n self.softmax = torch.nn.Softmax()\n\n def forward(self, x, h=None):\n x = self.emb(x)\n y, h = self.rnn(x, h)\n y = y[:, -1, :]\n y = self.linear(y)\n return y\n\n\ndef list2tensor(data, max_len):\n new = []\n for d in data: # data: [行=文書毎のID番号のリスト]\n if len(d) > max_len:\n d = d[:max_len]\n else:\n d += [PAD] * (max_len - len(d))\n new.append(d)\n\n return torch.tensor(new, dtype=torch.int64)\n\ndef accuracy(pred, label):\n pred = np.argmax(pred.data.numpy(), axis=1)\n label = label.data.numpy()\n return (pred == label).mean()\n\n\nX_train = df2id(train)\nX_valid = df2id(valid)\nX_test = df2id(test)\nX_train = list2tensor(X_train, max_len)\nX_valid = list2tensor(X_valid, max_len)\nX_test = list2tensor(X_test, max_len)\n\ny_train = np.loadtxt('y_train.txt')\ny_train = torch.tensor(y_train, dtype=torch.int64)\ny_valid = np.loadtxt('y_valid.txt')\ny_valid = torch.tensor(y_valid, dtype=torch.int64)\ny_test = np.loadtxt('y_test.txt')\ny_test = torch.tensor(y_test, dtype=torch.int64)\n\nmodel = RNN()\nds = TensorDataset(X_train, y_train)\n# Dataloaderを作成\nloader = DataLoader(ds, batch_size=1, shuffle=True)\nloss_func = torch.nn.CrossEntropyLoss()\noptimizer = torch.optim.SGD(model.parameters(), lr=1e-3)\n\nif __name__ == '__main__':\n\n for epoch in range(10):\n for xx, yy in loader:\n y_pred = model(xx)\n loss = loss_func(y_pred, yy)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n with torch.no_grad():\n y_pred = model(X_train)\n loss = loss_func(y_pred, y_train)\n writer.add_scalar('Loss/train', loss, epoch)\n writer.add_scalar('Accuracy/train', accuracy(y_pred, y_train), epoch)\n print(f'epoch:{epoch+1}')\n print(f'Accuracy on train:{accuracy(y_pred, y_train)}')\n\n\n y_pred = model(X_valid)\n loss = loss_func(y_pred, y_valid)\n writer.add_scalar('Loss/valid', loss, epoch)\n writer.add_scalar('Accuracy/valid', accuracy(y_pred, y_valid), epoch)\n print(f'Accuracy on valid:{accuracy(y_pred, y_valid)}')\n\n\n\n'''\nlr = 1e-1\nepoch:10\nAccuracy on train:0.4335454885810558\nAccuracy on valid:0.42664670658682635\n\nlr = 1e-3\nepoch:10\nAccuracy on train:0.8346125046798951\nAccuracy on 
valid:0.7537425149700598\n\n'''\n\n\n\n","sub_path":"wei/chapter09/knock82_.py","file_name":"knock82_.py","file_ext":"py","file_size_in_byte":4048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"587985477","text":"import shlex\nfrom subprocess import Popen, PIPE\n\n\n# def run_cmd(cmd):\n# \"\"\"Run a command and return a tuple with (stdout, stderr, exit_code)\"\"\"\n# process = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,\n# stderr=subprocess.PIPE)\n# (stdout, stderr) = process.communicate()\n# return stdout, stderr, process.wait()\n\n\ndef cmd(command, **params):\n \"\"\"Execute the specified commands via subprocess.\"\"\"\n\n process = Popen(shlex.split(command),\n stdout=PIPE,\n stderr=PIPE)\n\n (stdout, stderr) = process.communicate()\n (so, se, f) = stdout, stderr, process.wait()\n if f != 0:\n print(\"AN ERROR OCCURRED??\")\n raise Exception(\"The command threw an error?\")\n\n output = []\n for line in so:\n ln = str(line, encoding=\"utf-8\").replace(\"\\n\", \"\")\n output.append(ln)\n\n for line in se:\n print(line)\n\n # error = None\n # for line in se:\n # ln = str(line, encoding=\"utf-8\").replace(\"\\n\", \"\")\n # if ln:\n # # print(\"ERR: \" + ln)\n # if error is None:\n # error = []\n # error.append(ln)\n #\n # error = None if not error else \"\\n\".join(error)\n # if error:\n # if not output or (len(error) > 50 or error.lower().find(\"error\") > -1):\n # raise Exception(\"Error executing command: \" + error)\n\n txt = \"\\n\".join(output)\n return txt\n\n# def cmd(command, **params):\n# \"\"\"Execute the specified commands via subprocess.\"\"\"\n#\n# p = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True)\n# output = []\n# for line in p.stdout:\n# ln = str(line, encoding=\"utf-8\").replace(\"\\n\", \"\")\n# output.append(ln)\n#\n# error = None\n# for line in p.stderr:\n# ln = str(line, encoding=\"utf-8\").replace(\"\\n\", \"\")\n# if ln:\n# # print(\"ERR: \" + ln)\n# if error is None:\n# error = []\n# error.append(ln)\n#\n# if \"on_stop\" in params:\n# params[\"on_stop\"]()\n#\n# error = None if not error else \"\\n\".join(error)\n# if error:\n# if not output or (len(error) > 50 or error.lower().find(\"error\") > -1):\n# raise Exception(\"Error executing command: \" + error)\n#\n# txt = \"\\n\".join(output)\n# return txt\n\n\ndef args(*params, **kwargs):\n \"\"\"\n Parse the sys.argv commands\n :return: An object representing the given commands.\n \"\"\"\n\n import sys\n from .structs import EasyDict\n from . 
import codec\n\n argv = None\n if kwargs.get(\"prune\"):\n if not params:\n argv = sys.argv\n\n tokens = params[0].split(\" \") if params else sys.argv[1:] # ignore the first param (the script file)\n params = {}\n args = []\n for x, token in enumerate(tokens):\n if token[0] == \"-\":\n if argv:\n argv[x + 1] = None\n\n token = token[2:] if token.startswith(\"--\") else token[1:]\n parts = token.split(\"=\")\n key = parts[0]\n val = None if len(parts) == 1 else \"=\".join(parts[1:])\n if val:\n if val.isdigit():\n val = int(val)\n elif val.lower() in [\"true\", \"false\", \"yes\", \"no\"]:\n val = True if val.lower() in [\"true\", \"yes\"] else False\n\n params[key] = val\n tokens[x] = None\n continue\n elif not params:\n args.append(token)\n\n o = {}\n if args:\n for arg in args:\n o[arg] = True\n\n if params:\n for k, v in params.items():\n if isinstance(v, str):\n if codec.is_base64(v):\n try:\n v = codec.unbase64(v)\n except:\n pass\n if codec.is_json(v):\n try:\n v = codec.unjson(v)\n except:\n pass\n o[k] = v\n\n if argv:\n argv = [a for a in argv if a is not None]\n sys.argv = tuple(argv)\n\n return EasyDict(o)\n\n\n","sub_path":"fuze/util/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":4115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"556990985","text":"import hashlib\n\n\ndef make_sign(json_data, key):\n param_list = []\n josn_to_string = ''\n json_data['Key'] = key\n a = sorted(json_data.items(), key=lambda x: x[0], reverse=False)\n\n for i in a:\n if i[0] != 'SignValue' and i[1]:\n if (isinstance(i[1], str) or isinstance(i[1], int)):\n param_list.append(i[0] + \"=\" + str(i[1]))\n else:\n param_list.append(i[0])\n\n for i in range(len(param_list)):\n if i > 0:\n josn_to_string += \"&\"\n josn_to_string += param_list[i]\n json_data.pop('Key')\n # print(josn_to_string)\n hash = hashlib.sha256()\n hash.update(josn_to_string.encode('utf-8'))\n return hash.hexdigest()\n\n\ndef verify(json_data, key):\n sign = (json_data['SignValue'])\n if sign == '' and sign == None:\n return False\n return sign == make_sign(json_data, key)\n\n\nif __name__ == '__main__':\n from config import SIGN_KEY\n json_data = {\"UserMobile\":\"13776242953\",\"UserPwd\":\"123456\",\"VeriCode\":\"354027\"}\n json_data[\"SignValue\"] = make_sign(json_data, SIGN_KEY)\n print(json_data)\n print(verify(json_data,SIGN_KEY))","sub_path":"web/utils/SignVerifyUtil.py","file_name":"SignVerifyUtil.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"490519484","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/Products/HaufeWingDBG/ZopeLogFile.py\n# Compiled at: 2012-12-06 03:42:07\n\"\"\"File-like object which writes to Zope eventlog facility\n\nAll written information is collected until a newline is written after which the\ncollected line will be sent to the logging module.\n\n\"\"\"\nimport logging\n\nclass ZopeLogFile:\n \"\"\"File-like object that sends all data written to it to the Zope eventlog\n (which Zope will have handlers configured for).\n \n Data is collected and logged as summary lines whenever a newline is \n encountered. On creation, set the subsystem under which the lines should be \n logged. 
By default the INFO severity is used, unless a different severity\n is specified on creation of the class.\n \n \"\"\"\n severity = logging.INFO\n subsystem = ''\n _data = ''\n\n def __init__(self, subsystem, severity=logging.INFO):\n self.subsystem = subsystem\n self.severity = severity\n\n def write(self, text):\n self._data += text\n while '\\n' in self._data:\n line, self._data = self._data.split('\\n', 1)\n logger = logging.getLogger(self.subsystem)\n logger.log(self.severity, line)","sub_path":"pycfiles/Products.HaufeWingDBG-4.1.9-py2.7/ZopeLogFile.py","file_name":"ZopeLogFile.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"622877445","text":"from pygame import *\nfrom Var import *\nimport random\n\n\nclass selfTank(object):\n def __init__(self, direction, point):\n self.direction = direction\n self.point = point\n self.image = pygame.image.load(r\"image\\tank_T1_0.png\").subsurface(pygame.Rect(0, 0, 48, 48)).copy()\n self.mo = ['静止']\n self.moveFlag = True\n self.paolist = []\n self.paopoint = []\n self.paodir = '上'\n self.paoimage = pygame.image.load(r\"image\\bullet_left.png\")\n self.paocoll = []\n self.paocollimage = pygame.image.load(r\"image\\boom_dynamic.png\").subsurface(pygame.Rect(384, 0, 96, 96)).copy()\n\n def move(self):\n if self.mo[-1] == '左':\n if self.point[0] != 24:\n self.image = pygame.image.load(r\"image\\tank_T1_0.png\").subsurface(pygame.Rect(0, 96, 48, 48)).copy()\n self.paodir = '左'\n self.point[0] += -1\n if self.mo[-1] == '右':\n if self.point[0] != SCREEN_WIDTH - 24:\n self.image = pygame.image.load(r\"image\\tank_T1_0.png\").subsurface(pygame.Rect(0, 144, 48, 48)).copy()\n self.paodir = '右'\n self.point[0] += 1\n if self.mo[-1] == '下':\n if self.point[1] != SCREEN_HEIGHT - 24:\n self.image = pygame.image.load(r\"image\\tank_T1_0.png\").subsurface(pygame.Rect(0, 48, 48, 48)).copy()\n self.paodir = '下'\n self.point[1] += 1\n if self.mo[-1] == '上':\n if self.point[1] != 24:\n self.image = pygame.image.load(r\"image\\tank_T1_0.png\").subsurface(pygame.Rect(0, 0, 48, 48)).copy()\n self.paodir = '上'\n self.point[1] += -1\n screen.blit(self.image, (self.point[0] - 24, self.point[1] - 24))\n\n def shoot(self):\n # 遍历显示炮弹\n for i in self.paolist:\n if i[1] == '左':\n i[0][0] -= 2\n self.paoimage = pygame.image.load(r\"image\\bullet_left.png\")\n\n elif i[1] == '右':\n i[0][0] += 2\n self.paoimage = pygame.image.load(r\"image\\bullet_right.png\")\n\n elif i[1] == '下':\n i[0][1] += 2\n self.paoimage = pygame.image.load(r\"image\\bullet_down.png\")\n\n elif i[1] == '上':\n i[0][1] -= 2\n self.paoimage = pygame.image.load(r\"image\\bullet_up.png\")\n\n screen.blit(self.paoimage, i[0])\n\n # 撞边界的炮弹删除之\n for i in self.paolist:\n if 0 > i[0][0] or 0 > i[0][1] or SCREEN_WIDTH < i[0][0] or SCREEN_HEIGHT < i[0][1]:\n self.paolist.remove(i)\n self.paocoll.append([50, [i[0][0] - 48, i[0][1] - 48]])\n break\n # 撞边界的炮弹显示之\n\n def boom(self):\n for i in self.paocoll:\n if i[0] > 0:\n screen.blit(self.paocollimage, i[1])\n i[0] -= 1\n else:\n self.paocoll.remove(i)\n break\n\n def setBullet(self):\n self.paopoint = [self.point[0] - 6, self.point[1] - 6]\n self.paolist.append([self.paopoint, self.paodir])\n\n\nclass enemyTank(object):\n def __init__(self, direction, point):\n self.direction = direction\n self.point = point\n self.image = pygame.image.load(r\"image\\enemy_3_3.png\").subsurface(pygame.Rect(0, 96, 48, 48)).copy()\n self.desflag = False\n self.destination = 
[random.randint(0, SCREEN_WIDTH - 48), random.randint(0, SCREEN_HEIGHT - 48)]\n\n def move(self, direction, speed):\n self.direction = direction\n self.speed = speed\n if self.direction == 1:\n # 左\n self.image = pygame.image.load(r\"image\\enemy_3_3.png\").subsurface(pygame.Rect(0, 96, 48, 48)).copy()\n self.point[0] -= self.speed\n elif self.direction == 2:\n # 右\n self.image = pygame.image.load(r\"image\\enemy_3_3.png\").subsurface(pygame.Rect(0, 144, 48, 48)).copy()\n self.point[0] += self.speed\n elif self.direction == 3:\n # 下\n self.image = pygame.image.load(r\"image\\enemy_3_3.png\").subsurface(pygame.Rect(0, 48, 48, 48)).copy()\n self.point[1] += self.speed\n elif self.direction == 4:\n # 上\n self.image = pygame.image.load(r\"image\\enemy_3_3.png\").subsurface(pygame.Rect(0, 0, 48, 48)).copy()\n self.point[1] -= self.speed\n\n def randomDestination(self):\n\n vvx = self.destination[0] - self.point[0]\n vvy = self.destination[1] - self.point[1]\n if vvx > 0:\n self.move(2, 1)\n elif vvx < 0:\n self.move(1, 1)\n elif vvy > 0:\n self.move(3, 1)\n elif vvy < 0:\n self.move(4, 1)\n else:\n self.destination = [random.randint(0, SCREEN_WIDTH - 48), random.randint(0, SCREEN_HEIGHT - 48)]\n\n\nclass wall(object):\n def __init__(self, location, species):\n self.location = location\n self.image = pygame.image.load(r\"image\\brick.png\")\n\n def creatwall(self):\n for i in range(12):\n for j in range(12):\n if self.location[j][i] == 1:\n print(f'{j}..{i}')\n screen.blit(self.image, [i * 48, j * 48])\n screen.blit(self.image, [i * 48+24, j * 48+24])\n screen.blit(self.image, [i * 48+24, j * 48])\n screen.blit(self.image, [i * 48, j * 48+24])\n","sub_path":"elementClass.py","file_name":"elementClass.py","file_ext":"py","file_size_in_byte":5531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"422052688","text":"# _*_ coding: utf-8 _*_\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sb\nfrom scipy.io import loadmat\nfrom sklearn import svm\n\n\ndef load_mat(path):\n data = loadmat(path)\n X = data['X']\n y = data['y']\n return X, y\n\ndef plotData(X, y):\n plt.figure(figsize=(8, 5))\n plt.scatter(X[:, 0], X[:, 1], c=y.flatten(), cmap='rainbow')\n plt.xlabel('x1')\n plt.ylabel('x2')\n plt.legend()\n\ndef plotBoundary(clf, X):\n x_min, x_max = X[:, 0].min() * 1.2, X[:, 0].max() * 1.1\n y_min, y_max = X[:, 1].min() * 1.1, X[:, 1].max() * 1.1\n xx, yy = np.meshgrid(np.linspace(x_min, x_max, 500),\n np.linspace(y_min, y_max, 500))\n Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n plt.contour(xx, yy, Z)\n\n\"\"\"\n1' get data\n2' init 'sigma, gamma' choose the best fit\n3' plot the boundary\n\"\"\"\ndef example1():\n X, y = load_mat('ex6data1.mat')\n models = [svm.SVC(C, kernel='linear') for C in [1, 100]]\n clfs = [model.fit(X, y.ravel()) for model in models]\n title = ['SVM Decision Boundary with C = {} (Example Dataset 1'.format(C) for C in [1, 100]]\n for model, title in zip(clfs, title):\n plt.figure(figsize=(8, 5))\n plotData(X, y)\n plotBoundary(model, X)\n plt.title(title)\n plt.show()\n\ndef example2():\n X, y = load_mat('ex6data2.mat')\n sigma = 0.1\n gamma = np.power(sigma, -2.) 
/ 2\n clf = svm.SVC(C=1, kernel='rbf', gamma=gamma)\n model = clf.fit(X, y.flatten())\n plotData(X, y)\n plotBoundary(model, X)\n\n\ndef example3():\n mat3 = loadmat('ex6data3.mat')\n X, y = mat3['X'], mat3['y']\n Xval, yval = mat3['Xval'], mat3['yval']\n Cvalues = (0.01, 0.03, 0.1, 0.3, 1., 3., 10., 30.)\n sigmas = Cvalues\n best_score, best_pair = 0, (0, 0)\n for C in Cvalues:\n for sigma in sigmas:\n gamma = np.power(sigma, -2.) / 2\n model = svm.SVC(C=C, kernel='rbf', gamma=gamma)\n model.fit(X, y.flatten())\n this_score = model.score(Xval, yval)\n if this_score > best_score:\n best_score = this_score\n best_pair = (C, sigma)\n\n model = svm.SVC(C = best_pair[0], kernel='rbf', gamma=np.power(best_pair[1], -2) / 2)\n model.fit(X, y)\n plotData(X, y)\n plotBoundary(model, X)\n\n\nif __name__ == '__main__':\n example3()\n plt.show()\n","sub_path":"code/andrew_NG/SVM/SVM.py","file_name":"SVM.py","file_ext":"py","file_size_in_byte":2393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"391568375","text":"import numpy as np\r\nimport cv2\r\n\r\ndef click_event(event, x, y):\r\n if event == cv2.EVENT_LBUTTONDOWN:\r\n b = img[x,y,0]\r\n g = img[x,y,1]\r\n r = img[x,y,2]\r\n\r\n cv2.circle(img,(x,y),3,(0,255,0),-1)\r\n myimg = np.zeros((512,512,3),np.uint8)\r\n\r\n myimg[:] = [b,g,r]\r\n cv2.imshow('color',myimg)\r\n\r\nimg = cv2.imread('lena.jpg',1)\r\ncv2.imshow('image',img)\r\n\r\ncv2.setMouseCallback('image',click_event)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","sub_path":"mouse-event3.py","file_name":"mouse-event3.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"36130066","text":"from setuptools import setup, find_packages\n\nNAME = \"valuate\"\nPACKAGES = [NAME] + [\"%s.%s\" % (NAME, i) for i in find_packages(NAME)]\n\nsetup(\n name=NAME,\n version='5.1.7',\n author='DJ Leo',\n author_email='m18349125880@gmail.com',\n description='Used car valuation api.',\n packages=PACKAGES,\n package_data={\n '': ['*.csv'],\n },\n install_requires=[\n 'numpy==1.15.4',\n 'pandas==0.23.4',\n 'setuptools==40.6.2',\n 'SQLAlchemy==1.2.14',\n 'mysql-connector-python==8.0.5',\n 'PyMySQL==0.9.2',\n ],\n # include_package_data=True\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"608825082","text":"import discord\nfrom discord.ext import commands\nimport datetime\n\nclass AntiPermissions(commands.Cog):\n def __init__(self, client, db, webhook):\n self.client = client\n self.db = db\n self.webhook = webhook\n\n @commands.Cog.listener()\n async def on_guild_role_create(self, role):\n whitelistedUsers = self.db.find_one({ \"guild_id\": role.guild.id })[\"users\"]\n async for i in role.guild.audit_logs(limit=1, after=datetime.datetime.now() - datetime.timedelta(minutes = 2), action=discord.AuditLogAction.role_create):\n if i.user.bot:\n return\n \n if i.user.id in whitelistedUsers or i.user in whitelistedUsers:\n return\n\n await role.guild.ban(i.user)\n return\n\n @commands.Cog.listener()\n async def on_guild_role_delete(self, role):\n whitelistedUsers = self.db.find_one({ \"guild_id\": role.guild.id })[\"users\"]\n async for i in role.guild.audit_logs(limit=1, after=datetime.datetime.now() - datetime.timedelta(minutes = 2), action=discord.AuditLogAction.role_delete):\n if i.user.bot:\n return\n\n if 
i.user.id in whitelistedUsers or i.user in whitelistedUsers:\n return\n\n await role.guild.ban(i.users)\n return\n\n @commands.Cog.listener()\n async def on_guild_role_update(self, before, after):\n whitelistedUsers = self.db.find_one({ \"guild_id\": after.guild.id })[\"users\"]\n async for i in after.guild.audit_logs(limit=1, after=datetime.datetime.now() - datetime.timedelta(minutes = 2), action=discord.AuditLogAction.role_update):\n if i.user.id in whitelistedUsers or i.user in whitelistedUsers:\n return\n\n if not before.permissions.ban_members and after.permissions.ban_members:\n await after.guild.ban(i.user)\n return\n\n if not before.permissions.kick_members and after.permissions.kick_members:\n await after.guild.ban(i.user)\n return\n\n if not before.permissions.administrator and after.permissions.administrator:\n await after.guild.ban(i.user)\n return\n\n if i.target.id == before.guild.id:\n if after.permissions.kick_members or after.permissions.ban_members or after.permissions.administrator or after.permissions.mention_everyone or after.permissions.manage_roles:\n await after.guild.ban(i.user)\n await after.edit(permissions=1166401)\n \n return","sub_path":"cogs/AntiPermissions.py","file_name":"AntiPermissions.py","file_ext":"py","file_size_in_byte":2605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"362120865","text":"from flask import Flask, render_template\r\nfrom datetime import datetime\r\nimport requests\r\n\r\ndate = datetime.now()\r\n\r\napp = Flask(__name__)\r\n\r\n@app.route('/')\r\ndef home():\r\n year = date.year\r\n return render_template('index.html', year=year)\r\n\r\n@app.route('/guess/')\r\ndef guess(name):\r\n response = requests.get(f\"https://api.agify.io?name={name}\")\r\n age = response.json()\r\n age = age[\"age\"]\r\n result = requests.get(f\"https://api.genderize.io?name={name}\")\r\n gender = result.json()\r\n gender = gender[\"gender\"]\r\n return render_template('predict.html',name=name, gender=gender,age=age)\r\n\r\n@app.route('/blog')\r\ndef get_blog():\r\n response = requests.get(\"https://api.npoint.io/e5aa94ce16fc572668e3\")\r\n all_posts = response.json()\r\n return render_template('blog.html',posts=all_posts)\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True)","sub_path":"Day 57 Templating with Jinja/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"472815148","text":"import math\r\nimport pygame\r\nimport figuras\r\nimport os\r\nimport bot\r\nimport random\r\n'''\r\ntest 2, el objetivo es probar el algoritmo de gradiente, para definir la informacion de proximidad de los bots\r\n'''\r\n\r\n#ajusta la aparicion de la ventana en las coordenadas x,y\r\nx = 200\r\ny = 30\r\nos.environ['SDL_VIDEO_WINDOW_POS'] = \"%d,%d\" % (x,y)\r\n\r\nLx=1000\r\nLy=1000\r\npygame.init()\r\ndimensiones = [Lx, Ly]\r\npantalla =pygame.display.set_mode(dimensiones)\r\npygame.display.set_caption('pruebas para la formacion de figuras')\r\nNEGRO= (0,0,0)\r\nBLANCO=(255,255,255)\r\n\r\nfont = pygame.font.SysFont(\"arial\", 20)\r\n\r\nL=600\r\nofx=200\r\nofy=400\r\nf1=figuras.circulo(pantalla, (100,100), 800, 800)\r\nf2=figuras.estrella5(pantalla, (ofx,ofy), L,L, 1)\r\nt1=figuras.Ctest(pantalla,(500,500), 50)\r\n\r\n\r\nB=[]\r\nl=20\r\nr=50\r\nn2=20\r\nnx=int(math.sqrt(n2))\r\nny=int(n2/nx)\r\n\r\n'''\r\n#arreglo ordenado de bots en reticula cuadrada\r\nfor i in range(0, 
ny):\r\n for j in range(0, nx):\r\n x=2*r*i+200\r\n y=2*r*j+200\r\n vel=random.uniform(0,10)\r\n alpha=random.uniform(0,2*math.pi)\r\n b1=bot.coin(pantalla, (x,y), r, vel, alpha, False, '1')\r\n B.append(b1)\r\n \r\n'''\r\n#arreglo ordenado de bots en reticula hexagonal\r\ndx=2*r\r\ndy=int(2*r*math.sqrt(3)/2)\r\nfor i in range(0,ny):\r\n for j in range(0,nx):\r\n if i%2==0:\r\n coordenadas=(ofx+int(dx/2)+dx*j-r+L/2 , ofy+int(dy/2)-dy*i)\r\n vel=random.uniform(0,10)\r\n alpha=random.uniform(0,2*math.pi)\r\n b1=bot.coin(pantalla, (coordenadas[0],coordenadas[1]), r, vel, alpha, False, '*', 'detenido',0)\r\n B.append(b1)\r\n else:\r\n #coordenadas=(200+int(dx)+dx*j , 200+int(dy/2)+dy*i)#coloca los bots de la segunda liena a la derecha\r\n coordenadas=(ofx+dx*j-r+L/2 , ofy+int(dy/2)-dy*i)#coloca los bots de la segunda linea a la izquierda\r\n vel=random.uniform(0,10)\r\n alpha=random.uniform(0,2*math.pi)\r\n b1=bot.coin(pantalla, (coordenadas[0],coordenadas[1]), r, vel, alpha, '123', '*', 'detenido',0)\r\n B.append(b1)\r\n \r\n\r\n'''\r\n#arreglo aleatorio de bots\r\nfor i in range(0, nx):\r\n for j in range(0, ny):\r\n x=L*i+random.uniform(-l,l)\r\n y=L*j+random.uniform(-l,l)\r\n vel=random.uniform(0,10)\r\n alpha=random.uniform(0,2*math.pi)\r\n b1=bot.coin(pantalla, (x,y), r, vel, alpha, False, '1')\r\n B.append(b1)\r\n\r\n\r\n'''\r\nB[0].modseed(True)\r\n#B[1].modseed(True)\r\nB[nx].modseed(True)\r\nB[nx+1].modseed(True)\r\nB[2*nx].modseed(True)\r\n\r\nB[0].modgrad(0)\r\n#B[1].modgrad(0)\r\nB[nx].modgrad(1)\r\nB[nx+1].modgrad(1)\r\nB[2*nx].modgrad(2)\r\n\r\n\r\n'''\r\nB[5].modseed(True)\r\nB[8].modseed(True)\r\nB[9].modseed(True)\r\nB[13].modseed(True)\r\n#B[2*nx+1].modseed(True)\r\n\r\nB[5].modgrad(0)\r\nB[9].modgrad(1)\r\nB[10].modgrad(1)\r\nB[13].modgrad(2)\r\n#B[2*nx+1].modgrad(2)\r\n\r\n'''\r\n\r\n\r\n\r\n\r\n#definir vecinos\r\n#codigo para obtener los centros de los bots \r\nA=[]\r\nfor i in range(0,len(B)):\r\n cen=B[i].getdatpos()\r\n A.append(cen[0])\r\n \r\n#bloque que indica los vecinos, en otras palabras cuales bots interactuan entre si\r\n'''\r\npara el algoritmo de gradiente se ocupa un radio de interaccion menor\r\neste radio tiene un valor de 3*r\r\n'''\r\nvecinos=[]\r\nfor i in range(0,len(B)):\r\n x1=A[i][0]\r\n y1=A[i][1]\r\n j=0\r\n for j in range(0,len(B)):\r\n x2=A[j][0]\r\n y2=A[j][1]\r\n distancia=math.sqrt((x1-x2)**2+(y1-y2)**2)\r\n if distancia < 3*r:\r\n if i==j:\r\n pass\r\n #elif j>i-1:\r\n # pass\r\n else:\r\n nn=[i,j, distancia]\r\n vecinos.append(nn) \r\n\r\n#ordena la lista vecinos para poder manejar los datos con mayor facilidad \r\nlvecinos=[]\r\nlvecinosD=[]\r\nfor i in range(0,len(B)):\r\n b=[]\r\n bD=[]\r\n for j in range(0,len(vecinos)):\r\n if vecinos[j][0]==i:\r\n a=vecinos[j][1]\r\n aD=vecinos[j][2]\r\n b.append(a)\r\n bD.append(aD)\r\n else:\r\n pass\r\n c=[i,b]\r\n lvecinos.append(c)\r\n cD=[i,bD]\r\n lvecinosD.append(cD)\r\n b=[]\r\n c=[]\r\n \r\n\r\n\r\n \r\n\r\nprint(vecinos)\r\nprint(len(vecinos))\r\nprint(lvecinos)\r\nprint(len(lvecinos))\r\nprint(lvecinosD)\r\nprint(len(lvecinosD))\r\n\r\n#codigo para dar un valor adecuado al gradiente\r\nfor i in range(0, len(B)):\r\n B[i].compseed()\r\n B[i].compgrad(len(B))\r\n if B[i].getgrad()<2:\r\n pass\r\nwhile True: \r\n comp=[]\r\n for i in range(0, len(B)):\r\n H=[]\r\n \r\n for j in range(0, len(lvecinos[i][1])):\r\n a=B[lvecinos[i][1][j]].getgrad()\r\n H.append(a)\r\n b=min(H)\r\n if B[i].getgrad()<2:\r\n pass\r\n else:\r\n B[i].modgrad(b+1)\r\n \r\n a=B[i].getgrad()\r\n comp.append(a)\r\n 
c=max(comp)\r\n print(c, ' este es el valor maximo del gradiente')\r\n if c 0: # thi condition works if there is something to remove in the list\n killRobot = input('\\nWhat do you like to kill?\\n') # it is an input to add some item\n\n if killRobot == '':\n print('You have not choose who will die today!')\n print('You will be returned to the options')\n\n else:\n Robot.population.remove(killRobot) # it removes the item you choose\n print('You have', len(Robot.population), 'robot(s).')\n print('Your robotsm(s) are:', end=' ')\n for robots in Robot.population:\n print(robots, end=' ')\n else:\n print('You have no one to kill!')\n\n def say_hi(self):\n \"\"\"Greeting by the robot.\n Yeah, they can do that.\"\"\"\n\n print(\"\\nGreetings, my masters call me {}.\".format(self.name))\n\n @classmethod\n def how_many(cls):\n \"\"\"Prints the current population.\"\"\"\n # It just shows the list if exist something in the list\n if len(Robot.population) > 0:\n print('Here are all them')\n print('You have', len(Robot.population), 'robot(s).') # this line shows how many items you have\n print('These robot(s) are:', end=' ') # it shows all the products or items in your list\n for robots in Robot.population:\n print(robots, end=' ')\n else:\n print('You have no one!')\n\nrunning = True\n\nwhile running:\n # it is a menu with options\n print('\\n1 - Initializes a new robot')\n print('2 - Ask to robot commits a suicide')\n print('3 - See how many robots you have')\n print('0 - Get Out')\n command=int(input('What I do?')) # the option's input\n\n # first option\n if command == 1:\n robotName = input(\"\\nWhich is the robot's name?\\n\")\n\n newDroid = Robot(robotName)\n newDroid.say_hi()\n\n # second option\n elif command == 2:\n newDroid.die()\n\n\n # third option\n elif command == 3:\n Robot.how_many()\n\n # nothing else for to do\n else:\n print('\\nClosing your terminal...')\n running = False\nelse:\n print(\"\\nDon't forget to dominate thw world.\")\n # The loops is closed\n","sub_path":"oop_objvar_versions.py","file_name":"oop_objvar_versions.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"138049864","text":"import configparser\nfrom request import *\nfrom screen import ScreenID\nfrom screen import Screen\nfrom characters.character import Character\nfrom mapObjects.item import Item\nfrom actions.event import Event\n\nconfig = configparser.ConfigParser()\nconfig.read('config.ini')\nwidth = int(config['window']['width'])\nheight = int(config['window']['height'])\n\n\ndef login(name):\n player = readPlayer(name)\n if player == None:\n player = createPlayer(name)\n return player\n\n\ndef levelUP(player, space, screenID):\n player.levelUP(space)\n playerInfo = updateCharacter(player, screenID.value)\n return playerInfo\n\n\ndef characterTransferScreen(character, screenID):\n characterInfo = updateCharacter(character, screenID.value)\n\n\ndef getScreen(screenID):\n screen = readScreen(screenID.value)\n return Screen(\n screen.name,\n screen.backgroundName,\n normalizeCharacters(screen.characters),\n normalizeMapObjects(screen.mapObjects))\n\n\ndef getCharacters(screenID):\n # get all characters information that include name, role, imageName, level\n characterInfos = readCharacters()\n characters = normalizeCharacters(characterInfos)\n\n # Climu not in meeting room\n if screenID == ScreenID.MEETING_ROOM:\n del characters[2]\n\n # set character position that be got from config file\n for character in 
characters:\n character.x = float(config['screen' + screenID.value]\n [character.name.lower() + '_xRatio']) * width\n character.y = float(config['screen' + screenID.value]\n [character.name.lower() + '_yRatio']) * height\n\n return characters\n\n\ndef normalizeCharacters(characterInfos):\n characters = []\n\n for characterInfo in characterInfos:\n character = Character(characterInfo)\n normalizeEvents(character.events, characterInfo.events)\n characters.append(character)\n\n return characters\n\n\ndef normalizeEvents(characterEvents, eventInfos):\n for eventInfo in eventInfos:\n characterEvents.append(Event(eventInfo))\n\n\ndef normalizeMapObjects(mapObjectInfos):\n mapObjects = []\n\n for mapObjectInfo in mapObjectInfos:\n mapObjects.append(Item(mapObjectInfo))\n\n return mapObjects\n","sub_path":"handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":2241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"525677079","text":"import arcpy \r\nimport os \r\n\r\n#set workspace environment \r\nworkspace = arcpy.GetParameterAsText(0) \r\narcpy.env.overwriteOutput = True\r\nignore = ['logs']\r\n#iterate through files in the root directory\r\nfor root, dirs, files in os.walk(workspace):\r\n\t\t#ignore directories listed in the \"ignore\" argument\r\n\t\tfor idir in ignore:\r\n\t\t\tif idir in dirs:\r\n\t\t\t\tdirs.remove(idir)\r\n\t\tfor f in files:\r\n\t\t\t#select txt files to convert\r\n\t\t\tif f.endswith(\".txt\"):\r\n\t\t\t\t#create an object with the path and filename\r\n\t\t\t\ttxt = os.path.join(root, f)\r\n\t\t\t\t#create a pair that consists of the entire path and the \".txt\"\r\n\t\t\t\tname, ext = os.path.splitext(txt)\r\n\t\t\t\t#create a pair that consists of the head (leading up to first backslash) and tail(everything after)\r\n\t\t\t\t#so that you can truncate the first 13 characters of the filename\r\n\t\t\t\toutras, tail = os.path.split(name)\r\n\t\t\t\t#invoke the conversion tool\r\n\t\t\t\tarcpy.ASCIIToRaster_conversion(txt, os.path.join(outras, tail[12:]), \"FLOAT\")","sub_path":"python scripts/ArcPy/BatchAsciiToRasterttxt.py","file_name":"BatchAsciiToRasterttxt.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"562269277","text":"import numpy as np\n\n\nclass KernelPerceptron(object):\n def __init__(self, n=10, kernel=None, sigma=1.0, theta=0.0, p=1.0, eta=1.0, c1=0.0, c2=1.0):\n self.n = n # iterations\n self.kernel = kernel # the kernel we use. By default, we just take a dot product.\n self.sigma = sigma # sigma for RBF kernel\n self.theta = theta # theta for polynomial and sigmoid kernels\n self.p = p # power for polynomial kernel\n self.eta = eta # eta for sigmoid kernel\n self.c1 = c1 # addition c for generalized kernel function\n self.c2 = c2 # multiplication c for generalized kernel function\n\n def fit(self, x, y):\n self.a_ = np.zeros(x.shape[0])\n self.errors_ = []\n self.x_ = x\n self.y_ = y\n\n for iteration in range(self.n):\n errors = 0\n for j in range(self.a_.size):\n if self.predict(self.x_.iloc[j, :]) != y[j]: # we only update when we make mistakes\n self.a_[j] += 1 # our whole update rule is just keeping track of errors!\n errors += 1\n self.errors_.append(errors)\n if errors == 0:\n print('Converged after %.i iterations.' 
% iteration)\n break # if we don't update in an iteration, it saves time to just stop, and let the user know.\n return self\n\n def predict(self, xj):\n total = 0 # our predict function can be written sign(sum(a_i*y_i*K(x_i,x_j)))\n for i in range(self.a_.size):\n total += self.a_[i]*self.y_[i]*self.kernel_function(self.x_.iloc[i, :], xj)\n return np.where(total >= 0.0, 1, -1) # np.where conveniently gives a value for 0.\n\n def kernel_function(self, xi, xj):\n if self.kernel is None: # if we're not using a kernel, we just use a dot product.\n kern = np.dot(xi, xj)\n elif self.kernel == 'rbf' or self.kernel == 'Gaussian':\n num = np.linalg.norm(xi-xj)**2\n den = 2*self.sigma**2\n frac = num / den\n kern = np.exp(-1 * frac) # this is the definition of the RBF kernel.\n elif self.kernel == 'polynomial':\n kern = (np.dot(xi, xj) + self.theta)**self.p # this is the definition of the polynomial kernel.\n else:\n raise ValueError('Unrecognized kernel \"' + self.kernel + '\"') # mainly for typos and whatnot\n return self.c1 + self.c2 * kern # user-specified c1 and c2 allow us to generalize the kernel function\n\n def score(self, x, y):\n correct = 0\n total = x.shape[0]\n for j in range(total):\n if self.predict(x.iloc[j, :]) == y[j]:\n correct += 1\n return correct / total # self-explanatory: returns a % of correct answers\n","sub_path":"Kernel_Perceptron.py","file_name":"Kernel_Perceptron.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"400428733","text":"import imgaug as ia\nfrom imgaug import augmenters as iaa\nimport numpy as np\n\ndef get_transforms():\n sometimes = lambda aug: iaa.Sometimes(0.2,aug)\n\n seq1 = iaa.Sequential(\n [\n iaa.Fliplr(0.5),\n iaa.Flipud(0.5),\n sometimes(iaa.Affine(\n scale={\"x\":(0.8,1.2),\"y\":(0.8,1.2)},\n translate_percent={\"x\":(-0.2,0.2),\"y\":(-0.2,0.2)},\n rotate=(-30,30),\n shear=(-10,10),\n mode='constant',\n cval=(0,255),\n )),\n sometimes(iaa.PiecewiseAffine(scale=(0.01,0.05),\n nb_cols=8,\n nb_rows=8,\n mode='constant',\n cval=(0,255),)),\n ],\n )\n\n seq2 = iaa.Sequential(\n [\n iaa.SomeOf((0,1),[\n sometimes(iaa.MultiplyElementwise((0.8, 1.2))),\n sometimes(iaa.AddElementwise((-20,20))),\n sometimes(iaa.ContrastNormalization((0.8, 1.2))),\n ]),\n\n iaa.SomeOf((0,1),[\n iaa.OneOf([\n iaa.GaussianBlur((0,2.0)),\n iaa.AverageBlur(k=2),\n iaa.MedianBlur(k=3),\n ]),\n iaa.AdditiveGaussianNoise(0,10),\n iaa.SaltAndPepper(0.01),\n iaa.ReplaceElementwise(0.05,(0,255))\n ]),\n ],\n )\n\n return seq1,seq2\n","sub_path":"trans.py","file_name":"trans.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"649898334","text":"# Copyright (c) 2009-2011 Assembly Organizing\n# See also LICENSE.txt\n\nimport asm.cms\nimport asm.cms.interfaces\nimport asm.cmsui.base\nimport asm.cmsui.interfaces\nimport asm.translation.interfaces\nimport grok\nimport zc.sourcefactory.basic\nimport zope.component\nimport zope.i18n.interfaces\nimport zope.interface\nimport zope.publisher.interfaces.browser\nimport zope.schema\n\nLANGUAGE_LABELS = {'en': 'English',\n 'de': 'German',\n '': 'independent',\n 'fi': 'Finnish'}\n\ndefault_languages = ['en', 'fi']\n\n\ndef tag2lang(tag):\n return tag.replace('lang:', '')\n\n\ndef lang2tag(lang):\n return 'lang:%s' % lang\n\n\ndef fallback():\n return current()[0]\n\n\ndef current():\n return zope.component.getUtility(\n 
asm.translation.interfaces.ILanguageProfile)\n\n\nclass LanguageSource(zc.sourcefactory.basic.BasicSourceFactory):\n\n def getValues(self):\n return current()\n\n def getTitle(self, value):\n return LANGUAGE_LABELS.get(value, value)\n\n\nclass LanguageLabels(grok.GlobalUtility):\n\n zope.interface.implements(asm.cms.interfaces.IEditionLabels)\n grok.name('lang')\n\n def lookup(self, tag):\n lang = tag2lang(tag)\n return LANGUAGE_LABELS.get(lang, lang)\n\n\ndef select_initial_language(page):\n return set([lang2tag(fallback())])\n\n\nclass Prefixes(object):\n\n zope.interface.implements(asm.cms.interfaces.IExtensionPrefixes)\n\n prefixes = set(['lang'])\n\n\nclass CMSEditionSelector(object):\n\n zope.interface.implements(asm.cms.IEditionSelector)\n zope.component.adapts(asm.cmsui.interfaces.ICMSSkin)\n\n def __init__(self, request):\n self.request = request\n\n def select(self, page):\n preferred = []\n acceptable = []\n for edition in page.editions:\n if lang2tag(fallback()) in edition.parameters:\n preferred.append(edition)\n else:\n acceptable.append(edition)\n return preferred, acceptable\n\n\ndef get_language_preferences(request):\n preferred_langs = {}\n\n # Prefer cookie if set\n if 'asm.translation.lang' in request.cookies:\n preferred_langs[request.cookies['asm.translation.lang']] = 1.0\n else:\n # If no cookie is set we'll prefer the browser setting\n for lang in request.headers.get('Accept-Language', '').split(','):\n lang_priority = lang.split(';')\n lang = lang_priority[0]\n lang = lang.split('-')[0]\n\n priority = 1.0\n if len(lang_priority) > 1:\n priority_str = lang_priority[1][2:]\n priority = float(priority_str)\n\n if preferred_langs.get(lang, 0) < priority:\n preferred_langs[lang] = priority\n\n prioritized_langs = sorted(\n preferred_langs.items(),\n key=lambda x : x[1],\n reverse=True)\n return [lang[0] for lang in prioritized_langs]\n\n\nclass RetailEditionSelector(object):\n\n zope.interface.implements(asm.cms.IEditionSelector)\n zope.component.adapts(asm.cmsui.interfaces.IRetailBaseSkin)\n\n def __init__(self, request):\n # XXX Need to make this more pluggable\n self.request = request\n request.response.setHeader('Vary', 'Cookie,Accept-Language')\n\n preferred_langs = get_language_preferences(self.request)\n acceptable_langs = []\n if fallback() not in preferred_langs:\n acceptable_langs.append(fallback())\n # XXX Here's a special case: some old databases were created with an\n # empty string as the marker for 'language independent'. 
This feature\n # isn't used much but some editions still have it.\n acceptable_langs.append('')\n\n self.preferred_langs = preferred_langs\n self.acceptable_langs = acceptable_langs\n\n def select(self, page):\n # Select the preferred language by finding the one with the\n # highest priority that has at least one edition.\n preferred_langs = list(self.preferred_langs)\n acceptable_langs = list(self.acceptable_langs)\n page_editions = list(page.editions)\n preferred_language = None\n for language in preferred_langs:\n language_tag = lang2tag(language)\n for edition in page_editions:\n if language_tag in edition.parameters:\n preferred_language = language\n break\n if preferred_language is not None:\n break\n\n if preferred_language is not None:\n preferred_langs.remove(preferred_language)\n acceptable_langs = preferred_langs + acceptable_langs\n preferred_langs = [preferred_language]\n\n def get_editions(languages):\n result = []\n for language in languages:\n tag = lang2tag(language)\n for edition in page_editions:\n if tag in edition.parameters:\n result.append(edition)\n return result\n\n preferred = get_editions(preferred_langs)\n acceptable = get_editions(acceptable_langs)\n return preferred, acceptable\n\n\nclass RetailPreferredLanguages(grok.Adapter):\n\n grok.context(zope.publisher.interfaces.browser.IBrowserRequest)\n grok.provides(zope.i18n.interfaces.IUserPreferredLanguages)\n\n def __init__(self, request):\n self.request = request\n\n def getPreferredLanguages(self):\n return get_language_preferences(self.request)\n\n\nclass ITranslation(zope.interface.Interface):\n\n language = zope.schema.Choice(\n title=u'Language to translate to',\n source=LanguageSource())\n\n\nclass TranslationMenu(grok.Viewlet):\n\n grok.viewletmanager(asm.cmsui.base.PageActionGroups)\n grok.context(asm.cms.IEdition)\n\n def current_language(self):\n for candidate in self.context.parameters:\n if candidate.startswith(lang2tag('')):\n return LANGUAGE_LABELS.get(tag2lang(candidate), candidate)\n\n def list_language_versions(self):\n parameters = self.context.parameters\n for lang in current():\n p = self.context.parameters.replace(\n lang2tag('*'), lang2tag(lang))\n edition = None\n try:\n # Try to find an edition in this language with exactly\n # identical other parameters\n edition = self.context.page.getEdition(p)\n except KeyError:\n # Try to find an edition in this language without caring for\n # the other parameters.\n for candidate in self.context.page.editions:\n if lang2tag(lang) in candidate.parameters:\n edition = candidate\n break\n\n version = {}\n version['class'] = ''\n version['label'] = LANGUAGE_LABELS.get(lang, lang)\n version['hint'] = []\n\n if edition is not None:\n version['url'] = self.view.url(edition, '@@edit')\n else:\n version['hint'] = '(not created yet)'\n version['url'] = self.view.url(\n self.context, '@@translate',\n data=dict(language=lang))\n\n if edition is self.context:\n version['class'] = 'selected'\n if lang == fallback():\n version['hint'] = '(is fallback)'\n\n yield version\n\n\nclass Translate(grok.View):\n\n grok.context(asm.cms.IEdition)\n form_fields = grok.AutoFields(ITranslation)\n\n def update(self, language):\n page = self.context.page\n parameters = asm.cms.edition.get_initial_parameters()\n p = parameters.replace(lang2tag('*'), lang2tag(language))\n try:\n translation = page.getEdition(p)\n except KeyError:\n translation = page.addEdition(p)\n translation.copyFrom(self.context)\n self.flash(u'Translation created.')\n else:\n self.flash(u'Translation already 
exists.')\n self.translation = translation\n\n def render(self):\n self.redirect(self.url(self.translation, '@@edit'))\n","sub_path":"src/asm/translation/translation.py","file_name":"translation.py","file_ext":"py","file_size_in_byte":8114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"548460242","text":"from discord.ext import commands\n\n\nclass Error(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n # The error handler\n @commands.Cog.listener()\n async def on_command_error(self, ctx, error):\n message = ''\n\n if isinstance(error, commands.CommandNotFound):\n message = \":x: The command was not found! Type `-help` to check the list of available commands.\"\n elif isinstance(error, commands.MissingRequiredArgument):\n message = \":x: One or more arguments are missing!\"\n elif isinstance(error, commands.TooManyArguments):\n message = \":x: Too many arguments were provided!\"\n else:\n message = \":x: Something went wrong while running the command!\"\n print(error)\n\n if message != '':\n await ctx.send(message)\n\n @commands.Cog.listener()\n async def on_ready(self):\n print(\"-Error ready!\")\n\n\ndef setup(client):\n client.add_cog(Error(client))\n","sub_path":"cogs/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"600503432","text":"#!/usr/bin/python3\n# vim: tabstop=2:\nimport classes\n\nimport threading\nimport socket\nimport os,sys,stat\nfrom time import sleep\n\nclass ControlPanel (threading.Thread):\n\n\tdef __init__(self):\n\t\tself.workers=[]\n\t\tself.worker_names=[]\n\t\tthreading.Thread.__init__(self)\n\tdef run(self):\n\t\twith socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:\n\t\t\tos.mkdir(\"/tmp/minecraft_worker\")\n\t\t\ts.bind(\"/tmp/minecraft_worker/1\")\n\t\t\ts.listen(1)\n\t\t\tos.chmod(\"/tmp/minecraft_worker/1\", stat.S_IRWXO)\n\t\t\tEnd = True\n\t\t\twhile End:\n\t\t\t\tconn, addr = s.accept()\n\t\t\t\twith conn:\n\t\t\t\t\twhile End:\n\t\t\t\t\t\tdata = conn.recv(1024)\n\t\t\t\t\t\tif not data: break\n\t\t\t\t\t\tprint(data)\n\t\t\t\t\t\tcmds = data.decode().split(\"\\n\")\n\t\t\t\t\t\tfor i in cmds:\n\t\t\t\t\t\t\tcmd = i.split(\" \")\n\t\t\t\t\t\t\tif (cmd[0]==\"create\"):\n\t\t\t\t\t\t\t\tif cmd[1] in self.worker_names:\n\t\t\t\t\t\t\t\t\tconn.sendall(\"chujaj sie\\n\".encode())\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tself.workers.append(classes.Idler(username=cmd[1]))\n\t\t\t\t\t\t\t\t\tself.workers[-1].start()\n\t\t\t\t\t\t\t\t\tself.worker_names.append(cmd[1])\n\t\t\t\t\t\t\t\t\tconn.sendall((\"created \"+cmd[1]+\"\\n\").encode())\n\t\t\t\t\t\t\telif (cmd[0]==\"stop\"):\n\t\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\t\tx = self.worker_names.index(cmd[1])\n\t\t\t\t\t\t\t\t\tself.workers[x].stop=True\n\t\t\t\t\t\t\t\t\tself.workers[x].join()\n\t\t\t\t\t\t\t\t\tdel self.workers[x]\n\t\t\t\t\t\t\t\t\tdel self.worker_names[x]\n\t\t\t\t\t\t\t\t\tconn.sendall((\"stopped \"+cmd[1]+\"\\n\").encode())\n\t\t\t\t\t\t\t\texcept ValueError:\n\t\t\t\t\t\t\t\t\tconn.sendall(\"chujaj sie\\n\".encode())\n\t\t\t\t\t\t\telif (cmd[0]==\"showworkers\"):\n\t\t\t\t\t\t\t\ttosend = \"\"\n\t\t\t\t\t\t\t\tif self.worker_names:\n\t\t\t\t\t\t\t\t\tprint(self.worker_names)\n\t\t\t\t\t\t\t\t\tfor i in 
self.worker_names:\n\t\t\t\t\t\t\t\t\t\tprint(i)\n\t\t\t\t\t\t\t\t\t\ttosend+=(i+'\\n')\n\t\t\t\t\t\t\t\t\tconn.sendall(tosend.encode())\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tconn.sendall('\\n'.encode())\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\telif (cmd[0]==\"echo\"):\n\t\t\t\t\t\t\t\tconn.sendall(\"echo\\n\".encode())\n\t\t\t\t\t\t\telif (cmd[0]==\"exit\"):\n\t\t\t\t\t\t\t\tfor i in self.workers:\n\t\t\t\t\t\t\t\t\ti.stop=True\n\t\t\t\t\t\t\t\t\ti.join()\n\t\t\t\t\t\t\t\tEnd=False\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tconn.sendall(\"\\n\".encode())\n\t\t\tos.remove(\"/tmp/minecraft_worker/1\")\ndef main():\n\tc = ControlPanel()\n\tc.start()\n\tc.join()\nif __name__==\"__main__\":\n\tmain()\t\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"611361579","text":"from django.contrib.auth.decorators import login_required\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, get_object_or_404\nfrom django.urls import reverse\nfrom django.views import generic\nfrom itertools import chain\nfrom django.core.paginator import Paginator\n\nfrom .forms import OwnerForm, CondoForm, RoomForm\nfrom .models import Room, Condo, Owner\nfrom .models.condo import CondoImages\nfrom .models.room import RoomImages\n\n\nclass IndexView(generic.ListView):\n template_name = 'estate/index.html'\n condo_list = 'condo_list'\n\n def get_queryset(self):\n return Condo.objects.order_by('name')\n\n\ndef condo(request, condo_id):\n condo = get_object_or_404(Condo, pk=condo_id)\n return render(request, 'estate/condo.html', {'condo': condo})\n\n\ndef room(request, room_id):\n room = get_object_or_404(Room, pk=room_id)\n condo = Condo.objects.get(name=room.condo.name)\n return render(request, 'estate/room.html', {'condo': condo, 'room': room})\n\n\ndef search_by_amnities(request):\n condoSet_list = Condo.objects.order_by('-name')\n\n if request.method == 'GET':\n res = request.GET['selectedfield']\n keywords = res.strip('][').split(', ')\n for index, amenity in enumerate(keywords):\n # remove quotes at begin and end. 
Somehow strip doesn't work\n    # and update back to keywords.\n\n    amenity = amenity[1:-1]\n    keywords[index] = amenity\n    condoSet_list = condoSet_list.filter(amenities__icontains=amenity)\n    else:\n    keywords = request.POST.getlist('selectedfield')\n    for amenity in keywords:\n    condoSet_list = condoSet_list.filter(amenities__icontains=amenity)\n\n    return keywords, condoSet_list, 'POST', Room.objects.none()\n\n\ndef search_by_keywords(request):\n\n    keywords = request.GET['search']\n    condoSet_list = Condo.objects.filter(name__icontains=keywords)\n    roomSet_list = Room.objects.filter(title__icontains=keywords)\n    return keywords, condoSet_list, roomSet_list, request.method\n\n\ndef search(request):\n    roomSet_list = Room.objects.order_by('-title')\n\n    if request.method == 'GET':\n    if 'search' in request.GET:  # by keywords\n    keywords, condoSet_list, roomSet_list, method = search_by_keywords(\n    request)\n    else:  # by checkbox fields\n    keywords, condoSet_list, method, roomSet_list = search_by_amnities(\n    request)\n    else:\n    keywords, condoSet_list, method, roomSet_list = search_by_amnities(\n    request)\n\n    # if no condo then rooms shouldn't be returned\n    if condoSet_list:\n    roomSet_list = roomSet_list.filter(\n    still_on_contract=False).exclude(condo__in=condoSet_list)\n    else:\n    roomSet_list = Room.objects.none()\n\n    posts = list(chain(condoSet_list, roomSet_list))\n    paginator = Paginator(posts, 1)\n    page_number = request.GET.get('page')\n    page_obj = paginator.get_page(page_number)\n\n    context = {\n    'keywords': keywords,\n    'condo_result': condoSet_list,\n    'room_result': roomSet_list,\n    'page_obj': page_obj,\n    'posts': posts,\n    'method': method,\n    }\n\n    return render(request, 'estate/search_results.html', context)\n\n\n@login_required\ndef upload_owner(request):\n    if request.method == 'POST':\n    form = OwnerForm(request.POST)\n    form.save()\n    return HttpResponseRedirect(reverse('estate:index'))\n    else:\n    form = OwnerForm()\n    return render(request, 'estate/upload_owner.html', {'form': form})\n\n\n@login_required\ndef upload_index(request):\n    if request.user.role != 'owner':\n    return HttpResponseRedirect(reverse('estate:index'))\n    condo_form = CondoForm(prefix='condo')\n    room_form = RoomForm(prefix='room')\n    return render(request, 'estate/upload_index.html', {\n    'condo_form': condo_form,\n    'room_form': room_form,\n    })\n\n\n@login_required\ndef upload_condo(request):\n    condo_form = CondoForm(request.POST, prefix='condo')\n    # Bug - at least one amenity has to be checked for the form to validate\n    if condo_form.is_valid():\n    this_condo = condo_form.save()\n\n    for image in request.FILES.getlist('files'):\n    CondoImages.objects.create(condo=this_condo, image=image)\n\n    return HttpResponseRedirect(reverse('estate:index'))\n\n\n@login_required\ndef upload_room(request):\n    room_form = RoomForm(request.POST, prefix='room')\n    if room_form.is_valid():\n    this_room = room_form.save(commit=False)\n    this_room.owner = Owner.objects.first()\n    this_room.save()\n\n    for image in request.FILES.getlist('files'):\n    RoomImages.objects.create(room=this_room, image=image)\n    else:\n    print(\"==========room_form_invalid===========\", request.POST)\n\n    return HttpResponseRedirect(reverse('estate:index'))\n","sub_path":"estate/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"67688867","text":"import sys\r\n\r\n\r\nfreq = {}\r\n\r\n\r\nfd = open('wiki.txt', 'r')\r\n\r\nfor line in fd.readlines():\r\n    form = line.strip('\\n')\r\n\r\n    if form not in freq:\r\n    freq[form] = 0\r\n    freq[form] = freq[form] + 1\r\n    \r\ntext = sys.stdin.read()\r\nwords = text.split(' ')\r\n\r\nfor word in words:\r\n    if word in freq:\r\n    print(word)\r\n    else:\r\n    print('*' + word)\r\n\r\n","sub_path":"spellchecker.py","file_name":"spellchecker.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"514212491","text":"import re\nimport urllib.parse\nimport logging\nimport os\n\n\ndef url_to_path(url, data_path):\n    try:\n    parsed_url = urllib.parse.urlparse(url)\n    except ValueError as e:\n    logging.warning(str(e))\n    return ''\n    full_path = os.path.join(data_path, parsed_url.netloc) + parsed_url.path\n    full_path = re.sub(r'[<>|:&\\s\\\\;()]', '', full_path)\n    return full_path\n\n\ndef id_and_path_to_doc(data_dir, db_service):\n    return [(id_doc, url_to_path(url, data_dir))\n    for id_doc, url in db_service.get_id_and_url()]\n","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"298437322","text":"#!/usr/bin/python3.6\n# encoding: utf-8\n'''\n@ author: 郑祥忠\n@ license: (C) Copyright 2017-2019, 海格星航\n@ contact: dylenzheng@gmail.com\n@ file: rename.py\n@ time: 4/4/19 8:01 PM\n@ desc: desc\n'''\n\nimport os\nimport utils\n\ndef rename_image(label_dict={}, out_file='train.csv'):\n    '''\n    Rename the image files and update the label file at the same time, saving the result as a new CSV file train_new.csv.\n    :param label_dict:\n    :param out_file:\n    :return:\n    '''\n    new_label_dict = {}\n    i = 1\n    # print('i=',i)\n    with open(out_file, 'w') as f:\n    # iterate over the keys of the dict\n    for key in label_dict.keys():\n\n    # os.path.split() uses the last '/' in PATH as the separator\n    # os.path.split('/home/zhex/soft/python/test.jpg') returns '/home/zhex/soft/python' and 'test.jpg'\n    image_name = os.path.split(key)[-1]\n    # print('image_name=',image_name)\n    new_image_name = '%09d'%i + '.jpg'\n    i = i + 1\n    # rename the file\n    new_key = key.replace(image_name, new_image_name)\n    os.renames(key, new_key)\n\n    # build the new dict\n    new_label_dict.setdefault(new_key, label_dict.get(key, []))\n    utils.write_csv(new_label_dict, out_path=out_file)\n\n    return out_file\nif __name__ == '__main__':\n    label_dict_new, _ = utils.read_csv(csv_path=r'./train_origin.csv',pre_dir=r'/home/zhex/fsdownload')\n    # print(label_dict_new)\n    rename_image(label_dict_new,out_file='train.csv')","sub_path":"crowdHuman2voc/rename.py","file_name":"rename.py","file_ext":"py","file_size_in_byte":1473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"484589992","text":"from sklearn.svm import LinearSVC\nimport numpy as np\nfrom sklearn.metrics import accuracy_score\n#1.data\nx_data = [[0,0],[1,0],[0,1],[1,1]]\ny_data = [0,1,1,1]\n\n#OR gate\n\n#2. model\n\nmodel = LinearSVC()\n\n#3. training\n\nmodel.fit(x_data,y_data)\n\n\n#4. evaluate, predict\ny_pred = model.predict(x_data)\nprint(x_data, \"predict: \",y_pred)\n\nresult = model.score(x_data,y_data)\nprint(\"model.score:\", result) #accuracy #model.score: 1.0\n\nacc = accuracy_score(y_data, y_pred)\nprint(acc) #1.0","sub_path":".vscode/ml/ml03_2_or.py","file_name":"ml03_2_or.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"297449655","text":"#!/usr/bin/env python\n\ndef diamond(s):\n    \n    l = s.strip()\n    i = len(l)//2\n    \n    f, b = i + 1, i - 1\n    ns = l[i]\n    print(ns) \n    nl = [ns,]\n    while(b >= 0 and f < len(l)):\n    ns = l[b] + ns + l[f]\n    f = f + 1\n    b = b - 1\n    nl.append(ns)\n    print(ns)\n    \n    n = len(nl)\n    while(n > 0):\n    n = n - 1\n    print(nl[n])\n\n","sub_path":"Py/sem_toets03/word.py","file_name":"word.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"112049969","text":"#! /usr/bin/env python\n# coding: utf-8\n\nimport sys\nimport tempfile\nimport uuid\nfrom datetime import datetime, timedelta\nfrom time import time\nfrom Tools.Mysql_db import DB\nfrom Class import TIME_FORMAT\nfrom .Check import check_sql_character\n\ntemp_dir = tempfile.gettempdir()\n\n__author__ = 'ZhouHeng'\n\n\nclass BugManager(object):\n\n    level_desc = [\"自己发现\", \"本地自己人发现\", \"本地开发人员发现\", \"测试环境自己人发现\", \"测试环境开发人员发现\",\n    \"测试环境领导发现\", \"测试环境其他领导发现\", \"生产环境自己人发现\", \"生产环境开发人员发现\",\n    \"生产环境领导发现\", \"生产环境其他领导发现\", \"生产环境客户发现\", \"忍无可忍\"]\n    status_desc = [\"等待问题确认\", \"已有问题疑似拥有者\", \"已确认问题拥有者\", \"问题已被修复\", \"问题被取消\", \"现象正常\"]\n\n    def __init__(self):\n    self.db = DB()\n    self.bug = \"bug_info\"\n    self.bug_owner = \"bug_owner\"\n    self.bug_example = \"bug_example\"\n    self.t_reason = \"bug_reason\"\n    self.user = \"sys_user\"\n\n    def new_bug_info(self, bug_title, submitter, bug_level):\n    submit_time = datetime.now().strftime(TIME_FORMAT)\n    bug_no = uuid.uuid1().hex\n    if len(bug_title) < 5:\n    return False, \"Bad bug_title\"\n    bug_title = check_sql_character(bug_title)[:50]\n    kwargs = {\"bug_no\": bug_no, \"bug_title\": bug_title, \"submitter\": submitter, \"submit_time\": submit_time,\n    \"bug_level\": bug_level}\n    result = self.db.execute_insert(self.bug, kwargs=kwargs)\n    if result != 1:\n    return False, \"sql execute result is %s \" % result\n    if bug_level == 0:\n    self.new_bug_link(bug_no, submitter, 2, submitter)\n    # self.new_bug_link(bug_no, submitter, 3, submitter)\n    return True, kwargs\n\n    def new_bug_link(self, bug_no, user_name, link_type, adder):\n    if len(bug_no) != 32:\n    return False, \"Bad bug_no\"\n    link_time = datetime.now().strftime(TIME_FORMAT)\n    insert_sql = \"INSERT INTO %s (bug_no,user_name,type,link_time,adder) VALUES ('%s','%s','%s','%s','%s') \" \\\n    \"ON DUPLICATE KEY UPDATE adder=adder;\" \\\n    % (self.bug_owner, bug_no, user_name, link_type, link_time, adder)\n    result = self.db.execute(insert_sql)\n    # if result != 1:\n    #     return False, \"sql execute result is %s \" % result\n    self.update_bug_status(bug_no, link_type)\n    return True, {\"bug_no\": bug_no, \"user_name\": user_name, \"link_type\": link_type, \"link_time\": link_time}\n\n    def del_bug_link(self, bug_no, user_name, link_type, adder):\n    if len(bug_no) != 32:\n    return False, \"Bad bug_no\"\n    delete_sql = \"DELETE FROM %s WHERE bug_no='%s' AND user_name='%s' AND adder='%s' AND type=%s;\" \\\n    % (self.bug_owner, bug_no, user_name, adder, link_type)\n    result = self.db.execute(delete_sql)\n    return True, result\n\n    def del_bug(self, bug_no):\n    if len(bug_no) != 32:\n    return False, \"Bad bug_no\"\n\n    def new_bug_example(self, bug_no, content):\n    if len(bug_no) != 32:\n    return False, \"Bad bug_no\"\n    add_time = datetime.now().strftime(TIME_FORMAT)\n    if len(content) < 5:\n    return False, \"Bad content\"\n\n    kwargs = {\"content\": content, \"add_time\": add_time}\n    where_value = {\"bug_no\": bug_no}\n    l = self.db.execute_update(self.bug_example, where_value=where_value, update_value=kwargs)\n    kwargs.update(where_value)\n    if l != 1:\n    self.db.execute_insert(self.bug_example, kwargs, ignore=True)\n    return True, kwargs\n\n    def select_bug_example(self, bug_no):\n    cols = [\"bug_no\", \"content\", \"add_time\"]\n    db_items = self.db.execute_select(self.bug_example, cols=cols, where_value=dict(bug_no=bug_no))\n    if len(db_items) > 0:\n    return True, db_items[0]\n    return True, None\n\n    def update_bug_status(self, bug_no, status):\n    if len(bug_no) != 32:\n    return False, \"Bad bug_no\"\n    update_sql = \"UPDATE %s SET bug_status=%s WHERE bug_no='%s';\" % (self.bug, status, bug_no)\n    result = self.db.execute(update_sql)\n    return True, result\n\n    def get_bug_list(self, offset=0, num=20):\n    if type(offset) != int or type(num) != int:\n    return False, \"Bad offset or num\"\n    select_sql = \"SELECT bug_no,bug_title,submitter,submit_time,bug_status,bug_level FROM %s WHERE bug_status<3 \" \\\n    \"ORDER BY bug_status,submit_time DESC LIMIT %s, %s;\" % (self.bug, offset, num)\n    self.db.execute(select_sql)\n    bug_list = []\n    for item in self.db.fetchall():\n    bug_level = item[5]\n    if bug_level < 0 or bug_level >= len(self.level_desc):\n    continue\n    bug_level_desc = self.level_desc[bug_level].decode(\"utf8\")\n    bug_list.append({\"bug_no\": item[0], \"bug_title\": item[1], \"submitter\": item[2],\n    \"submit_time\": item[3].strftime(TIME_FORMAT), \"bug_status\": item[4],\n    \"bug_level\": bug_level, \"bug_level_desc\": bug_level_desc})\n    return True, bug_list\n\n    def get_my_bug_list(self, user_name):\n    b_cols = [\"bug_no\", \"bug_title\", \"submitter\", \"submit_time\", \"bug_level\", \"bug_status\"]\n    j_cols = [\"user_name\", \"type\", \"link_time\"]\n    where_value = dict(user_name=user_name, type=2)\n    where_cond = [\"bug_status<=3\", \"bug_status>=2\"]\n    bug_list = self.db.execute_select_left(self.bug, self.bug_owner, \"bug_no\", b_cols=b_cols, j_cols=j_cols,\n    where_value=where_value, where_cond=where_cond, order_by=[\"submit_time\"],\n    order_desc=True)\n    return True, bug_list\n\n    def get_bug_basic(self, bug_no):\n    where_value = dict(bug_no=bug_no)\n    cols = [\"bug_no\", \"bug_title\", \"submitter\", \"submit_time\", \"bug_level\", \"bug_status\"]\n    bug_list = self.db.execute_select(self.bug, where_value=where_value, cols=cols, package=True)\n    if len(bug_list) <= 0:\n    return False, \"Not Exist\"\n    return True, bug_list[0]\n\n    def get_bug_info(self, bug_no):\n    if len(bug_no) != 32:\n    return False, \"Bad bug_no\"\n    # fetch the basic info\n    select_sql = \"SELECT bug_no,bug_title,submitter,submit_time,bug_status,bug_level,nick_name FROM %s AS i,%s AS u \" \\\n    \"WHERE bug_no='%s' AND i.submitter=u.user_name;\" \\\n    % (self.bug, self.user, bug_no)\n    result = self.db.execute(select_sql)\n    if result != 1:\n    return False, \"Bad bug_no.\"\n    info = self.db.fetchone()\n    bug_level = info[5]\n    if bug_level < 0 or bug_level >= len(self.level_desc):\n    bug_level_desc = \"\"\n    else:\n    bug_level_desc = self.level_desc[bug_level].decode(\"utf8\")\n    basic_info = {\"bug_no\": info[0], \"bug_title\": info[1], \"submitter\": info[2],\n    \"submit_time\": info[3].strftime(TIME_FORMAT), \"bug_status\": info[4], \"bug_level\": bug_level,\n    \"submit_name\": info[6], \"bug_level_desc\": bug_level_desc}\n    # fetch the linked users\n    select_sql = \"SELECT o.user_name,type,link_time,adder,nick_name FROM %s AS o, %s AS u \" \\\n    \"WHERE bug_no='%s' AND o.user_name=u.user_name;\" % (self.bug_owner, self.user, bug_no)\n    self.db.execute(select_sql)\n    link_user = {\"ys\": {}, \"owner\": {}, \"fix\": {}, \"channel\": {}, \"design\": {}}\n    for item in self.db.fetchall():\n    link_info = {\"user_name\": item[0], \"link_type\": item[1], \"link_time\": item[2].strftime(TIME_FORMAT),\n    \"adder\": item[3], \"nick_name\": item[4]}\n    if item[1] == 1:\n    link_user[\"ys\"][item[0]] = link_info\n    elif item[1] == 2:\n    link_user[\"owner\"][item[0]] = link_info\n    elif item[1] == 3:\n    link_user[\"fix\"][item[0]] = link_info\n    elif item[1] == 4:\n    link_user[\"channel\"][item[0]] = link_info\n    elif item[1] == 5:\n    link_user[\"design\"][item[0]] = link_info\n    else:\n    pass\n    return True, {\"basic_info\": basic_info, \"link_user\": link_user}\n\n    def select_bug_link(self, bug_no):\n    link_cols = [\"user_name\", \"type\", \"link_time\", \"adder\"]\n    db_items = self.db.execute_select(self.bug_owner, where_value=dict(bug_no=bug_no), cols=link_cols)\n    return True, db_items\n\n    def get_statistic(self):\n    # get the overall statistics\n    bug_role = 1024\n    select_sql = \"SELECT u.user_name,nick_name,count(bug_no) AS bug_num FROM %s as u LEFT JOIN %s as b \" \\\n    \"on u.user_name=b.user_name AND type=2 WHERE role & %s = %s \" \\\n    \"GROUP BY u.user_name ORDER BY bug_num DESC;\" \\\n    % (self.user, self.bug_owner, bug_role, bug_role)\n    self.db.execute(select_sql)\n    all_data = []\n    for item in self.db.fetchall():\n    all_data.append({\"user_name\": item[0], \"nick_name\": item[1], \"bug_num\": item[2]})\n    # get the statistics for the last month\n    after_time = (datetime.now() - timedelta(days=30)).strftime(TIME_FORMAT)\n    select_sql = \"SELECT u.user_name,nick_name,count(bug_no) AS bug_num FROM %s AS u LEFT JOIN %s AS b \" \\\n    \"on u.user_name=b.user_name AND type=2 AND link_time>'%s' \" \\\n    \"WHERE role & %s = %s GROUP BY u.user_name ORDER BY bug_num DESC;\" \\\n    % (self.user, self.bug_owner, after_time, bug_role, bug_role)\n    self.db.execute(select_sql)\n    month_data = []\n    for item in self.db.fetchall():\n    month_data.append({\"user_name\": item[0], \"nick_name\": item[1], \"bug_num\": item[2]})\n    return True, {\"month\": month_data, \"all\": all_data}\n\n    def insert_bug_reason(self, **kwargs):\n    kwargs.update(dict(add_time=int(time())))\n    l = self.db.execute_insert(self.t_reason, kwargs=kwargs, ignore=True)\n    return True, kwargs\n\n    def update_bug_reason(self, bug_no, submitter, reason):\n    update_value = dict(reason=reason, add_time=int(time()))\n    where_value = dict(bug_no=bug_no, submitter=submitter)\n    l = self.db.execute_update(self.t_reason, where_value=where_value, update_value=update_value)\n    update_value.update(where_value)\n    return True, update_value\n\n    def select_bug_reason(self, bug_no, submitter=None):\n    where_value = dict(bug_no=bug_no)\n    if submitter is not None:\n    where_value[\"submitter\"] = submitter\n    reason_cols = [\"bug_no\", \"submitter\", \"reason\", \"add_time\"]\n    db_items = self.db.execute_select(self.t_reason, where_value=where_value, cols=reason_cols)\n    return True, db_items\n\n\n\n","sub_path":"Class/Bug.py","file_name":"Bug.py","file_ext":"py","file_size_in_byte":10997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"519856910","text":"#CUDA_VISIBLE_DEVICES=X python predict.py --statedict netstate.pth --cuda --inputfile test1.jpg --outdir ./predouts\nfrom __future__ import print_function\nimport argparse\nimport os\nfrom PIL import Image\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom torchvision.transforms import Compose, Normalize, ToTensor\nimport torchvision.utils as vutils\nfrom net import NetS\n\n# Debug MWB\nimport sys\nprint('__Python VERSION:', sys.version)\nprint('__pyTorch VERSION:', torch.__version__)\nprint('__CUDNN VERSION:', torch.backends.cudnn.version())\nprint('__Number CUDA Devices:', torch.cuda.device_count())\nprint('Active CUDA Device: GPU', torch.cuda.current_device())\nprint ('Available devices ', torch.cuda.device_count())\n# End Debug MWB\n\nparser = argparse.ArgumentParser(description='Arguments for predict routine')\nparser.add_argument('--statedict', required=True, help='filename of trained network state')\nparser.add_argument('--inputfile', required=True, help='filename of image to run through network')\nparser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use, for now it only supports one GPU')\nparser.add_argument('--outdir', default='./outdir', help='stores predicted images')\nparser.add_argument('--cuda', default=True, help='using GPU or not')\nopts = parser.parse_args()\n\nprint(opts)\n\ntry:\n    os.makedirs(opts.outdir)\nexcept OSError:\n    pass\n\ncuda = opts.cuda\n\ncudnn.benchmark = True\n\nnetS = NetS(ngpu=1)\nnetS.cuda()\nnetS.eval()\n\nnetS.load_state_dict(torch.load(opts.statedict))\n\nimage = Image.open(opts.inputfile).convert('RGB')\nimage = image.resize((128, 128), Image.BILINEAR)\n\nimg_transform = Compose([\n    ToTensor(),\n    Normalize(mean=[0.485, 0.456, 0.406],std=[0.229, 0.224, 0.225])\n])\n\nprint(\"type(image) = \", type(image))\ndata = img_transform(image)\nprint(\"type(data) = \", type(data))\nprint(\"data.size() = \", data.size())\ndata = data.unsqueeze(0)\nprint(\"data.size() = \", data.size())\ninput = torch.autograd.Variable(data)\nprint(\"type(input.data) = \", type(input.data))\n#if cuda:\ninput = input.cuda()\n\nprint(\"type(input.data) = \", type(input.data))\n\npred = netS(input)\nprint(\"type(pred) = \", type(pred))\nprint(\"pred.size() = \", pred.size())\nvutils.save_image(pred.data, '%s/result_val.png' % opts.outdir, normalize=True)\n\n\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"394322901","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport http.client, threading, sys, getopt, os\nfrom multiprocessing import cpu_count\ndefaultlist = ['admin/','administrator/','admin1/','admin2/','admin3/','admin4/','admin5/','usuarios/','usuario/','moderator/','webadmin/','adminarea/','bb-admin/','adminLogin/','admin_area/','panel-administracion/','instadmin/',\n'memberadmin/','administratorlogin/','adm/','admin/account.php','admin/index.php','admin/login/','admin/login.php','admin/admin.php','admin/account.php',\n'admin_area/admin.php','admin_area/login.php','siteadmin/login.php','siteadmin/index.php','siteadmin/login.html','admin/account.html','admin/index.html','admin/login.html','admin/admin.html',\n'admin_area/index.php','bb-admin/index.php','bb-admin/login.php','bb-admin/admin.php','admin/home.php','admin_area/login.html','admin_area/index.html',\n'admin/controlpanel.php','admin.php','admincp/index.asp','admincp/login.asp','admincp/index.html','admin/account.html','adminpanel.html','webadmin.html',\n'webadmin/index.html','webadmin/admin.html','webadmin/login.html','admin/admin_login.html','admin_login.html','panel-administracion/login.html',\n'admin/cp.php','cp.php','administrator/index.php','administrator/login.php','nsw/admin/login.php','webadmin/login.php','admin/admin_login.php','admin_login.php',\n'administrator/account.php','administrator.php','admin_area/admin.html','pages/admin/admin-login.php','admin/admin-login.php','admin-login.php',\n'bb-admin/index.html','bb-admin/login.html','acceso.php','bb-admin/admin.html','admin/home.html','login.php','modelsearch/login.php','moderator.php','moderator/login.php',\n'moderator/admin.php','account.php','pages/admin/admin-login.html','admin/admin-login.html','admin-login.html','controlpanel.php','admincontrol.php',\n'admin/adminLogin.html','adminLogin.html','admin/adminLogin.html','home.html','rcjakar/admin/login.php','adminarea/index.html','adminarea/admin.html',\n'webadmin.php','webadmin/index.php','webadmin/admin.php','admin/controlpanel.html','admin.html','admin/cp.html','cp.html','adminpanel.php','moderator.html','administrator/login.html','user.html','administrator/account.html','administrator.html','login.html','modelsearch/login.html',\n'moderator/login.html','adminarea/login.html','panel-administracion/index.html','panel-administracion/admin.html','modelsearch/index.html','modelsearch/admin.html',\n'admincontrol/login.html','adm/index.html','adm.html','moderator/admin.html','user.php','account.html','controlpanel.html','admincontrol.html',\n'panel-administracion/login.php','wp-login.php','adminLogin.php','admin/adminLogin.php','home.php','admin.php','adminarea/index.php',\n'adminarea/admin.php','adminarea/login.php','panel-administracion/index.php','panel-administracion/admin.php','modelsearch/index.php',\n'modelsearch/admin.php','admincontrol/login.php','adm/admloginuser.php','admloginuser.php','admin2.php','admin2/login.php','admin2/index.php','usuarios/login.php',\n'adm/index.php','adm.php','affiliate.php','adm_auth.php','memberadmin.php','administratorlogin.php', 'webmaster/', 'webmaster.php', 'phpmyadmin', 'PhpMyAdmin']\ndef onerror(py):\n    print('========================================')\n    print('Multiprocessing and Multitargeting ACP-Scanner by Bastian')\n    print('Admin Control Panel Scanner v2.4')\n    print('Visit LeakForums.org for more.')\n    print(' --http-status, -s: select an integer for HTTP status code (e.g. 200) or leave in blank for default value')\n    print('EXAMPLES:')\n    print(('    python ' + py + ' --target=example.com --panel-list=panels.txt --http-status=400'))\n    print(('    python ' + py + ' --target=mytargets.txt'))\n    print(('    python ' + py + ' -t example.com -s 301'))\ntargets = []\npanels = ''\nhttpstat = 200\ninfo = '\\nType --help or -h for more instructions.'\ntry:\n    opts, args = getopt.getopt(sys.argv[1:], 'ht:p:s:', ['help', 'target', 'panel-list=', 'http-status='])\n    if len(opts) < 1:\n    onerror(sys.argv[0])\n    sys.exit(1)\nexcept getopt.GetoptError:\n    onerror(sys.argv[0])\n    sys.exit(2)\nfor command, value in opts:\n    if command in ('-h', '--help'):\n    onerror(sys.argv[0])\n    sys.exit()\n    elif command in ('-t', '--target'):\n    if os.path.isfile(value):\n    if os.access(value, os.R_OK):\n    targets = [0, value.strip()]\n    else:\n    print(('Can\\'t read the given textfile containing the targets.' + info))\n    sys.exit(1)\n    else:\n    try:\n    url = value.replace('https://', '').replace('http://', '').replace('www.', '').replace('/', '')\n    testing = http.client.HTTPConnection(url)\n    testing.request('HEAD', '')\n    targets = [1, value.strip()]\n    except:\n    print(('Can neither request the given single target URL nor open a appropriate textfile.' + info))\n    sys.exit(1)\n    elif command in ('-p', '--panel-list'):\n    if os.path.isfile(value) & os.access(value, os.R_OK):\n    panels = value.strip()\n    else:\n    print(('Can\\'t find or read the given textfile containing the panels.' + info))\n    sys.exit(1)\n    elif command in ('-s', '--http-status'):\n    if (int(value) in range(100, 950)):\n    httpstat = int(value.strip())\n    else:\n    print(('The given HTTP status seems not to be valid. Only integer between 100 and 950 are allowed.' + info))\n    sys.exit(1)\nif (targets[0] < 1):\n    try:\n    tlist = open(targets[1], 'r')\n    spect = tlist.readlines()\n    tlist.close()\n    except:\n    print(('Can\\'t open the given textfile ' + targets[1]+ ' containing the targets.'))\n    sys.exit(1)\nelse:\n    spect = [targets[1]]\nif panels:\n    try:\n    plist = open(panels, 'r')\n    specp = plist.readlines()\n    plist.close()\n    except:\n    print(('Can\\'t open the given textfile ' + targets[1]+ ' with targets.'))\nelse:\n    specp = defaultlist\ndef check_panel(url, acp, stat):\n    try:\n    check = http.client.HTTPConnection(url)\n    acpanel = '/' + acp\n    check.request('HEAD', acpanel)\n    resp = check.getresponse()\n    if (resp.status <= stat):\n    print(('>>> Found ' + url + acpanel).strip())\n    except:\n    pass\ndef requesting_target(url):\n    global specp, httpstat\n    try:\n    obj = url.replace('https://', '').replace('http://', '').replace('www.', '').replace('/', '')\n    requesting = http.client.HTTPConnection(obj)\n    requesting.request('HEAD', '')\n    processes = []\n    for acp in specp:\n    pmultiproc = threading.Thread(target=check_panel, args=(obj, acp.strip(), httpstat))\n    pmultiproc.start()\n    processes.append(pmultiproc)\n    for thread in processes:\n    thread.join()\n    except:\n    pass\nprint('\\nMultiprocessing Adminpanel Scanner by Bastian from LeakForums.org')\nprint(('Processing with ' + str(cpu_count()) + ' available CPU'))\nprint(('Testing ' + str(len(specp)) + ' possible admin panels on currently ' + str(len(spect)) + ' targets'))\nprint(('Starting...\\n' + ('-')*50))\nfor dest in spect:\n    tmultiproc = threading.Thread(target=requesting_target, args=[dest.strip()])\n    tmultiproc.start()","sub_path":"ACPSCANNER.py","file_name":"ACPSCANNER.py","file_ext":"py","file_size_in_byte":7277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"551120457","text":"##############################################################################\n#\n# Copyright (c) 2010 Vifib SARL and Contributors. All Rights Reserved.\n#\n# WARNING: This program as such is intended to be used by professional\n# programmers who take the whole responsibility of assessing all potential\n# consequences resulting from its eventual inadequacies and bugs\n# End users who are looking for a ready-to-use solution with commercial\n# guarantees and support are strongly adviced to contract a Free Software\n# Service Company\n#\n# This program is Free Software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 3\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n##############################################################################\nimport os\n\nfrom slapos.recipe.librecipe import GenericBaseRecipe\n\nclass Recipe(GenericBaseRecipe):\n\n    def install(self):\n    self.logger.info(\"Installing dcron...\")\n\n    path_list = []\n\n    cronstamps = self.options['cronstamps']\n    cron_d = self.options['cron-entries']\n    crontabs = self.options['crontabs']\n    catcher = self.options['catcher']\n\n    binary = self.options['binary']\n\n    script = self.createPythonScript(binary,\n    'slapos.recipe.librecipe.execute.execute',\n    [self.options['dcrond-binary'].strip(), '-s', cron_d, '-c', crontabs,\n    '-t', cronstamps, '-f', '-l', '5', '-M', catcher]\n    )\n    path_list.append(script)\n    self.logger.debug('Main cron executable created at : %r', script)\n\n    self.logger.info(\"dcron successfully installed.\")\n\n    return path_list\n\n\n\nclass Part(GenericBaseRecipe):\n\n    def install(self):\n    cron_d = self.options['cron-entries']\n    name = self.options['name']\n    filename = os.path.join(cron_d, name)\n\n    with open(filename, 'w') as part:\n    part.write('%(frequency)s %(command)s\\n' % {\n    'frequency': self.options['frequency'],\n    'command': self.options['command'],\n    })\n\n    return [filename]\n","sub_path":"slapos/recipe/dcron.py","file_name":"dcron.py","file_ext":"py","file_size_in_byte":2499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"407690311","text":"from django.urls import path\n\nfrom .views import (\n    CaseListView,\n    CaseDetailView,\n    case_submit_view,\n    FAQListView,\n    HomeView,\n    result_view,\n    ScholarshipListView,\n    ScholarshipDetailView,\n    scholarship_submit_view,\n)\n\nurlpatterns = [\n    path(\"\", HomeView.as_view(), name=\"home\"),\n    path(\"cases/\", CaseListView.as_view(), name=\"case_index\"),\n    path(\"cases/<int:pk>/\", CaseDetailView.as_view(), name=\"case_detail\"),\n    path(\"cases/submit/\", case_submit_view, name=\"case_submit\"),\n    path(\"scholarship/\", ScholarshipListView.as_view(), name=\"scholarship_index\"),\n    path(\n    \"scholarship/<int:pk>/\",\n    ScholarshipDetailView.as_view(),\n    name=\"scholarship_detail\",\n    ),\n    path(\"scholarship/submit/\", scholarship_submit_view, name=\"scholarship_submit\"),\n    path(\"faq/\", FAQListView.as_view(), name=\"faq\"),\n    path(\"submission-result/\", result_view, name=\"submission_result\"),\n]\n","sub_path":"legal_db/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"542123725","text":"import sys\n\nsys.stdin = open('input_2819.txt')\n\ndiff = [(1, 0), (-1, 0), (0, 1), (0, -1)]\n\ndef search(i, j, result):\n    global real\n    if len(result) == 7:\n    return real.append(result)\n\n    for (x, y) in diff:\n    dx, dy = i + x, j + y\n    if 0 <= dx < 4 and 0 <= dy < 4:\n    search(dx, dy, result + str(lst[dx][dy]))\n\n\nT = int(input())\n\nfor t in range(1, T+1):\n    real = []\n    lst = [list(map(int, input().split())) for _ in range(4)]\n\n    for i in range(4):\n    for j in range(4):\n    result = str(lst[i][j])\n    search(i, j, result)\n    print('#{} {}'.format(t, len(list(set(real)))))\n\n","sub_path":"SWEA/2819_격자판의숫자이어붙이기.py","file_name":"2819_격자판의숫자이어붙이기.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"88186411","text":"import uuid\nfrom typing import Dict\nfrom models.item import Item\nfrom models.model import Model\nfrom models.user import User\nfrom dataclasses import dataclass, field\nfrom send_email import send_email\n\n\n@dataclass(eq=False)\nclass Alert(Model):\n    collection: str = field(init=False, default=\"alerts\")\n    name : str\n    item_id : str\n    price_limit: float\n    user_email: str\n    _id: str = field(default_factory=lambda: uuid.uuid4().hex)\n\n    def __post_init__(self):\n    self.item = Item.get_by_id(self.item_id)\n    self.user = User.find_by_email(self.user_email)\n\n\n\n    def json(self) -> Dict:\n    return {\n    \"_id\":self._id,\n    \"name\":self.name,\n    \"price_limit\":self.price_limit,\n    \"item_id\":self.item_id,\n    \"user_email\": self.user_email\n    }\n\n\n    def load_item_price(self):\n    self.item.load_price()\n    return self.item.price\n\n    def notify_if_price_reached(self, email):\n    if self.item.price < self.price_limit:\n    send_email(email, self.name, self.price_limit)\n\n","sub_path":"models/alert.py","file_name":"alert.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"459998513","text":"import requests, json\nimport pandas as pd\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nAPIURLS = {'BITFINEX': ['https://api.bitfinex.com/v1/book/%SYMBOL%', 'BTCUSD'],\n    'KRAKEN': ['https://api.kraken.com/0/public/Depth?pair=%SYMBOL%', 'XBTUSD'],\n    'GDAX': ['https://api.gdax.com/products/%SYMBOL%/book?level=2', 'BTC-USD'],\n    'HITBTC': ['https://api.hitbtc.com/api/2/public/orderbook/%SYMBOL%', 'BTCUSD'],\n    'POLONIEX': ['https://poloniex.com/public?command=returnOrderBook&currencyPair=%SYMBOL%&depth=50', 'USDT_BTC'],\n    'BITTREX': ['https://bittrex.com/api/v1.1/public/getorderbook?market=%SYMBOL%&type=both', 'USDT-BTC'],\n    'ITBIT': ['https://api.itbit.com/v1/markets/%SYMBOL%/order_book', 'XBTUSD'],\n    'UPHOLD': ['https://api.uphold.com/v0/ticker', 'BTCUSD'],\n    'BINANCE': ['https://api.binance.com/api/v1/depth?symbol=%SYMBOL%', 'BTCUSDT'],\n    'BITY': ['https://bity.com/api/v1/rate2/%SYMBOL%', 'BTCUSD']\n}\n\n\ndef get_bity():\n    info = APIURLS['BITY']\n    r = json.loads(requests.get(info[0].replace('%SYMBOL%',info[1])).text)\n    return({'bid':float(r['rate_we_buy']), 'ask':float(r['rate_we_sell'])})\n\ndef get_uphold():\n    info = APIURLS['UPHOLD']\n    r = json.loads(requests.get(info[0].replace('%SYMBOL%',info[1])).text)\n    t = pd.DataFrame.from_dict(r)\n    t = t[t.pair==info[1]]\n    return({'bid':float(t.bid), 'ask':float(t.ask)})\n\ndef get_bittrex():\n    info = APIURLS['BITTREX']\n    r = json.loads(requests.get(info[0].replace('%SYMBOL%',info[1])).text)['result']\n    res = {}\n    for key in ['buy','sell']:\n    t = pd.DataFrame.from_dict(r[key])\n    t['cs'] = (t.Quantity.astype(float)*t.Rate.astype(float)).cumsum()\n    res[key] = t[t.cs > 100000].reset_index().Rate.astype(float)[0]\n    return({'bid':res['buy'], 'ask':res['sell']})\n\ndef get_hitbtc():\n    info = APIURLS['HITBTC']\n    r = json.loads(requests.get(info[0].replace('%SYMBOL%',info[1])).text)\n    res = {}\n    for key in ['bid','ask']:\n    t = pd.DataFrame.from_dict(r[key])\n    t['cs'] = (t.size.astype(float)*t.price.astype(float)).cumsum()\n    res[key] = t[t.cs > 100000].reset_index().price.astype(float)[0]\n    return({'bid':res['bid'], 'ask':res['ask']})\n\ndef gdax_type(exch):\n    info = APIURLS[exch] # 'GDAX', 'POLONIEX', 'ITBIT', 'BINANCE'\n    r = json.loads(requests.get(info[0].replace('%SYMBOL%',info[1])).text)\n    res = {}\n    for key in ['bids','asks']:\n    t = pd.DataFrame.from_dict(r[key])\n    t['cs'] = (t[1].astype(float)*t[0].astype(float)).cumsum()\n    res[key] = t[t.cs > 100000].reset_index()[0].astype(float)[0]\n    return({'bid':res['bids'], 'ask':res['asks']})\n\ndef get_kraken(ticker_hm): \n    info = APIURLS['KRAKEN']\n    r = json.loads(requests.get(info[0].replace('%SYMBOL%',info[1])).text)['result'][ticker_hm]\n    res = {}\n    for key in ['bids','asks']:\n    t = pd.DataFrame.from_dict(r[key])\n    t['cs'] = (t[1].astype(float)*t[0].astype(float)).cumsum()\n    res[key] = t[t.cs > 100000].reset_index()[0].astype(float)[0]\n    return({'bid':res['bids'], 'ask':res['asks']})\n\ndef get_bitfinex():\n    info = APIURLS['BITFINEX']\n    r = json.loads(requests.get(info[0].replace('%SYMBOL%',info[1])).text)\n    res = {}\n    for key in ['bids','asks']:\n    t = pd.DataFrame.from_dict(r[key])\n    t['cs'] = (t.amount.astype(float)*t.price.astype(float)).cumsum()\n    res[key] = t[t.cs > 100000].reset_index().price.astype(float)[0]\n    return({'bid':res['bids'], 'ask':res['asks']})\n\nres = {'BITFINEX': get_bitfinex(),\n    'KRAKEN': get_kraken('XXBTZUSD'),\n    'GDAX': gdax_type('GDAX'),\n    'HITBTC': get_hitbtc(),\n    'POLONIEX': gdax_type('POLONIEX'),\n    'BITTREX': get_bittrex(),\n    'ITBIT': gdax_type('ITBIT'),\n    'UPHOLD': get_uphold(),\n    'BINANCE': gdax_type('BINANCE'),\n    'BITY': get_bity()}\n\ncredentials = ServiceAccountCredentials.from_json_keyfile_name('/home/aslepnev/a/gigi.json', ['https://spreadsheets.google.com/feeds'])\ngc = gspread.authorize(credentials)\nwks = gc.open(\"brokerboard\")\nwks = wks.worksheet('board')\n\necells = wks.range('B6:B'+str(len(res)+5))\nbcells = wks.range('C6:C'+str(len(res)+5))\nocells = wks.range('D6:D'+str(len(res)+5))\n\ni = 0\nfor key in res.keys():\n    ecells[i].value = key\n    bcells[i].value = res[key]['bid']\n    ocells[i].value = res[key]['ask']\n    i = i+1 \n    \nwks.update_cells(ecells)\nwks.update_cells(bcells)\nwks.update_cells(ocells)\n","sub_path":"exch.py","file_name":"exch.py","file_ext":"py","file_size_in_byte":4402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"260241245","text":"import logging\n\nimport constants\nfrom FoeQ import FoeQ\nfrom FriendQ import FriendQ\nfrom Graphing import Graphing\nfrom Player import Player\nfrom QLearning import Q_learning\nfrom SoccerGame import SoccerGame\n\nfrom CorrelatedQ import CorrelatedQ\n\nlogging.basicConfig(filename=\"debug.log\", level=constants.DEBUG_LEVEL)\n\n\nclass Menu():\n    def __init__(self):\n    pass\n\n    def experiments(self):\n    while True:\n    print(\"\\n\"\n    \"Welcome to SoccerGame!\")\n    print(\"----------------------\")\n    print(\"Make your choice: \")\n    print(\"1 - Q-learning\")\n    print(\"2 - Friend-Q\")\n    print(\"3 - Foe-Q\")\n    print(\"4 - Correlated-Q\")\n    print(\"Q - Quit\")\n    choice = input('>')\n\n    # self.env = SoccerEnv()\n\n    play_A = Player(name=constants.PLAYER_A_LETTER)\n    play_B = Player(name=constants.PLAYER_B_LETTER)\n    if choice == '1':\n    env = SoccerGame(play_A, play_B)\n    self.Q_learning = Q_learning(env,\n    alphaMax=constants.QL_ALP_MAX,\n    alphaMin=constants.QL_ALP_MIN,\n    epsilonMax=constants.QL_EPS_MAX,\n    epsilonMin=constants.QL_EPS_MIN,\n    iterations=constants.ITERATIONS,\n    gamma=constants.QL_GAMMA)\n    err, iter = self.Q_learning.learn(play_A, play_B)\n    graph = Graphing(constants.QL_NAME, 1, err, iter)\n    graph.draw_graph()\n    elif choice == '2':\n    print('Experiment Friend-Q Go!!')\n\n    env = SoccerGame(play_A, play_B)\n\n    friendQ = FriendQ(env,\n    alphaMax=constants.FRIEND_ALP_MAX,\n    alphaMin=constants.FRIEND_ALP_MIN,\n    iterations=constants.ITERATIONS,\n    gamma=constants.FRIEND_GAMMA)\n    err, iter = friendQ.learn(play_A, play_B)\n    graph = Graphing(constants.FRIEND_NAME, 1 , err, iter)\n    graph.draw_graph()\n    elif choice == '3':\n    print('Experiment Foe-Q Go!!')\n    SOCCER_GAME = SoccerGame(play_A, play_B)\n    foeQ = FoeQ(SOCCER_GAME,\n    alphaMax=constants.FOE_ALP_MAX,\n    alphaMin=constants.FOE_ALP_MIN,\n    iterations=constants.ITERATIONS,\n    gamma=constants.FOE_GAMMA)\n    err, iter = foeQ.learn()\n    graph = Graphing(constants.FOE_NAME, 1 , err, iter)\n    graph.draw_graph()\n    elif choice == '4':\n    print('Experiment Correlated-Q Go!!')\n    SOCCER_GAME = SoccerGame(play_A, play_B)\n    correlatedQ = CorrelatedQ(SOCCER_GAME,\n    alphaMax=constants.CQ_ALP_MAX,\n    alphaMin=constants.CQ_ALP_MIN,\n    iterations=constants.ITERATIONS,\n    gamma=constants.CQ_GAMMA)\n    err, iter = correlatedQ.learn()\n    graph = Graphing(constants.CQ_NAME, 1, err, iter)\n    graph.draw_graph()\n    elif choice == 'Q' or choice == 'q':\n    break\n    else:\n    print(\"Invalid choice\")\n\n    logging.debug(\"All done!!\")\n\n\n\nif __name__ == '__main__':\n    menu = Menu()\n    menu.experiments()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"520094847","text":"from pm4py import util as pmutil\r\nfrom pm4py.algo.conformance.tokenreplay.versions import token_replay\r\nfrom pm4py.objects.conversion.log import factory as log_converter\r\nfrom pm4py.util import xes_constants as xes_util\r\nfrom pm4py.objects.petri.exporter.versions import pnml as petri_exporter\r\nfrom pm4py.statistics.variants.log import get as variants_module\r\nimport multiprocessing as mp\r\nimport math\r\n\r\nTOKEN_REPLAY = \"token_replay\"\r\nVERSIONS = {TOKEN_REPLAY: token_replay.apply}\r\nVERSIONS_MULTIPROCESSING = {TOKEN_REPLAY: token_replay.apply_variants_list_petri_string_multiprocessing}\r\nVARIANTS_IDX = 'variants_idx'\r\n\r\n\r\ndef apply(log, net, initial_marking, final_marking, parameters=None, variant=TOKEN_REPLAY):\r\n    \"\"\"\r\n    Factory method to apply token-based replay\r\n    \r\n    Parameters\r\n    -----------\r\n    log\r\n    Log\r\n    net\r\n    Petri net\r\n    initial_marking\r\n    Initial marking\r\n    final_marking\r\n    Final marking\r\n    parameters\r\n    Parameters of the algorithm, including:\r\n    pm4py.util.constants.PARAMETER_CONSTANT_ACTIVITY_KEY -> Activity key\r\n\r\n    variant\r\n    Variant of the algorithm to use\r\n    \"\"\"\r\n    if parameters is None:\r\n    parameters = {}\r\n    if pmutil.constants.PARAMETER_CONSTANT_ACTIVITY_KEY not in parameters:\r\n    parameters[pmutil.constants.PARAMETER_CONSTANT_ACTIVITY_KEY] = xes_util.DEFAULT_NAME_KEY\r\n    if pmutil.constants.PARAMETER_CONSTANT_TIMESTAMP_KEY not in parameters:\r\n    parameters[pmutil.constants.PARAMETER_CONSTANT_TIMESTAMP_KEY] = xes_util.DEFAULT_TIMESTAMP_KEY\r\n    if pmutil.constants.PARAMETER_CONSTANT_CASEID_KEY not in parameters:\r\n    parameters[pmutil.constants.PARAMETER_CONSTANT_CASEID_KEY] = pmutil.constants.CASE_ATTRIBUTE_GLUE\r\n    return VERSIONS[variant](log_converter.apply(log, parameters, log_converter.TO_EVENT_LOG), net, initial_marking,\r\n    final_marking, parameters=parameters)\r\n\r\ndef chunks(l, n):\r\n    for i in range(0, len(l), n):\r\n    yield l[i:i + n]\r\n\r\ndef apply_multiprocessing(log, net, initial_marking, final_marking, parameters=None, variant=TOKEN_REPLAY):\r\n    if parameters is None:\r\n    parameters = {}\r\n    if pmutil.constants.PARAMETER_CONSTANT_ACTIVITY_KEY not in parameters:\r\n    parameters[pmutil.constants.PARAMETER_CONSTANT_ACTIVITY_KEY] = xes_util.DEFAULT_NAME_KEY\r\n    if pmutil.constants.PARAMETER_CONSTANT_TIMESTAMP_KEY not in parameters:\r\n    parameters[pmutil.constants.PARAMETER_CONSTANT_TIMESTAMP_KEY] = xes_util.DEFAULT_TIMESTAMP_KEY\r\n    if pmutil.constants.PARAMETER_CONSTANT_CASEID_KEY not in parameters:\r\n    parameters[pmutil.constants.PARAMETER_CONSTANT_CASEID_KEY] = pmutil.constants.CASE_ATTRIBUTE_GLUE\r\n\r\n    variants_idxs = parameters[VARIANTS_IDX] if VARIANTS_IDX in parameters else None\r\n    if variants_idxs is None:\r\n    variants_idxs = variants_module.get_variants_from_log_trace_idx(log, parameters=parameters)\r\n    variants_list = [[x, len(y)] for x, y in variants_idxs.items()]\r\n\r\n    no_cores = mp.cpu_count()\r\n\r\n    petri_net_string = petri_exporter.export_petri_as_string(net, initial_marking, final_marking)\r\n\r\n    n = math.ceil(len(variants_list)/no_cores)\r\n\r\n    variants_list_split = list(chunks(variants_list, n))\r\n\r\n    # Define an output queue\r\n    output = mp.Queue()\r\n\r\n    processes = [mp.Process(target=VERSIONS_MULTIPROCESSING[variant](output, x, petri_net_string, parameters=parameters)) for x in variants_list_split]\r\n\r\n    # Run processes\r\n    for p in processes:\r\n    p.start()\r\n\r\n    results = []\r\n    for p in processes:\r\n    result = output.get()\r\n    results.append(result)\r\n\r\n    al_idx = {}\r\n    for index, el in enumerate(variants_list_split):\r\n    for index2, var_item in enumerate(el):\r\n    variant = var_item[0]\r\n    for trace_idx in variants_idxs[variant]:\r\n    al_idx[trace_idx] = results[index][index2]\r\n\r\n    replayed_cases = []\r\n    for i in range(len(log)):\r\n    replayed_cases.append(al_idx[i])\r\n\r\n    return replayed_cases\r\n","sub_path":"seminar/venv/lib/python3.7/site-packages/pm4py/algo/conformance/tokenreplay/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"199291155","text":"#three_color_poster\n#21000348 AndreSeo\n\n\nfrom cs1media import *\nimg = load_picture(\"./martian.jpg\")\n\n\n#Original Image\nimg.show() \n\n\n#Converting\nw,h = img.size()\nfor y in range(h):\n    for x in range(w):\n    r, g, b=img.get(x,y)\n    v=(r+g+b)/3.0\n    if v > 130:\n    img.set(x,y,(80,65,70))\n    elif v < 65:\n    img.set(x,y,(200,190,180))\n    else:\n    img.set(x,y,(180,135,100))\n\n\n#Three Color Poster\nimg.show()\n","sub_path":"Homework #03/three_color_poster.py","file_name":"three_color_poster.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"505254825","text":"\"\"\"\nProblem\nA sequence S is called bitonic if, around some element Sk, it satisfies S1 < S2 < ... Sk-1 < Sk > Sk+1 > ... SN-1 > SN.\n\nFor example, {10, 20, 30, 25, 20}, {10, 20, 30, 40} and {50, 40, 25, 10} are bitonic sequences, but {1, 2, 3, 2, 1, 2, 3, 2, 1} and {10, 20, 30, 40, 20, 30} are not.\n\nGiven a sequence A, write a program that finds the length of the longest subsequence of A that is a bitonic sequence.\n\nInput\nThe first line contains the size N of sequence A, and the second line contains the elements Ai of A. (1 ≤ N ≤ 1,000, 1 ≤ Ai ≤ 1,000)\n\nOutput\nPrint the length of the longest bitonic subsequence of A on the first line.\n\nSample input 1\n10\n1 5 2 1 4 3 4 5 2 1\nSample output 1\n7\n\"\"\"\nimport sys\nsys.stdin = open('input.txt')\n\na = int(input())\narr = list(map(int,input().split()))\n\ndp = [0]*a\ndp_1 = [0]*a\nfor i in range(a):\n    dp[i] = 1\n    for j in range(i):\n    if arr[j] < arr[i] and dp[j]+1 > dp[i]:\n    dp[i] = dp[i]+1\n\narr.reverse()\n\nfor i in range(a):\n    dp_1[i] = 1\n    for j in range(i):\n    if arr[j] < arr[i] and dp_1[j] + 1 > dp_1[i]:\n    dp_1[i] = dp_1[i] + 1\ndp_1.reverse()\ndp_2 = [0]*a\nfor i in range(a):\n    dp_2[i] = dp[i] + dp_1[i]\nprint(max(dp_2)-1)\n\n\n\n\n\"\"\"arr_2 = arr[i:]\n    dp_2 = [0] * len(arr_2)\n    for k in range(len(arr_2)):\n    dp_2[k] = 1\n    for n in range(k):\n    if arr_2[n] > arr_2[k] and dp_2[n] + 1 > dp_2[k]:\n    dp_2[k] = dp_2[k] + 1\n\"\"\"","sub_path":"python daily coding/2020.5.30 (동적 계획법)/11054번 (가장 긴 바이토닉 부분 수열).py","file_name":"11054번 (가장 긴 바이토닉 부분 수열).py","file_ext":"py","file_size_in_byte":1629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"186133743","text":"def main():\n    argument_spec = ec2_argument_spec()\n    argument_spec.update(dict(state=dict(default='present', choices=['present', 'absent']), name=dict(required=True, type='str'), hash_key_name=dict(required=True, type='str'), hash_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']), range_key_name=dict(type='str'), range_key_type=dict(default='STRING', type='str', choices=['STRING', 'NUMBER', 'BINARY']), read_capacity=dict(default=1, type='int'), write_capacity=dict(default=1, type='int'), indexes=dict(default=[], type='list'), tags=dict(type='dict'), wait_for_active_timeout=dict(default=60, type='int')))\n    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)\n    if (not HAS_BOTO):\n    module.fail_json(msg='boto required for this module')\n    if ((not HAS_BOTO3) and module.params.get('tags')):\n    module.fail_json(msg='boto3 required when using tags for this module')\n    (region, ec2_url, aws_connect_params) = get_aws_connection_info(module)\n    if (not region):\n    module.fail_json(msg='region must be specified')\n    try:\n    connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params)\n    except (NoAuthHandlerFound, AnsibleAWSError) as e:\n    module.fail_json(msg=str(e))\n    if module.params.get('tags'):\n    try:\n    (region, ec2_url, aws_connect_kwargs) = get_aws_connection_info(module, boto3=True)\n    boto3_dynamodb = boto3_conn(module, conn_type='client', resource='dynamodb', region=region, endpoint=ec2_url, **aws_connect_kwargs)\n    if (not hasattr(boto3_dynamodb, 'tag_resource')):\n    module.fail_json(msg='boto3 connection does not have tag_resource(), likely due to using an old version')\n    boto3_sts = boto3_conn(module, conn_type='client', resource='sts', region=region, endpoint=ec2_url, **aws_connect_kwargs)\n    except botocore.exceptions.NoCredentialsError as e:\n    module.fail_json(msg='cannot connect to AWS', exception=traceback.format_exc(e))\n    else:\n    boto3_dynamodb = None\n    boto3_sts = None\n    state = module.params.get('state')\n    if (state == 'present'):\n    create_or_update_dynamo_table(connection, module, boto3_dynamodb, boto3_sts)\n    elif (state == 'absent'):\n    delete_dynamo_table(connection, module)","sub_path":"Data Set/bug-fixing-5/3ac92735077284394d336e1e72b653bede75f119--bug.py","file_name":"3ac92735077284394d336e1e72b653bede75f119--bug.py","file_ext":"py","file_size_in_byte":2340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"474125851","text":"import shutil\n\nfrom custom_json import *\n\n\ndef extract_minidataset(src_path, dst_path, num_folder: int, split_ratio=None):\n    if os.path.exists(dst_path):\n    shutil.rmtree(dst_path)\n    dst_path = os.path.join(dst_path, 'videos')\n    src_list = os.listdir(src_path)\n    _, dst_list = train_test_split(src_list, test_size=int(num_folder))\n    for vid in dst_list:\n    video_src_path = os.path.join(src_path, vid)\n    file_src_path = [video_src_path + '/' + name for name in os.listdir(video_src_path)]\n    if split_ratio is not None:\n    _, file_src_path = train_test_split(file_src_path, test_size=split_ratio)\n    video_dst_path = os.path.join(dst_path, vid)\n\n    os.makedirs(video_dst_path)\n    for file in file_src_path:\n    shutil.copy(file, video_dst_path)\n\n\nif __name__ == '__main__':\n    src = sys.argv[1]\n    dst = sys.argv[2]\n    extract_minidataset(src, dst, 50)\n    process(dst, 0.2, 0.1)","sub_path":"utils/extract_mini_dataset.py","file_name":"extract_mini_dataset.py","file_ext":"py","file_size_in_byte":945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"502360234","text":"from appium.webdriver.common.mobileby import MobileBy\nfrom appium.webdriver.webdriver import WebDriver\nfrom selenium.common.exceptions import NoSuchElementException\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\n\nclass BasePage:\n    def __init__(self, driver: WebDriver = None):\n    # initialize the driver\n    self.driver = driver\n\n    def find(self, by, value):\n    logging.info(by)\n    logging.info(value)\n    # find a single element\n    return self.driver.find_element(by, value)\n\n    def finds(self,by,value):\n    logging.info(by)\n    logging.info(value)\n    return self.driver.find_elements(by, value)\n\n\n    def swipe_find(self, text, num=5):\n    # num : default number of search attempts\n    # for swipe-search, shorten the implicit wait to speed up lookups\n    self.driver.implicitly_wait(1)\n\n    # swipe and search, with the externally supplied num deciding how many attempts\n    for i in range(0, num):\n    try:\n    element = self.driver.find_element(MobileBy.XPATH, f\"//*[@text='{text}']\")\n    self.driver.implicitly_wait(5)\n    # if the element is found, return it\n    return element\n    except NoSuchElementException:\n    print(\"not found, swiping\")\n    # swipe one page and keep searching\n    size = self.driver.get_window_size()\n    # self.driver.get_window_rect()\n    width = size['width']\n    height = size['height']\n    # 'width', 'height'\n    start_x = width / 2\n    start_y = height * 0.9\n\n    end_x = start_x\n    end_y = height * 0.2\n\n    duration = 2000  # ms\n    # perform the swipe\n    self.driver.swipe(start_x, start_y, end_x, end_y, duration)\n    if i == num - 1:\n    # if still not found after num-1 attempts, raise the exception\n    self.driver.implicitly_wait(5)\n    raise NoSuchElementException(f\"not found after {i} attempts\")\n","sub_path":"appium_project/demo_test0424/page/base_page.py","file_name":"base_page.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"33177088","text":"import sqlite3 \r\nfrom sqlite3 import Error \r\n\r\ndef sql_connection():\r\n    try:\r\n    conn = sqlite3.connect('mydatabase.db')\r\n    return conn\r\n    except Error:\r\n    print(Error) \r\n\r\ndef sql_table(conn):\r\n    cursorObj = conn.cursor()\r\n# Create the table\r\n    cursorObj.execute(\"CREATE TABLE salesman_6(salesman_id n(5), name char(30), city char(35), commission decimal(7,2));\")\r\n# Insert records\r\n    cursorObj.executescript(\"\"\"\r\n    INSERT INTO salesman_6 VALUES(5001,'James Hoog', 'New York', 0.15);\r\n    INSERT INTO salesman_6 VALUES(5002,'Nail Knite', 'Paris', 0.25);\r\n    INSERT INTO salesman_6 VALUES(5003,'Pit Alex', 'London', 0.15);\r\n    INSERT INTO salesman_6 VALUES(5004,'Mc Lyon', 'Paris', 0.35);\r\n    INSERT INTO salesman_6 VALUES(5005,'Paul Adam', 'Rome', 0.45);\r\n    \"\"\") \r\n# Sample the rows before the UPDATE\r\n    cursorObj.execute(\"SELECT * FROM salesman_6\")\r\n    rows = cursorObj.fetchall()\r\n    print(\"Agent details:\")\r\n    for row in rows:\r\n    print(row)\r\n\r\n#UPDATE\r\n    print(\"\\nUpdate commission .15 to .45 where id is 5003:\")\r\n    sql_update_query = \"\"\"Update salesman_6 set commission = .45 where salesman_id = 5003\"\"\"\r\n    cursorObj.execute(sql_update_query)\r\n    conn.commit()\r\n\r\n# Sample the rows after the UPDATE\r\n    print(\"Record Updated successfully \") \r\n    cursorObj.execute(\"SELECT * FROM salesman_6\")\r\n    rows = cursorObj.fetchall()\r\n    print(\"\\nAfter updating Agent details:\")\r\n    for row in rows:\r\n    print(row)\r\n\r\nsqllite_conn = sql_connection()\r\nsql_table(sqllite_conn)\r\nif (sqllite_conn):\r\n    sqllite_conn.close()\r\n    print(\"\\nThe SQLite connection is closed.\")","sub_path":"Python_Databse/10.Update_row_Before-After.py","file_name":"10.Update_row_Before-After.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"33744000","text":"import numpy as np\nimport cv2 as cv\nimport random\nfrom otsu import OtsuFastMultithreshold\nfrom sklearn.cluster import KMeans\nfrom sklearn.metrics import silhouette_samples, silhouette_score\n\nfor name_idx in range(1, 22):\n\tname = str(name_idx)\n\timg = cv.imread('Images/new/'+name+'.jpg')\n\timage = cv.resize(img,(0,0),fx=0.2,fy=0.2)\n\thsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n\timage = cv.resize(hsv,(0,0),fx=0.2,fy=0.2)\n\treshaped = image.reshape(image.shape[0] * image.shape[1], image.shape[2])\n\n\tsilhouette = []\n\n\tfor k in range(2,6):\n\t\tkmeans = KMeans(n_clusters=k, n_init=40, max_iter=500).fit(reshaped)\n\t\tsilhouette.append(silhouette_score(reshaped, kmeans.labels_))\n\n\t# optimal k\n\tk = silhouette.index(max(silhouette)) + 1\n\tprint(\"k = \", k)\n\n\t# k = 1\n\n\tb, g, r = img[:, :, 0], img[:, :, 1], img[:, :, 2]\n\tgray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)\n\thsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)\n\th,s,v = img[:,:,0], img[:,:,1], img[:,:,2]\n\t\n\totsu = OtsuFastMultithreshold()\n\totsu.load_image(h)\n\tkThresholds = otsu.calculate_k_thresholds(k)\n\n\tcrushed = otsu.apply_thresholds_to_image(kThresholds)\n\t# cv.imshow(\"crushed\", crushed)\n\t# cv.waitKey(0)\n\t# exit(0)\n\n\t# otsu = OtsuFastMultithreshold()\n\t# otsu.load_image(b)\n\t# kThresholds = otsu.calculate_k_thresholds(k)\n\t# thresholds = np.array(kThresholds)\n\t# # print thresholds\n\t# otsu = OtsuFastMultithreshold()\n\t# otsu.load_image(g)\n\t# thresholds += otsu.calculate_k_thresholds(k)\n\t# # print thresholds\n\t# otsu = OtsuFastMultithreshold()\n\t# otsu.load_image(r)\n\n\t# # temp = otsu.calculate_k_thresholds(k)\n\t# thresholds += otsu.calculate_k_thresholds(k)\n\t# # print thresholds\n\n\t# for i in range(0, thresholds.shape[0]):\n\t# \tthresholds[i] /= 3\n\n\t# kThresholds = thresholds \n\tprint(kThresholds)\n\tnum_segments = len(kThresholds) + 1\n\tsegments = np.zeros(shape = [num_segments, gray.shape[0], gray.shape[1]], dtype = np.uint8)\n\n\tfor k in range(0,num_segments):\n\t\tif(k == 0):\n\t\t\tsegments[k][gray < kThresholds[0]] = 255\n\t\telif(k == num_segments - 1):\n\t\t\tsegments[k][gray >= kThresholds[k - 1]] = 255\n\t\telse:\n\t\t\tsegments[k][gray >= kThresholds[k - 1]] = 255\n\t\t\tsegments[k][gray >= kThresholds[k]] = 0\n\n\tunknown = np.zeros(gray.shape)\n\tsure_fg_gl = np.zeros(gray.shape)\n\n\tfor k in range(0, num_segments-1):\n\t\tkernel = np.ones((3, 3), np.uint8)\n\t\topening = cv.morphologyEx(segments[k],cv.MORPH_OPEN,kernel, iterations = 2)\n\t\t# opening = segments[k]\n\t\tsure_bg = cv.dilate(opening, kernel, iterations = 2)\n\t\tdist_transform = cv.distanceTransform(opening,cv.DIST_L2,5)\n\t\tret, sure_fg = cv.threshold(dist_transform,0.1*dist_transform.max(),255,0)\n\t\t# sure_fg = cv.erode(opening, kernel, iterations = 3)\n\t\tsure_fg = np.uint8(sure_fg)\n\t\tunknown += cv.subtract(sure_bg, sure_fg)\n\t\t# cv.imshow('result', sure_fg)\n\t\t# cv.waitKey(0)\n\t\tsure_fg_gl += sure_fg\n\n\n\n\t# markers = np.ones(gray.shape)\n\t# sure_fg_gl = cv.erode(sure_fg_gl, kernel, iterations = 3)\n\t# cv.imshow('sfg', sure_fg_gl)\n\t# cv.waitKey(0)\n\t# cv.erode(unknown, kernel, iterations = 1)\n\n\tsure_fg_gl = np.uint8(sure_fg_gl)\n\tret, markers = cv.connectedComponents(sure_fg_gl)\n\t# markers = markers*255/(markers.max() - markers.min())\n\t# cv.imshow('markers', unknown)\n\t# cv.waitKey(0)\n\tmarkers = markers + 1\n\tmarkers[unknown == 255] = 0;\n\n\tmarkers = cv.watershed(img, markers)\n\tresult = np.zeros(gray.shape)\n\t# result[markers == -1] = 255\n\t# result = img\n\t# result[unknown== 255] = (255,0,0)\n\tresult[markers == -1] = 255\n\t# cv.imshow('result', result)\n\t# cv.waitKey(0)\n\tcv.imwrite('Images/output/'+ name + '_'+str(k)+'.jpg', result)","sub_path":"Test/watershed.py","file_name":"watershed.py","file_ext":"py","file_size_in_byte":3512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"332368188","text":"#!/usr/bin/env python\n\"\"\"\n@author Mike Smith\n@email michaesm@marine.rutgers.edu\n@purpose Parse CODAR radial files utilizing the Radial subclass and run class defined quality control (QC) methods\n\"\"\"\n\nimport logging\nimport os\nimport sys\nimport glob\nimport datetime as dt\nfrom hfradar.src.radials import Radial\n\n# Set up the parse_wave_files logger\nlogger = logging.getLogger(__name__)\nlog_level = 'INFO'\nlog_format = '%(module)s:%(levelname)s:%(message)s [line %(lineno)d]'\nlogging.basicConfig(stream=sys.stdout, format=log_format, level=log_level)\n\n\ndef main(radial_file, save_path, qc_values, export_type='radial'):\n    \"\"\"\n    Main function to parse and qc radial files\n    :param radial_file: Path to radial file\n    :param save_path: Path to save quality controlled radial file\n    :param qc_values: Dictionary containing thresholds for each QC test\n    \"\"\"\n    try:\n    r = Radial(radial_file, mask_over_land=False)\n    except Exception as err:\n    logging.error('{} - {}'.format(radial_file, err))\n    return\n\n    if r.is_valid():\n    t0 = r.time - dt.timedelta(hours=1)\n    previous_radial = '{}_{}'.format('_'.join(r.file_name.split('_')[:2]), t0.strftime('%Y_%m_%d_%H00.ruv'))\n    previous_full_file = os.path.join(os.path.dirname(r.full_file), previous_radial)\n    qc_keys = qc_values.keys()\n\n    # run high frequency radar qartod tests on open radial file\n    r.initialize_qc()\n    r.qc_qartod_syntax()\n\n    if 'qc_qartod_maximum_velocity' in qc_keys:\n    r.qc_qartod_maximum_velocity(**qc_values['qc_qartod_maximum_velocity'])\n\n    r.qc_qartod_valid_location()\n\n    if 'qc_qartod_radial_count' in qc_keys:\n    r.qc_qartod_radial_count(**qc_values['qc_qartod_radial_count'])\n\n    if 'qc_qartod_spatial_median' in qc_keys:\n    r.qc_qartod_spatial_median(**qc_values['qc_qartod_spatial_median'])\n\n    if 'qc_qartod_temporal_gradient' in qc_keys:\n    r.qc_qartod_temporal_gradient(previous_full_file)\n\n    if 'qc_qartod_avg_radial_bearing' in qc_keys:\n    r.qc_qartod_avg_radial_bearing(**qc_values['qc_qartod_avg_radial_bearing'])\n\n    if 'qc_qartod_primary_flag' in qc_keys:\n    r.qc_qartod_primary_flag(**qc_values['qc_qartod_primary_flag'])\n\n    # Export radial file to either a radial or netcdf\n    try:\n    r.export(os.path.join(save_path, r.file_name), export_type)\n    except ValueError as err:\n    logging.error('{} - {}'.format(radial_file, err))\n    pass\n\n\nif __name__ == '__main__':\n    radial_path = '../../data/radials/ruv/SEAB/'\n    radials = glob.glob(os.path.join(radial_path, '*.ruv'))\n    save_path = '../../data/radials_qc/ruv/SEAB/'\n    export_type = 'radial'\n\n    qc_values = dict(\n    qc_qartod_avg_radial_bearing=dict(reference_bearing=151, warning_threshold=15, failure_threshold=30),\n    qc_qartod_radial_count=dict(radial_min_count=75.0, radial_low_count=225.0),\n    qc_qartod_maximum_velocity=dict(radial_max_speed=300.0, radial_high_speed=100.0),\n    qc_qartod_spatial_median=dict(radial_smed_range_cell_limit=2.1, radial_smed_angular_limit=10,\n    radial_smed_current_difference=30),\n    qc_qartod_temporal_gradient=dict(gradient_temp_fail=32, gradient_temp_warn=25),\n    qc_qartod_primary_flag=dict(include=['qc_qartod_syntax', 'qc_qartod_valid_location', 'qc_qartod_radial_count',\n    'qc_qartod_maximum_velocity', 'qc_qartod_spatial_median'])\n    )\n\n    radials = glob.glob(os.path.join(radial_path, '*.ruv'))\n\n    for radial in sorted(radials):\n    main(radial, save_path, qc_values, export_type)\n","sub_path":"hfradar/methods/radials/qc_radial_file.py","file_name":"qc_radial_file.py","file_ext":"py","file_size_in_byte":3694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"226006393","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[6]:\n\n\nresult = 1\nfor i in range(1,11): #range(1,11) yields the numbers 1 through 10, so the for loop assigns each of them to i in turn.\n    result=result*i # as each i up to 10 is multiplied in, result accumulates the product of the numbers 1 through 10.\nprint(result)\n\n","sub_path":"ex170.py","file_name":"ex170.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"442949482","text":"import numpy as np\npath = \"../../../Projects/MS Projects/ML/trec8/data.csv\"\npath2 = \"../../../Projects/MS Projects/ML/trec8/tfidf_feats.csv\"\npath3 = \"../../../Projects/MS Projects/ML/trec8/esbuiltin.csv\"\nqrel_root = \"../../../Projects/MS Projects/ML/trec8/qrel.trec8\"\ndata = np.genfromtxt(path, delimiter=\",\")\ndata2 = np.genfromtxt(path2, delimiter=\",\")\ndata3 = np.genfromtxt(path3, delimiter=\",\")\nqrel = np.genfromtxt(qrel_root,dtype=str, delimiter=\" \")\nX = data[:,:-7]\nX = np.concatenate((X, data2[:,:-1]), axis=1)\nX = np.concatenate((X, data3[:,:-1]), axis=1)\n# X = MinMaxScaler().fit_transform(X)\ny = data[:,-1].astype(int)\nquery_IDs = qrel[:,0].astype(int)\nquerys = np.arange(401,451)\noutput = open(\"../../../Projects/MS Projects/ML/trec8/ranklibdata.txt\", \"w\")\nfor i in range(X.shape[0]):\n    output.write(str(y[i]))\n    output.write(\" qid:\"+str(query_IDs[i])+\" \")\n    output.write(\" \".join([\"{:}:{:}\".format(j, Xi) for j, Xi in enumerate(X[i])]))\n    output.write(\" # info\\n\")\noutput.close()\n","sub_path":"project/ranklib_formatter.py","file_name":"ranklib_formatter.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"35526265","text":"import os\n\nimport PySimpleGUI as sg\nfrom utils import Plasmainp, UnitConversionKey, Units\n\n\nclass Loader:\n    def __init__(self):\n    self.applyers = {}  # lambda Plasmainp, Units: value\n    self.exceptors = {}  # lambda Plasmainp, Units: bool\n\n    def add_applyer(self, key, applyer, exceptor=None):\n    self.applyers[key] = applyer\n\n    if exceptor is None:\n    def exceptor(inp, unit): return True\n    self.exceptors[key] = exceptor\n\n    def apply(self, inp, convkey, window):\n    unit = Units(convkey.dx, convkey.to_c)\n    for key, applyer in self.applyers.items():\n    if not self.exceptors[key](inp, unit):\n    continue\n    try:\n    value = applyer(inp, unit)\n    except KeyError:\n    continue\n    window[key].Update(value=value)\n\n    def load(self, filename, window):\n    if filename is None or not os.path.exists(filename):\n    return None\n\n    convkey = UnitConversionKey.load(filename)\n    if convkey is None:\n    dx = sg.PopupGetText('Enter the grid width [m] used for this parameter file')\n    try:\n    dx = float(dx)\n    except:\n    return None\n\n    to_c = sg.PopupGetText('Enter the speed of light in the EMSES unit system used for this parameter file')\n    try:\n    to_c = float(to_c)\n    except:\n    return None\n\n    convkey = UnitConversionKey(dx, to_c)\n\n    inp = Plasmainp(filename)\n    self.apply(inp, convkey, window)\n\n    window['basefile'].Update('Base file: {}'.format(filename))\n\n    return inp\n\n\ndef create_default_loader(use_physical_dt=False):\n    loader = Loader()\n\n    loader.add_applyer('use_em', lambda i, u: i['emflag'] == 1)\n    loader.add_applyer('use_pe', lambda i, u: i['nspec'] == 3)\n\n    loader.add_applyer('dx', lambda i, u: u.dx)\n    loader.add_applyer('em_c', lambda i, u: u.to_c)\n\n    if use_physical_dt:\n    loader.add_applyer('dt', lambda i, u: u.t.reverse(i['dt']))\n    else:\n    loader.add_applyer('dt', lambda i, u: i['dt'])\n\n    loader.add_applyer('nx', lambda i, u: int(i['nx']))\n    loader.add_applyer('ny', lambda i, u: int(i['ny']))\n    loader.add_applyer('nz', lambda i, u: int(i['nz']))\n    loader.add_applyer('nstep', lambda i, u: int(i['nstep']))\n\n    loader.add_applyer('jobnum', lambda i, u: ' '.join(\n    list(map(str, i['jobnum']))))\n    loader.add_applyer('nodesx', lambda i, u: i['nodes'][0])\n    loader.add_applyer('nodesy', lambda i, u: i['nodes'][1])\n    loader.add_applyer('nodesz', lambda i, u: i['nodes'][2])\n\n    return loader\n","sub_path":"src/default/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"272597789","text":"#!/usr/bin/python3\n\"\"\"\n\tconvert extracted runDMD image file to a series of .gif files\n\tusage:\n\t\tdmd2gif.py ./dmd.img ./outFolder\n\"\"\"\nimport sys\nfrom numpy import array, zeros, uint8\nfrom PIL import Image\n\nif len(sys.argv) != 3:\n\tprint(__doc__)\n\tsys.exit()\n\nfName = sys.argv[1]\noutFolder = sys.argv[2]\n\ndef getFrame( buf, xSize=128, ySize=32, frameOffset=0, byteOffset=None ):\n    \"\"\" returns unpacked image data, 1 byte per pixel \"\"\"\n    if byteOffset is None:\n    byteOffset = xSize*ySize*frameOffset\n    rawDat = buf[byteOffset:byteOffset+ySize*xSize//2]\n    # unpack 2 pixels / byte to 1 pixel / byte\n    a = zeros(xSize*ySize, dtype=uint8)\n    a[1::2] = rawDat & 0x0F\n    a[0::2] = rawDat >> 4\n    return a.reshape((ySize,xSize))\n\ndef isFooter( dat ):\n    \"\"\" returns >= 0 if a footer frame of size 0x200 bytes is detected \"\"\"\n    # we search for this sequence: xx 01 xx 02 xx 03 xx 04 xx\n    tag = b\"\\x01\\x02\\x03\\x04\"\n    a = dat.flatten()[:frameSize][0::2].tobytes()\n    b = dat.flatten()[:frameSize][1::2].tobytes()\n    ind = a.find( tag )\n    if ind >= 0:\n    return 2*ind\n    ind = b.find( tag )\n    if ind >= 0:\n    return 2*ind+1\n    return ind\n\ndef getNextFname():\n    \"\"\" get next file name from the header table \"\"\"\n    global fNameOffset\n    fName = rawDat[fNameOffset:fNameOffset+32].tobytes().replace(b\"\\x00\",b\"\").decode(\"ascii\")\n    fNameOffset += 0x200\n    return fName\n\nwith open(fName,\"rb\") as f:\n    rawDat = array( bytearray(f.read()), dtype=uint8 )\n\nxSize = 128\nySize = 32\nframeSize = xSize * ySize // 2 # each data byte holds two 4 bit pixels\noffset = 0x12C400 # Start of first animation\nfNameOffset = 0x00C814 # Start of first filename\nloopOn = {\n    \"24_002\": 5,\n    \"THE_CHAMPION_PUB_002\": 5,\n    \"THE_CHAMPION_PUB_003\": 5,\n    \"THE_CHAMPION_PUB_004\": 5,\n    \"THE_CHAMPION_PUB_007\": 3,\n    \"THE_CHAMPION_PUB_015\": 3,\n    \"THE_CHAMPION_PUB_018\": 5,\n    \"THE_CHAMPION_PUB_025\": 5,\n    \"THE_CHAMPION_PUB_032\": 2,\n    \"THE_CHAMPION_PUB_042\": 3,\n    \"THE_CHAMPION_PUB_044\": 5,\n    \"THE_CHAMPION_PUB_045\": 5,\n    \"THE_CHAMPION_PUB_046\": 5,\n    \"THE_CHAMPION_PUB_049\": 5,\n    \"THE_CHAMPION_PUB_050\": 7,\n    \"THE_CHAMPION_PUB_067\": 5,\n    \"THE_CHAMPION_PUB_068\": 5,\n}\nimlist = []\naniStartOffset = offset\naniLengthBytes = 0\nwith open(\"offsets.txt\",\"w\") as f:\n    while(1):\n    aniDat = rawDat[offset:offset+frameSize]\n    if len(aniDat) < frameSize:\n    print(\"done\")\n    break\n    fOffs = isFooter( aniDat )\n    if fOffs >= 0:\n    # end of current file, save it\n    fName = getNextFname()\n    fullFName = \"./gif/{}.gif\".format( fName )\n    print( hex(offset), fullFName )\n    f.write(\"0x{:08x},0x{:08x}\\n\".format(aniStartOffset, aniLengthBytes) )\n    if fName in loopOn:\n    for im in imlist:\n    im.info[\"loop\"] = loopOn[fName]\n    imlist[0].save( fullFName, \"GIF\", optimize=False, save_all=True, append_images=imlist[1:] )\n    #start a new file\n    imlist = []\n    offset += fOffs + 0x200\n    aniStartOffset = offset\n    aniLengthBytes = 0\n    else:\n    imgData = getFrame(aniDat, xSize, ySize) * 16\n    img = Image.fromarray(imgData, mode=\"L\")\n    img.info[\"background\"] = 0x00*16\n    img.info[\"transparency\"] = 0x0A*16\n    img.info[\"duration\"] = 1/10 * 1000\n    imlist.append( img )\n    aniLengthBytes += frameSize\n    offset += frameSize ","sub_path":"dev/dmd2gif.py","file_name":"dmd2gif.py","file_ext":"py","file_size_in_byte":3548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"368149487","text":"'''Saves a WikiSql jsonl file of data, after data cleaning, into an sqlite database.\nNeeds a hard-coded path for the jsonl file.'''\nimport json\nimport sqlite3\nimport re\nfrom sqlite3 import Error\ndef create_connection(db_file):\n    \"\"\" create a database connection to a SQLite database \"\"\"\n    connection = None\n    try:\n    connection = sqlite3.connect(db_file)\n    print(sqlite3.version)\n    except Error as err:\n    print(err)\n    return connection\n\ndef create_table(connection, create_table_sql):\n    \"\"\" create a table from the create_table_sql statement\n    :param conn: Connection object\n    :param create_table_sql: a CREATE TABLE statement\n    :return:\n    \"\"\"\n    try:\n    cursor = connection.cursor()\n    cursor.execute(create_table_sql)\n    except Error as err:\n    print(err)\n\ndef clean_string(string):\n    '''Cleans a string that represents a table or column name. 
Returns cleaned string.'''\n #bad characters for an identifier\n result = string.replace(\" \",\"_\").replace(\".\",\"_\").replace(\"#\",\"Nr\").replace(\"@\",\"_\")\n result = result.replace(\"&\",\"_\").replace(\"%\",\"_\").replace(\"$\",\"dolars_\").replace(\")\",\"_\")\n result = result.replace(\"-\",\"_\").replace(\"/\",\"_\").replace(\"?\",\"_\").replace(\":\",\"_\")\n result = result.replace(\"+\",\"_\").replace(\";\",\"_\").replace(\"'\",\"_\").replace(\">\",\"_\")\n result = result.replace(\"\\\"\",\"_\").replace(\"]\",\"_\").replace(\"[\",\"_\").replace(\"!\",\"\")\n result = result.replace(\"\\\\\",\"_\").replace(\"~\",\"approx_\").replace(\"^\",\"_\").replace(\"{\",\"_\")\n result = result.replace(\"’\",\"_\").replace(\"}\",\"_\").replace(\"′^\",\"_\").replace(\"=\",\"_\")\n result = result.replace(\",\",\"_\").replace(\"(\",\"_\").replace(\"*\",\"_\")\n reserved_words = {\"from\" : \"source\", \"group\" : \"team\", \"to\" : \"target\",#reserved words\n \"order\" : \"ranking\", \"index\" : \"ind\", \"as\" : \"A_s\", \"drop\" : \"Drops\",\n \"where\" : \"location\", \"table\" : \"tab\", \"total\" : \"tot\"}\n if result.lower() in reserved_words:\n return reserved_words[result.lower()]\n if result.isnumeric():\n if int(result)>1500:\n result = \"year_\"+result #if it looks like a year, we append that\n else:\n result = \"points_\"+result\n #if it starts with a digit we make it start with an underscore\n if len(result)>0 and result[0].isdigit():\n result = \"_\"+result\n #we reduce to a size of one, all sequences of underscores after our replacements\n result = re.sub('_+', '_', result)\n return result\n\ndef main():\n '''core data cleaning and database generation/filling'''\n conn = create_connection(r\"/home/campero/Desktop/wikisql.db\")\n data = []\n with open('train.tables.jsonl') as input_file:\n for line in input_file:\n data.append(json.loads(line))\n all_tables = 0\n tables_inserted=set()\n for item in data:\n all_tables+=1\n if \"page_title\" in item:\n table_name = clean_string(item[\"page_title\"])\n version_number = 1\n temp_str = table_name\n while temp_str in tables_inserted:\n temp_str = table_name+\"_\"+str(version_number)\n version_number+=1\n if version_number>1:\n table_name+=\"_\"+str(version_number-1)\n tables_inserted.add(table_name)\n sql_create_table = \"\"\" CREATE TABLE IF NOT EXISTS \"\"\"+table_name+\"\"\" ( \"\"\"\n columns = set()\n temp_str = clean_string(item[\"header\"][0])\n columns.add(temp_str.lower())\n sql_create_table+=\" \"+temp_str+\" \"+item[\"types\"][0]+\" \"\n for i in range(len(item[\"header\"])-1):\n temp_str = clean_string(item[\"header\"][i+1])\n if temp_str.lower() in columns:\n version_number = 1\n while temp_str.lower()+\"_\"+str(version_number) in columns:\n version_number+=1\n temp_str += \"_\"+str(version_number)\n columns.add(temp_str.lower())\n sql_create_table+=\", \"+temp_str+\" \"+item[\"types\"][i+1]\n sql_create_table+=\"\"+\"\"\" ); \"\"\"\n #print(sql_create_table)\n create_table(conn,sql_create_table)\n conn.commit()\n records = [tuple(row) for row in item[\"rows\"]]\n try:\n temp_str = \",\".join([\"?\" for row in item[\"header\"]])\n conn.executemany(\"INSERT INTO \"+table_name+\" VALUES(\"+temp_str+\");\",records)\n conn.commit()\n print(\"{}: Created-{}\".format(str(all_tables), table_name))\n except Error as err:\n print(err)\n #print(records)\n #print(item[\"rows\"])\n #print(sql_create_table)\n print(\"{}: Error-{}\".format(str(all_tables),table_name))\n else:\n print(\"{}: Skipped (no page title/table 
name)\".format(str(all_tables)))\n print(\"Created {}/{} tables.\".format(str(len(tables_inserted)),str(all_tables)))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"python_app/wikisql_jsonl_to_sqlite_loader.py","file_name":"wikisql_jsonl_to_sqlite_loader.py","file_ext":"py","file_size_in_byte":4988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"565887495","text":"from sys import stdin\ninput = stdin.readline\n\nN, K = map(int, input().split())\narr = [int(input()) for i in range(N)]\nfor i in range(N):\n if arr[i] > K:\n here = i\n break\nelse:\n here = N-1\n\ncnt = 0\nwhile K:\n cnt += K // arr[here]\n K -= arr[here] * (K//arr[here])\n here -= 1\nprint(cnt)","sub_path":"201228/bj_11047.py","file_name":"bj_11047.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"613211315","text":"import cv2\nimport numpy as np\nimport math\n# Read the image\nimg = cv2.imread('pugnew.jpg',1)\nheight_ = 600\nwidth_ = 600\nimg = cv2.resize(img,(width_,height_))\nheight, width = img.shape[:2]\ncv2.imshow('Imagen Original', img)\n\nA = np.array([[2.0,0.0],\n [0.0,2.0]])\nB = np.array([0.0,0.0]) \n\n# Imagen escalada sin solve\nnewImg = np.array(np.zeros((height_*2)*(width_*2)*3).reshape((img.shape[0]*2,img.shape[1]*2,img.shape[2])));\nnewImg = np.array(newImg, dtype=np.uint8)\n\nfor i in range(height):\n for j in range(width):\n position = np.array([i,j])\n new_position = np.dot(A,position) + B # [new_i,new_j]\n new_position = new_position.astype(int)\n if ((new_position[0] >= 0 and new_position[0] < height*2) and (new_position[1] >= 0 and new_position[1] < width*2)):\n newImg[new_position[0],new_position[1]] = img[i,j]\ncv2.imshow('Imagen Escalada ', newImg)\nfilename = 'EscalacionPugERROR.jpg'\ncv2.imwrite(filename, newImg)\n\n# Imagen escalada con solve\nnewImg2 = np.array(np.zeros((height_)*(width_)*3).reshape((img.shape)));\nnewImg2 = np.array(newImg2, dtype=np.uint8)\n\nX = np.array([0,0])\nfor u in range(width):\n for v in range(height):\n Y = np.array([u,v]) - B\n solve_= cv2.solve(A, Y, X)\n X = solve_[1]\n X = X.astype(int)\n x = X[0]\n y = X[1]\n if ((x >= 0 and x < width) and (y>= 0 and y < height)):\n # print(x)\n # print(y)\n newImg2[v, u] = img[y, x]\n\n\ncv2.imshow('Imagen Escalada con solve', newImg2)\n \nfilename = 'EscalacionPug2.jpg'\ncv2.imwrite(filename, newImg2)\n\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"parcial_3/lab_10/Affine_transformation/Ejercicio2/ejercicio_2_2.py","file_name":"ejercicio_2_2.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"594375140","text":"###########################\r\n# 6.0002 Problem Set 1a: Space Cows \r\n# Name:\r\n# Collaborators:\r\n# Time:\r\n\r\nfrom ps1_partition import get_partitions\r\nimport time\r\n\r\n#================================\r\n# Part A: Transporting Space Cows\r\n#================================\r\n\r\n# Problem 1\r\ndef load_cows(filename):\r\n \"\"\"\r\n Read the contents of the given file. 
Assumes the file contents contain\r\n data in the form of comma-separated cow name, weight pairs, and return a\r\n dictionary containing cow names as keys and corresponding weights as values.\r\n\r\n Parameters:\r\n filename - the name of the data file as a string\r\n\r\n Returns:\r\n a dictionary of cow name (string), weight (int) pairs\r\n \"\"\"\r\n\r\n cows = {}\r\n with open(filename) as f:\r\n for line in f:\r\n # strip new line characters and split by commas\r\n (name, weight) = line.rstrip().split(',')\r\n if name not in cows:\r\n # convert weight to int and store in cows dictionary\r\n cows[name] = int(weight)\r\n\r\n return cows\r\n\r\n\r\n\r\n# Problem 2\r\ndef greedy_cow_transport(cows, limit=10):\r\n \"\"\"\r\n Uses a greedy heuristic to determine an allocation of cows that attempts to\r\n minimize the number of spaceship trips needed to transport all the cows. The\r\n returned allocation of cows may or may not be optimal.\r\n The greedy heuristic should follow the following method:\r\n\r\n 1. As long as the current trip can fit another cow, add the largest cow that will fit\r\n to the trip\r\n 2. Once the trip is full, begin a new trip to transport the remaining cows\r\n\r\n Does not mutate the given dictionary of cows.\r\n\r\n Parameters:\r\n cows - a dictionary of name (string), weight (int) pairs\r\n limit - weight limit of the spaceship (an int)\r\n \r\n Returns:\r\n A list of lists, with each inner list containing the names of cows\r\n transported on a particular trip and the overall list containing all the\r\n trips\r\n \"\"\"\r\n\r\n # created list of cows sorted by weight descending\r\n sorted_cows = sorted(cows.items(), key = lambda x:x[1], reverse=True)\r\n\r\n # init list of chartered shuttles\r\n shuttles = []\r\n\r\n # keep going to all cows are chartered on a shuttle!\r\n while sorted_cows:\r\n # init list of cows on current shuttle and set shuttle space limit\r\n this_shuttle = []\r\n space_left = limit\r\n for i in range(len(sorted_cows)):\r\n # take heaviest cow in list\r\n (name, weight) = sorted_cows.pop(0)\r\n # try to fit on shuttle\r\n if weight <= space_left:\r\n this_shuttle.append(name)\r\n space_left -= weight\r\n # else put back into queue\r\n else:\r\n sorted_cows.append((name, weight))\r\n \r\n # if shuttle has cargo, add to transport charter\r\n if this_shuttle:\r\n shuttles.append(this_shuttle)\r\n\r\n return shuttles\r\n \r\n\r\n\r\n# Problem 3\r\ndef brute_force_cow_transport(cows, limit=10):\r\n \"\"\"\r\n Finds the allocation of cows that minimizes the number of spaceship trips\r\n via brute force. The brute force algorithm should follow the following method:\r\n\r\n 1. Enumerate all possible ways that the cows can be divided into separate trips \r\n Use the given get_partitions function in ps1_partition.py to help you!\r\n 2. 
Select the allocation that minimizes the number of trips without making any trip\r\n that does not obey the weight limitation\r\n \r\n Does not mutate the given dictionary of cows.\r\n\r\n Parameters:\r\n cows - a dictionary of name (string), weight (int) pairs\r\n limit - weight limit of the spaceship (an int)\r\n \r\n Returns:\r\n A list of lists, with each inner list containing the names of cows\r\n transported on a particular trip and the overall list containing all the\r\n trips\r\n \"\"\"\r\n\r\n # init least number of shuttles based on having one cow in each shuttle\r\n # (worst case scenario)\r\n least_shuttles = len(cows)\r\n shuttles = [[cow] for cow in cows.keys()]\r\n # init generator to enumerate all possible shuttle partitionings\r\n partitionings = get_partitions(cows)\r\n # check weight of each partition in each partitioning\r\n for partition in partitionings:\r\n # get number of shuttles in partition\r\n nb_shuttles = len(partition)\r\n # if not better than current best number, skip to next partition\r\n if nb_shuttles >= least_shuttles:\r\n continue\r\n # check all shuttles meet weight requirements\r\n limit_exceeded = False\r\n for shuttle in partition:\r\n # sum weight of all cows in shuttle\r\n shuttle_weight = sum([cows[name] for name in shuttle])\r\n # if weight is over the limit, flag and break out of loop\r\n if shuttle_weight > limit:\r\n limit_exceeded = True\r\n break\r\n if not limit_exceeded:\r\n least_shuttles = nb_shuttles\r\n shuttles = partition\r\n\r\n return shuttles\r\n\r\n\r\n\r\n# Problem 4\r\ndef compare_cow_transport_algorithms():\r\n \"\"\"\r\n Using the data from ps1_cow_data.txt and the specified weight limit, run your\r\n greedy_cow_transport and brute_force_cow_transport functions here. Use the\r\n default weight limits of 10 for both greedy_cow_transport and\r\n brute_force_cow_transport.\r\n \r\n Print out the number of trips returned by each method, and how long each\r\n method takes to run in seconds.\r\n\r\n Returns:\r\n Does not return anything.\r\n \"\"\"\r\n \r\n cows = load_cows('ps1_cow_data.txt')\r\n\r\n tic = time.time()\r\n trips = greedy_cow_transport(cows, limit=10)\r\n toc = time.time()\r\n print(f'greedy_cow_transport algo solves for {len(trips)} trips in {toc - tic:.4f} seconds')\r\n \r\n tic = time.time()\r\n trips = brute_force_cow_transport(cows, limit=10)\r\n toc = time.time()\r\n print(f'greedy_cow_transport algo solves for {len(trips)} trips in {toc - tic:.4f} seconds')\r\n\r\n return","sub_path":"PS1/ps1a_completed.py","file_name":"ps1a_completed.py","file_ext":"py","file_size_in_byte":6036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"98463558","text":"from django import forms\nfrom django.forms import ModelForm, RadioSelect, CheckboxInput\nfrom app1040nrezlocal.models import modelInput, modelPostTaxInput\nfrom crispy_forms.layout import Layout, Fieldset, Submit, HTML\nfrom crispy_forms.helper import FormHelper\n\n# pretax interview questions\nclass TaxModelForm(forms.ModelForm):\n \n # radio buttons\n Q01 = forms.ChoiceField(\n label = \"1) Please select tax year\",\n choices = (('a', \"Current Year\"),('b', \"Prior Year(s)\")),\n widget = forms.RadioSelect,\n initial = 'a',\n required = False,\n ) \n Q01_01 = forms.ChoiceField(\n label = \"1.1) Please select a prior tax year\",\n choices = ((\"a\", \"2010\"), (\"b\", \"2011\"), (\"c\", \"2012\")),\n widget = forms.RadioSelect,\n required = False,\n ) \n Q02 = forms.ChoiceField(\n label = \"2) Please select 
your service need:\",\n choices = ((\"a\", \"Nonresident Tax Return\"), (\"b\", \"Social Security & Medicare Tax Refunds\")),\n widget = forms.RadioSelect,\n required = False,\n ) \n Q02_01 = forms.ChoiceField(\n label = \"2.1) Please select your country of origin:\",\n choices = ((\"a\", \"China\"), (\"b\", \"Mexico\")),\n widget = forms.RadioSelect,\n required = False,\n ) \n Q03 = forms.ChoiceField(\n label = \"3) Please select your forms:\",\n choices = ((\"a\", \"1040NR-EZ\"), (\"b\", \"1040NR\")),\n widget = forms.RadioSelect,\n required = False,\n ) \n Q03_01 = forms.ChoiceField(\n label = \"3.1) What do you want to file as?\",\n choices = ((\"a\", \"Single\"), (\"b\", \"Married\")),\n widget = forms.RadioSelect,\n required = False,\n ) \n \n def __init__(self, *args, **kwargs):\n super(TaxModelForm, self).__init__(*args, **kwargs)\n # make fields read-only\n self.fields['Q02_01_01'].widget.attrs['readonly'] = True\n self.fields['Q03_01_01'].widget.attrs['readonly'] = True\n self.fields['Q03_01_02'].widget.attrs['readonly'] = True\n # Django crispy form\n self.helper = FormHelper()\n self.helper.form_id = 'id-exampleForm'\n self.helper.form_method = 'post'\n self.helper.form_action = '/app/'\n self.helper.form_class = 'form-horizontal'\n self.helper.label_class = 'col-lg-4'\n self.helper.field_class = 'col-lg-8'\n self.helper.add_input(Submit('submit', 'Submit'))\n self.helper.layout = Layout(\n Fieldset(\n 'Pre tax interview questions',\n 'A01', \n 'A02',\n 'Q01',\n 'Q01_01',\n 'Q02',\n 'Q02_01',\n 'Q02_01_01',\n 'Q03',\n 'Q03_01',\n 'Q03_01_01',\n 'Q03_01_02',\n ),\n Fieldset(\n 'Income sources',\n HTML(\" 4. Please select source(s) of income: \"),\n 'Q04_a',\n 'W2L01',\n 'W2L02',\n 'W2L03',\n 'W2L04',\n 'W2L05',\n 'W2L06',\n 'W2L12aB',\n 'W2L12bB',\n 'W2L15aA',\n 'W2L15bA',\n 'W2L16A',\n 'W2L17A',\n 'W2L18A',\n 'W2L19A',\n 'W2L20A',\n 'W2L07',\n 'W2L08',\n 'W2L10',\n 'W2L11',\n 'W2L14',\n 'Q04_b',\n 'F1099GL01',\n 'F1099GL02',\n 'F1099GL03',\n 'F1099GL04',\n 'F1099GL05',\n 'F1099GL06',\n 'F1099GL07',\n 'F1099GL08',\n 'F1099GL09',\n 'F1099GL10aA',\n 'F1099GL10bA',\n 'F1099GL11A',\n HTML(\" 5. Do you have any exclusions or deductions to income? \"),\n 'Q05_scholarship',\n 'Q05_student_loan',\n HTML(\" 6. How much taxes were withheld from each? \"),\n 'Q06_4137_8919',\n 'Q06_estimated_tax',\n 'Q06_1040C',\n ),\n )\n \n\n class Meta:\n # associate with ModelInput for automatically generated fields\n model = modelInput\n\n\n \nclass postTaxInputForm(forms.ModelForm):\n \n #Change widget of boolean field from checkbox to RadioSelect\n #TODO: refractor to a function\n SCHOILC = forms.TypedChoiceField(\n label = \"C. Have you ever applied to be a green card holder (lawful permanent resident) of the United States?\",\n choices = ((1, \"Yes\"), (0, \"No\")),\n coerce = lambda x: bool(int(x)),\n widget = forms.RadioSelect,\n initial = '0',\n required = True,\n )\n \n SCHOILD1 = forms.TypedChoiceField(\n label = \"A U.S. citizen?\",\n choices = ((1, \"Yes\"), (0, \"No\")),\n coerce = lambda x: bool(int(x)),\n widget = forms.RadioSelect,\n initial = '0',\n required = True,\n )\n \n SCHOILD2 = forms.TypedChoiceField(\n label = \"A green card holder (lawful permanent resident) of the United States?\",\n choices = ((1, \"Yes\"), (0, \"No\")),\n coerce = lambda x: bool(int(x)),\n widget = forms.RadioSelect,\n initial = '0',\n required = True,\n )\n \n SCHOILF = forms.TypedChoiceField(\n label = \"F. Have you ever changed your visa type (nonimmigrant status) or U.S. immigration status?\",\n choices = ((1, \"Yes\"), (0, \"No\")),\n coerce = lambda x: bool(int(x)),\n widget = forms.RadioSelect,\n initial = '0',\n required = True,\n )\n \n SCHOILI = forms.TypedChoiceField(\n label = \"I. Did you file a U.S. income tax return for any prior year?\",\n choices = ((1, \"Yes\"), (0, \"No\")),\n coerce = lambda x: bool(int(x)),\n widget = forms.RadioSelect,\n initial = '0',\n required = True,\n )\n \n F8843TeachersL08 = forms.TypedChoiceField(\n label = \"8. Were you present in the United States as a teacher, trainee, or student for any part of 2 of the 6 prior calendar years (2007 through 2012)?\",\n choices = ((1, \"Yes\"), (0, \"No\")),\n coerce = lambda x: bool(int(x)),\n widget = forms.RadioSelect,\n initial = '0',\n required = True,\n )\n \n F8843StudentsL12 = forms.TypedChoiceField(\n label = \"12. Were you present in the United States as a teacher, trainee, or student for any part of more than 5 calendar years?\",\n choices = ((1, \"Yes\"), (0, \"No\")),\n coerce = lambda x: bool(int(x)),\n widget = forms.RadioSelect,\n initial = '0',\n required = True,\n )\n \n F8843StudentsL13 = forms.TypedChoiceField(\n label = \"13. During 2013, did you apply for, or take other affirmative steps to apply for, lawful permanent resident status\",\n choices = ((1, \"Yes\"), (0, \"No\")),\n coerce = lambda x: bool(int(x)),\n widget = forms.RadioSelect,\n initial = '0',\n required = True,\n )\n \n F8843AthletesL16a = forms.TypedChoiceField(\n label = \"Agreed\",\n choices = ((1, \"Yes\"), (0, \"No\")),\n coerce = lambda x: bool(int(x)),\n widget = forms.RadioSelect,\n initial = '0',\n required = True,\n )\n \n INFOL09 = forms.TypedChoiceField(\n label = \"Select appropriate:\",\n choices = ((1, \"Yes\"), (0, \"No\")),\n coerce = lambda x: bool(int(x)),\n widget = forms.RadioSelect,\n initial = '0',\n required = True,\n )\n \n def __init__(self, *args, **kwargs):\n super(postTaxInputForm, self).__init__(*args, **kwargs)\n self.helper = FormHelper()\n self.helper.form_id = 'id-exampleForm'\n self.helper.form_method = 'post'\n self.helper.form_action = '/app/postTax/'\n self.helper.form_class = 'form-horizontal'\n self.helper.label_class = 'col-lg-4'\n self.helper.field_class = 'col-lg-8'\n self.helper.add_input(Submit('submit', 'Submit'))\n self.helper.layout = Layout(\n Fieldset(\n 'Schedule OI questions',\n 'SCHOILA',\n 'SCHOILB',\n 'SCHOILC',\n HTML(\" D. Were you ever: \"),\n 'SCHOILD1',\n 'SCHOILD2',\n 'SCHOILE',\n 'SCHOILF',\n 'SCHOILFc',\n HTML(\" G. List all dates you entered and left the United States during 2013 (see instructions). \"), \n HTML(\"Note. If you are a resident of Canada or Mexico AND commute to work in the United States at frequent intervals, \"), \n HTML(\"check the box for Canada or Mexico and skip to item H \"), \n 'SCHOILGa',\n 'SCHOILGb',\n 'SCHOILGc',\n 'SCHOILGd',\n HTML(\" H. Give number of days (including vacation, nonworkdays, and partial days) you were present in the United States during: \"),\n 'SCHOILHa',\n 'SCHOILHb',\n 'SCHOILHc',\n 'SCHOILI',\n 'SCHOILIc',\n HTML(\"J. Income Exempt from Tax-If you are claiming exemption from income tax under a U.S. income tax treaty with a foreign country, \"),\n HTML(\"complete (1) and (2) below. See Pub. 901 for more information on tax treaties. \"),\n HTML(\" \"),\n ),\n Fieldset(\n 'F8843 questions',\n Fieldset(\n 'F8843 Part I: General Information',\n 'F8843L01A',\n 'F8843L01B',\n 'F8843L02',\n 'F8843L03A',\n 'F8843L03B',\n HTML(\" 4a. Enter the number of days in 2013 you claim you can exclude for purposes of the substantial presence test \"),\n 'F8843L04Aa',\n 'F8843L04Ab',\n 'F8843L04Ac',\n 'F8843L04B',\n ),\n Fieldset(\n 'Please check all that applies to you:',\n 'F8843Teachers',\n 'F8843Students', \n 'F8843Athletes',\n 'F8843Medical',\n ),\n Fieldset(\n 'Part II - Teachers and Trainees',\n HTML(\" \"),\n 'F8843TeachersL05',\n 'F8843TeachersL06',\n HTML(\" 7. Enter the type of U.S. visa (J or Q) you held during: \"),\n 'F8843TeachersL07a',\n 'F8843TeachersL07b',\n 'F8843TeachersL07c',\n 'F8843TeachersL07d',\n 'F8843TeachersL07e',\n 'F8843TeachersL07f',\n HTML(\" If the type of visa you held during any of these years changed, attach a statement showing the new visa type and the date it was acquired. \"),\n 'F8843TeachersL08',\n HTML(\" If you checked the 'Yes' box on line 8, you cannot exclude days of presence as a teacher or trainee unless you meet the Exception explained in the instructions. \"),\n ),\n Fieldset(\n 'Part III - Students',\n HTML(\" \"),\n 'F8843StudentsL09',\n 'F8843StudentsL10',\n HTML(\" 11. Enter the type of U.S. visa (F, J, M, or Q) you held during: \"),\n 'F8843StudentsL11a',\n 'F8843StudentsL11b',\n 'F8843StudentsL11c',\n 'F8843StudentsL11d',\n 'F8843StudentsL11e',\n 'F8843StudentsL11f',\n HTML(\" If the type of visa you held during any of these years changed, attach a statement showing the new visa type and the date it was acquired. \"),\n 'F8843StudentsL12',\n HTML(\" If you checked the 'Yes' box on line 12, you must provide sufficient facts on an attached statement to establish that you do not intend to reside permanently in the United States. \"),\n 'F8843StudentsL13',\n 'F8843StudentsL14',\n ),\n Fieldset(\n 'Part IV - Professional Athletes',\n HTML(\" \"),\n 'F8843AthletesL15',\n 'F8843AthletesL16',\n HTML(\" 16a. Note. You must attach a statement to verify that all of the net proceeds of the sports event(s) were contributed to the charitable organization(s) listed on line 16. \"),\n 'F8843AthletesL16a',\n ),\n Fieldset(\n 'Part V - Individuals With a Medical Condition or Medical Problem',\n HTML(\" \"),\n 'F8843MedicalL17A',\n 'F8843MedicalL17B',\n 'F8843MedicalL17C',\n HTML(\" 18. Physcian's Statement \"),\n 'F8843MedicalL18a',\n HTML(\" was unable to leave the United States on the date shown on line 17b because of the medical condition or medical problem was unable to leave the United States on the date shown on line 17b because of the medical condition or medical problem \"),\n 'F8843MedicalL18b',\n 'F8843MedicalL18c',\n ),\n ),\n Fieldset(\n 'General Information',\n HTML(\" 1. Please enter your present home address \"),\n 'INFOL04',\n 'INFOL05',\n HTML(\" 2. Please enter your foreign address \"),\n 'INFOL06',\n 'INFOL07',\n 'INFOL08',\n HTML(\" 3. Do you want to allow another person to discuss this return with the IRS? \"),\n 'INFOL09',\n HTML(\" 3.1 Please enter the person's information: \"),\n 'INFOL10',\n 'INFOL11',\n 'INFOL12', \n HTML(\" 4. What is your occupation in the United States? \"),\n 'INFOL15',\n HTML(\" 5. If the IRS sent you an Identity Protection PIN, enter it here
\"),\n 'INFOL16',\n ),\n )\n \n \n class Meta:\n model = modelPostTaxInput\n\n","sub_path":"app1040nrezlocal/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":14652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"209754213","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@brief test log(time=10s)\n\"\"\"\nimport unittest\nimport pickle\nimport textwrap\nfrom sklearn import datasets\nfrom sklearn.linear_model import LogisticRegression\nfrom pyquickhelper.pycode import get_temp_folder, ExtTestCase\nfrom lightmlrestapi.mlapp.mlstorage import MLStorage, ZipStorage\n\n\nclass TestStorage(ExtTestCase):\n\n def test_storage(self):\n temp = get_temp_folder(__file__, \"temp_zip_storage\")\n stor = ZipStorage(temp)\n data = {'one.txt': b\"1\", 'two.txt': b\"2\"}\n stor.add(\"dto/k_\", data)\n data2 = stor.get(\"dto/k_\")\n self.assertEqual(data, data2)\n names = list(stor.enumerate_names())\n self.assertEqual(names, [\"dto/k_\"])\n meta = stor.get_metadata(\"dto/k_\")\n self.assertEqual(meta, {})\n\n def mlstorage(self, n, suf):\n\n # Train a model\n iris = datasets.load_iris()\n X = iris.data[:, :2] # we only take the first two features.\n y = iris.target\n clf = LogisticRegression()\n clf.fit(X, y)\n model_data = pickle.dumps(clf)\n\n # application\n code = textwrap.dedent(\"\"\"\n import os\n import pickle\n\n # We declare an id for the REST API.\n def restapi_version():\n return \"0.1.1234\"\n\n # We declare a loading function.\n def restapi_load():\n here = os.path.dirname(__file__)\n with open(os.path.join(here, \"iris2.pkl\"), \"rb\") as f:\n loaded_model = pickle.load(f)\n return loaded_model\n\n # We declare a predict function.\n def restapi_predict(clf, X):\n return clf.predict_proba(X)\n \"\"\")\n temp = get_temp_folder(__file__, \"temp_ml_storage\" + suf)\n stor = MLStorage(temp, cache_size=3)\n\n for i in range(0, n):\n app = {\"iris_%d.pkl\" % i: model_data,\n \"model.py\": code.replace(\"iris2.pkl\", \"iris_%d.pkl\" % i).encode(\"utf-8\")}\n\n name = \"ml%s/iris%d\" % (suf, i)\n stor.add(name, app)\n data2 = stor.get(name)\n self.assertEqual(app, data2)\n names = list(stor.enumerate_names())\n self.assertNotEmpty(names)\n meta = stor.get_metadata(name)\n self.assertEqual(meta, {'main_script': 'model.py'})\n\n exp = clf.predict_proba(X[:1, :2])\n predict = stor.call_predict(name, X[:1, :2])\n self.assertEqual(exp, predict)\n version = stor.call_version(name)\n self.assertEqual(version, \"0.1.1234\")\n\n self.assertLesser(len(stor._cache), n) # pylint: disable=W0212\n\n def test_mlstorage(self):\n self.mlstorage(1, \"1\")\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"_unittests/ut_mlapp/test_storage.py","file_name":"test_storage.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"162889768","text":"#coding=UTF-8\nimport Queue,threading\n\n#队列里只有put和get两个方法,列表的那些方法都没有\n#join是用来阻塞进程,与task_done配合使用才有意义。可以用Event对象来理解,每次put(),join里面的计数器加1,每次task_done(),计数器减1,计数器为0的时候,才能进行下次put()\n\n\nq=Queue.Queue()\n\ndef foo():\n\tq.put(111)\n\tq.put(222)\n\tq.put(333)\n\tq.join() #有个join,程序就停在这里\n\tprint('ok') \ndef bar():\n\tprint(q.get())\n\tq.task_done()\n\tprint(q.get())\n\tq.task_done()\n\tprint(q.get())\n\tq.task_done() 
#要在每个get()语句后面都加上\n\nt1=threading.Thread(target=foo,args=())\nt1.start()\nt2=threading.Thread(target=bar,args=())\nt2.start()\n\n#t1,t2谁先谁后无所谓,因为会阻塞住,等待信号\n","sub_path":"queue/task_done.py","file_name":"task_done.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"155315812","text":"from PySide2.QtGui import (QBrush, QColor)\n\nclass Blur:\n \n def __init__(self, cell, row, column, status=False, time=None):\n self.cell = cell\n self.time = time\n self.column = column\n self.row = row\n self.hidden = status\n\n def hide(self):\n try:\n self.cell.setBackground(QBrush(QColor(\"black\")))\n self.hidden = False\n except:\n raise \"Could not hide cell!\"\n\n def show(self):\n try:\n self.cell.setBackground(QBrush(QColor(\"white\")))\n self.hidden = True\n except:\n raise \"Could not show cell!\"\n","sub_path":"core/animation.py","file_name":"animation.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"350835164","text":"from PyQt5 import QtWidgets as qWidget\nfrom PyQt5 import QtGui as qGui\nfrom PyQt5 import QtCore as qCore\nfrom PyQt5 import uic\nfrom pprint import pprint\nimport sys\nimport os\nimport re\n\n# \"{\\\\frac {}{ ))\"\n\nsampleText = \"\"\"\n{\\\\displaystyle_90_}\n{\\\\displaystyle {\n\\\\begin{aligned}\n\\\\tan {\\\\frac {\\\\theta }{2))&=\\\\csc \\\\theta -\\\\cot \\\\theta =\\\\pm \\\\,{\\\\sqrt {\\\\frac {1-\\\\cos \\\\theta }{1+\\\\cos \\\\theta ))}={\\\\frac {\\\\sin \\\\theta }{1+\\\\cos \\\\theta ))\\\\\\\\\n&={\\\\frac {1-\\\\cos \\\\theta }{\\\\sin \\\\theta ))={\\\\frac {-1\\\\pm {\\\\sqrt {1+\\\\tan ^{2}\\\\theta ))}{\\\\tan \\\\theta ))={\\\\frac {\\\\tan \\\\theta }{1+\\\\sec {\\\\theta ))}\n\\\\end{aligned))}\n{\\\\displaystyle\n\\\\cot {\\\\frac {\\\\theta }{2))=\\\\csc \\\\theta +\\\\cot \\\\theta =\\\\pm \\\\,{\\\\sqrt\n{\\\\frac {1+\\\\cos \\\\theta }{1-\\\\cos \\\\theta ))}={\\\\frac {\\\\sin \\\\theta }{1-\\\\cos \\\\theta ))={\\\\frac {1+\\\\cos \\\\theta }{\\\\sin \\\\\\\\theta ))}\n{\\\\sqrt {\\\\frac {1-\\\\sin \\\\theta }{1+\\\\sin \\\\theta ))}={\\\\frac {1-\\\\tan {\\\\frac {\\\\theta }{2))}{1+\\\\tan {\\\\frac {\\\\theta }{2))))\n\"\"\"\n\ndef findClosing(inputString, closeBracket = \")\", searchPos = -1):\n\n if closeBracket == \")\":\n openBracket = \"(\"\n elif closeBracket == \"}\":\n openBracket = \"{\"\n elif closeBracket ==\"]\":\n openBracket =\"[\"\n\n brackets = 1\n\n while brackets >= 1:\n if searchPos >= len(inputString):\n break\n if inputString[searchPos] == closeBracket:\n brackets -= 1\n elif inputString[searchPos] == openBracket:\n brackets += 1\n\n searchPos += 1\n\n if brackets >= 1:\n searchPos = -1\n\n return searchPos\n\ndef fixDisplayStyle (inputText):\n targetString = \"{\\displaystyle\"\n foundID = inputText.find(targetString)\n foundIndexes = list()\n seachSize = len(targetString)\n\n while foundID != -1:\n closingID = findClosing(inputText, \"}\", foundID + seachSize)\n\n if closingID != -1:\n foundIndexes.append(closingID - 1)\n\n foundID = inputText.find(targetString, foundID + seachSize)\n\n\n result = list(inputText)\n\n for section in reversed(foundIndexes):\n try:\n del result[section]\n except Exception as e:\n pass\n\n inputText = \"\".join(result)\n\n for i in range(len(foundIndexes)):\n inputText = inputText.replace(targetString, \"\")\n\n return(inputText)\n\n\ndef fixFrac(inputText):\n\n indexes = list()\n foundID = 
inputText.find(\"{\\\\frac\")\n\n numRuns = 0\n\n while foundID != -1:\n print(\"Found frac pos: {}\".format(foundID))\n searchPos = foundID\n\n brackets = 0\n\n while brackets > -2:\n if searchPos >= len(inputText):\n break\n if inputText[searchPos] == \")\":\n brackets -= 1\n print (\"Found closing pos: {}\".format(searchPos))\n elif inputText[searchPos] == \"(\":\n brackets += 1\n print (\"Found openingpos: {}\".format(searchPos))\n\n searchPos += 1\n\n foundID = inputText.find(\"{\\\\frac\", foundID + 2)\n\n searchPos += 1\n indexes.append(searchPos - 1)\n\n\n pprint(indexes)\n\n result = list(inputText)\n\n for bracket in reversed(indexes):\n result[bracket - 1] = \"}\"\n result[bracket - 2] = \"\"\n\n inputText = \"\".join(result)\n\n inputText = re.sub(r\"{\\\\frac\", \"\\\\cfrac\", inputText)\n\n return(inputText)\n\n\n\nclass mainWindow(qWidget.QMainWindow):\n \"\"\"Main window class.\"\"\"\n\n def __init__(self, *args):\n \"\"\"Init.\"\"\"\n super(mainWindow, self).__init__(*args)\n ui = os.path.join(os.path.dirname(__file__), 'fixWikipedia.ui')\n uic.loadUi(ui, self)\n self.initUI()\n\n def initUI(self):\n self.applyButton.clicked.connect(self.on_applyButton_clicked)\n self.textInput.setPlainText(sampleText)\n\n def on_applyButton_clicked(self):\n inputString = self.textInput.toPlainText()\n inputString = self.fixText(inputString)\n self.textOutput.setPlainText(inputString)\n\n def fixText(self, inputText = \"\"):\n\n inputText = fixDisplayStyle(inputText)\n\n #Fix limits\n\n inputText = re.sub(r\"(\\\\lim)\\s*(_{)\", r\"\\\\lim\\\\limits_{\", inputText)\n inputText = re.sub(r\"(\\\\sum)\\s*(_{)\", r\"\\\\sum\\\\limits_{\", inputText)\n inputText = re.sub(r\"(\\\\limsup)\\s*(_{)\", r\"\\\\limsup\\\\limits_{\", inputText)\n inputText = re.sub(r\"(\\\\liminf)\\s*(_{)\", r\"\\\\liminf\\\\limits_{\", inputText)\n\n inputText = fixFrac(inputText)\n\n return (inputText)\n\n\napp = qWidget.QApplication(sys.argv)\nwindow = mainWindow()\nwindow.show()\nsys.exit(app.exec_())\n","sub_path":"all/utilities/fixWikipedia.py","file_name":"fixWikipedia.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"229139682","text":"#!/usr/bin/python\n#\n# Scan Duktape code base for references to built-in strings, i.e. 
for\n# strings which will need DUK_STRIDX_xxx constants and a place in the\n# thr->strs[] array.\n#\n\nimport os\nimport sys\nimport re\nimport json\n\nre_stridx = re.compile(r'DUK_STRIDX_(\\w+)', re.MULTILINE)\nre_heap = re.compile(r'DUK_HEAP_STRING_(\\w+)', re.MULTILINE)\nre_hthread = re.compile(r'DUK_HTHREAD_STRING_(\\w+)', re.MULTILINE)\n\ndef main():\n\tdefs = {}\n\n\tfor fn in sys.argv[1:]:\n\t\twith open(fn, 'rb') as f:\n\t\t\td = f.read()\n\t\t\tfor m in re.finditer(re_stridx, d):\n\t\t\t\tdefs[m.group(1)] = True\n\t\t\tfor m in re.finditer(re_heap, d):\n\t\t\t\tdefs[m.group(1)] = True\n\t\t\tfor m in re.finditer(re_hthread, d):\n\t\t\t\tdefs[m.group(1)] = True\n\n\tused = []\n\tdoc = { 'used_stridx_defines': used }\n\tfor k in sorted(defs.keys()):\n\t\tused.append('DUK_STRIDX_' + k)\n\tdoc['count_used_stridx_defines'] = len(used)\n\n\tprint(json.dumps(doc, indent=4))\n\nif __name__ == '__main__':\n\tmain()\n","sub_path":"src/scan_used_stridx.py","file_name":"scan_used_stridx.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"207528778","text":"from django.db import models\nfrom citations.models import Citation\n\nclass Chemical(models.Model):\n id = models.BigAutoField(primary_key=True)\n pmid = models.ForeignKey(Citation, models.DO_NOTHING, db_column='pmid', blank=True, null=True)\n idx = models.SmallIntegerField(blank=True, null=True)\n uid = models.CharField(max_length=256, blank=True, null=True)\n name = models.CharField(max_length=256)\n\n class Meta:\n managed = False\n db_table = 'chemicals'\n unique_together = (('pmid', 'idx'),)","sub_path":"src/chemicals/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"186163688","text":"# https://www.pyimagesearch.com/2015/12/28/increasing-raspberry-pi-fps-with-python-and-opencv/\n# import the necessary packages\nfrom picamera.array import PiRGBArray\nfrom picamera import PiCamera\nfrom threading import Thread\nimport cv2\nimport time\n \nclass PiVideoStream:\n def __init__(self, resolution=(320, 240), framerate=32, threaded=True):\n # initialize the camera and stream\n self.camera = PiCamera()\n self._thread = None\n self.camera.resolution = resolution\n self.camera.framerate = framerate\n self.rawCapture = PiRGBArray(self.camera, size=resolution)\n self.threaded = threaded\n if threaded:\n self.stream = self.camera.capture_continuous(self.rawCapture,\n format=\"bgr\", use_video_port=True)\n # number of frames read from camera since last read() call\n self.framesElapsed = 0\n else:\n # stays 1 if not threaded. 
Meaningless but allows the same logic to be used as for threaded\n self.framesElapsed = 1\n self.stream = None\n # initialize the frame and the variable used to indicate\n # if the thread should be stopped\n self.frame = None\n self.stopped = False\n #lastFrameTime = time.perf_counter()\n \n def start(self):\n # start the thread to read frames from the video stream\n if self.threaded:\n self._thread = Thread(target=self.update, args=())\n self._thread.start()\n return self\n \n def update(self):\n # keep looping infinitely until the thread is stopped\n for f in self.stream:\n # grab the frame from the stream and clear the stream in\n # preparation for the next frame\n self.frame = f.array\n self.rawCapture.truncate(0)\n self.framesElapsed += 1\n \n # if the thread indicator variable is set, stop the thread\n # and resource camera resources\n if self.stopped:\n self.stream.close()\n self.rawCapture.close()\n self.camera.close()\n return\n def read(self):\n if self.threaded:\n self.framesElapsed = 0\n # return the frame most recently read\n return self.frame\n else:\n self.camera.capture(self.rawCapture, format=\"bgr\")\n self.frame = self.rawCapture.array\n self.rawCapture.truncate(0)\n return self.frame\n \n def stop(self):\n # stop the camera thread and wait for it to finish executing\n self.stopped = True\n if self.threaded:\n self._thread.join()\n else:\n self.rawCapture.close()\n self.camera.close()","sub_path":"pivideostream.py","file_name":"pivideostream.py","file_ext":"py","file_size_in_byte":2759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"277776210","text":"import unittest\nimport configparser\nfrom src.endpoints.dataset import Dataset, Question\n\n\nclass DatasetTest(unittest.TestCase):\n def test_parse_raises_exception_file_not_found(self):\n dataset = Dataset('foo', './tests/endpoints/config')\n with self.assertRaises(FileNotFoundError):\n dataset.parse()\n\n def test_parse_raises_exception_no_section(self):\n dataset = Dataset('bar', './tests/endpoints/config')\n with self.assertRaises(configparser.NoSectionError):\n dataset.parse()\n\n def test_parse_valid_instance(self):\n dataset = Dataset('got', './tests/endpoints/config')\n dataset.parse()\n self.assertIn('got', dataset.endpoints)\n self.assertEqual(1, len(dataset.questions))\n self.assertIn('amount', dataset.statistics)\n\n\nclass TestQuestion(unittest.TestCase):\n def test_display_prefixes(self):\n init = {\n 'question': 'Do you know anything Jon Snow?',\n 'keywords': 'Jon Snow',\n 'query': 'select distinct ?s where { ?s ?p ?o }'\n }\n prefixes = {\n 'dc': 'http://purl.org/dc/elements/1.1/',\n 'foaf': 'http://xmlns.com/foaf/0.1/',\n 'rdf': 'http://www.w3.org/1999/02/22-rdf-syntax-ns#',\n 'rdfs': 'http://www.w3.org/2000/01/rdf-schema#'\n }\n expected = [\n \"PREFIX dc: \",\n \"PREFIX foaf: \",\n \"PREFIX rdf: \",\n \"PREFIX rdfs: \",\n ]\n question = Question(init, prefixes)\n self.assertEqual(question.display_prefixes(), '\\n'.join(expected))\n","sub_path":"tests/endpoints/test_dataset.py","file_name":"test_dataset.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"62875062","text":"#!/usr/bin/python3\n\"\"\" Check the perimeter of an island \"\"\"\n\n\ndef island_perimeter(grid):\n \"\"\" Return the perimeter of an island \"\"\"\n rows, columns, perimeter = len(grid), len(grid[0]), 0\n for row in range(rows):\n for position in range(columns):\n if 
grid[row][position] == 0:\n continue\n if row == 0 or grid[row - 1][position] == 0:\n perimeter += 1\n if row == rows - 1 or grid[row + 1][position] == 0:\n perimeter += 1\n if position == 0 or grid[row][position - 1] == 0:\n perimeter += 1\n if position == columns - 1 or grid[row][position + 1] == 0:\n perimeter += 1\n return perimeter\n","sub_path":"0x1C-makefiles/5-island_perimeter.py","file_name":"5-island_perimeter.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"147796934","text":"import socket, time\n\nhost = socket.gethostbyname(socket.gethostname())\nport = 9090\n\nclients = []\n\ns = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # socket.AF_INET -- TCP, socket.SOCK_DGRAM -- IP\ns.bind((host, port)) # creation TCP/IP\n\nquit = False\nprint('=== SERVER STARTED ===')\n\nwhile not quit:\n try:\n data, addr = s.recvfrom(1024)\n if addr not in clients:\n clients.append(addr)\n itsatime = time.strftime(\"%d.%m.%Y %H:%M:%S\", time.localtime())\n\n print('[' + addr[0] + ']=[' + str(addr[1]) + ']=[' + itsatime + ']/', end='')\n print(data.decode('utf-8'))\n\n for client in clients:\n if addr != client:\n s.sendto(data, client)\n except:\n print('\\n\\n\\n=== Server Stoped ===')\n quit = True\n\ns.close()\n\n\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"326680203","text":"from math import inf\ndef dejkstra(graph):\n '''\n Алгоритм дейкстры\n :param graph: Входной граф в виде листа\n :return: weight веса графа, и маршрут\n '''\n route=[[] for i in range(len(graph))]\n sosedy=[]\n weight=[inf if idx!=0 else 0 for idx,i in enumerate(graph)]\n for idx,i in enumerate(graph): #Проходимся по вершинам графа\n sosedy=[idz for idz,q in enumerate(graph[idx]) if q!=0] #Ищем соседей\n for sosed in sosedy: #Проходимся по соедям\n if idx==0 and sosed!=0: #Если есть соседи у 0 вершины то записываем для них маршруты\n weight[sosed]=graph[sosed][sosed]\n route[sosed]=route[sosed]+[0,sosed]\n if idx!=0 and sosed!=idx: #Если не 0 вершина и сосед не равен сам себе\n weight1=weight[sosed]+graph[idx][idx] #Расчитываем вес для пути и если он меньше имеющегося обновляем маршрут\n if weight[sosed]!=inf and weight[idx]>weight1:\n weight[idx]=weight1\n route[idx]=route[sosed]+[idx]\n while True: #находим не пройденые вершины и повторяем алгоритм\n inf_array=[idx for idx, i in enumerate(weight) if i==inf]\n if (len(inf_array)>0):\n for i in inf_array:\n sosedy = [idz for idz, q in enumerate(graph[i]) if q != 0]\n for sosed in sosedy:\n if weight[sosed] acc_valid:\n acc_valid = acc_v\n i_valid = i + 1\n if acc_t > acc_test:\n acc_test = acc_t\n i_test = i + 1\n # if total_loss < 0.001:\n # break\n\n\n rows = [args.embsize, args.bst, args.drop_f, args.bilstm, args.clip, acc_valid, acc_test, i_valid, i_test, args.iter]\n with open('my.csv', 'a+', newline='') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow(rows)\n\n\n #run_epoch_test(data, model)","sub_path":"baseline/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"76903799","text":"#Pair programmed by: Jonathan Mier & Jose Perez \n# crud.py\n\n## Pair Programming Guide\n# * Work in Pairs (1 keyboard + 2 brains)\n# * Switch for every iteration (micro-story)\n# * Test - Code - 
Refactor (Fail, Pass, Beautify)\n# * Typer - Talker\n# * Check your ego at the door '?> Cooperate\n# * Save both product and test code\n# * Execute all tests for each micro-story\n# * Record a log of your time on each test\n# * Use the main script hack to run your code directly\n# * Finish with a beautiful module call social_net_crud.py\n\nimport csv\nimport os\n\n\ndef create_author_table():\n open('author.csv','w').write('')\n \ndef add_author_table(author, email):\n with open('author.csv', 'a') as newFile:\n newFileWriter = csv.writer(newFile)\n newFileWriter.writerow([author, email])\n\ndef add_mulitple_authors(authors):\n with open('author.csv', 'a') as newFile:\n newFileWriter = csv.writer(newFile)\n for author in authors:\n newFileWriter.writerow([author, authors[author]])\ndef read_authors():\n with open('author.csv') as csvDataFile:\n csvReader = csv.reader(csvDataFile)\n for row in csvReader:\n if (row):\n print(row)\n \ndef print_author_email(author):\n with open('author.csv') as csvDataFile:\n csvReader = csv.reader(csvDataFile)\n for row in csvReader:\n for value in row:\n if (value == author):\n print(row[1])\n \ndef change_author_email(author, new_email):\n r = csv.reader(open('author.csv'))\n lines = [l for l in r]\n count = 0\n for line in lines:\n for value in line:\n if (value == author):\n lines[count][1] = new_email\n count += 1\n \n open('author.csv','w').write('')\n with open('author.csv', 'a') as newFile:\n newFileWriter = csv.writer(newFile)\n for line in lines:\n if (line):\n newFileWriter.writerow(line)\n \ndef delete_author(author):\n r = csv.reader(open('author.csv'))\n lines = [l for l in r]\n count = 0\n for line in lines:\n for value in line:\n if (value == author):\n lines[count][0] = ''\n lines[count][1] = ''\n count += 1\n \n open('author.csv','w').write('')\n with open('author.csv', 'a') as newFile:\n newFileWriter = csv.writer(newFile)\n for line in lines:\n if (line):\n newFileWriter.writerow(line)\n \n# Test building Article CRUD\ndef test_author_crud():\n\n # Tests to write for Author CRUD\n\n # A CSV file exists\n from os.path import exists\n create_author_table()\n assert(exists('author.csv'))\n\n # * CSV file Author 'Bill, Bill@Here.com'\n add_author_table('Bill', 'bill@here.com')\n # * Add 'Sue' to Author table\n add_author_table('Sue', 'sue@here.com')\n # * Add list of other names (10 people)\n authors = {\"Jon\":\"jon@here.com\",\n \"Jane\":\"jane@here.com\",\n \"Jill\":\"jill@here.com\",\n \"Jim\":\"jim@here.com\",\n \"Jack\":\"jack@here.com\",\n \"Jerry\":\"jerry@here.com\",\n \"Bob\":\"bbo@here.com\",\n \"Berry\":\"berry@here.com\",\n \"Tod\":\"tod@here.com\",\n \"Al\":\"al@here.com\"}\n add_mulitple_authors(authors)\n # * Read CSV records\n read_authors()\n # * Print Author email\n print_author_email('Jim')\n # * Change email\n change_author_email('Jim', 'jimmy@here.com')\n # * Delete Author\n delete_author('Jon')\n\ndef open_CSV():\n open('articles.csv','w').write('Rattlesnakes, I hate snakes')\n\ndef print_article():\n ifile = open('articles.csv', \"r\", encoding='UTF8')\n read = csv.reader(ifile)\n for row in read :\n print (row) \n\ndef add_articles():\n open('articles.csv','a').write('Kittens, Kittens are Fuzzy')\n\ndef write_article(text):\n open('articles.csv','a').write(text)\n\ndef add_authors():\n multiline = os.linesep + 'author_id1' + os.linesep + 'author_id2' + os.linesep + 'author_id3' + os.linesep + 'author_id4'\n open('articles.csv','a').write(multiline)\n \n\ndef print_articles_authors():\n ifile = open('articles.csv', 
\"r\", encoding='UTF8')\n read = csv.reader(ifile)\n for row in read :\n print (row)\n\ndef remove_article():\n open('articles.csv','a').write('')\n\n\ndef remove_authors():\n open('articles.csv','a').write('')\n\n\ndef test_article_crud():\n\n # * CSV file Article 'Rattlesnakes, I hate snakes'\n open_CSV()\n # * Print Article list \n print_article()\n # * Add Article 'Kittens, Kittens are Fuzzy'\n add_articles()\n #write.('Kittens, Kittens are Fuzzy')\n write_article('Kittens, Kittens are Fuzzy')\n # * Add author_id of 4 to Articles\n add_authors()\n # * Print Articles showing Author names\n print_articles_authors()\n # * Remove Article\n remove_article()\n # * Remove Author\n remove_authors()\n \n text = \"line1\\nline2\"\n path = 'test.txt'\n #write_file(path, text)\n #t = read_file(path)\n #print('text:'+text+'$')\n #print('t:'+t+'$')\n #assert(t==text)\n #assert(t!=text)\n pass\n\n# Run test\nif __name__ == '__main__' :\n test_author_crud()\n test_article_crud()\n","sub_path":"Exercises/Results/pere1222/crud.py","file_name":"crud.py","file_ext":"py","file_size_in_byte":5266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"614261111","text":"from __future__ import print_function\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nfrom sklearn.metrics import accuracy_score\n\n\n# create cnn\nclass net(nn.Module):\n def __init__(self):\n # structure\n super(net, self).__init__()\n self.conv1 = nn.Conv1d(2, 128, 50, stride=3)\n self.conv2 = nn.Conv1d(128, 32, 7, stride=1)\n self.conv3 = nn.Conv1d(32, 32, 10, stride=1)\n self.conv4 = nn.Conv1d(32, 128, 5, stride=2)\n self.conv5 = nn.Conv1d(128, 256, 15, stride=1)\n self.conv6 = nn.Conv1d(256, 512, 5, stride=1)\n self.conv7 = nn.Conv1d(512, 128, 3, stride=1)\n self.dense1 = nn.Linear(1152, 512)\n self.dense2 = nn.Linear(512, 17)\n\n self.bn1 = nn.BatchNorm1d(128)\n self.bn2 = nn.BatchNorm1d(32)\n\n self.dropout = nn.Dropout(0.1)\n self.faltten = nn.Flatten()\n\n # forward propagation\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = self.bn1(x)\n x = F.max_pool1d(x, 2, stride=3)\n\n x = F.relu(self.conv2(x))\n x = self.bn2(x)\n x = F.max_pool1d(x, 2, stride=2)\n\n x = F.relu(self.conv3(x))\n x = F.relu(self.conv4(x))\n x = F.max_pool1d(x, 2, stride=2)\n\n x = F.relu(self.conv5(x))\n x = F.max_pool1d(x, 2, stride=2)\n x = F.relu(self.conv6(x))\n x = F.relu(self.conv7(x))\n\n x = self.faltten(x)\n x = self.dropout(F.relu(self.dense1(x)))\n output = self.dense2(x)\n\n return output\n\n def train_on_dataset(self, loader, optimizer):\n self.train()\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n cost = nn.CrossEntropyLoss()\n with tqdm(total=len(loader)) as progress_bar:\n for batch_idx, (data, label) in enumerate(loader):\n data = data.to(device)\n label = label.to(device)\n optimizer.zero_grad()\n output = self(data)\n loss = cost(output, label)\n\n preds = torch.argmax(output, axis=-1)\n acc = (preds == label).sum() / float(label.shape[0])\n info = {\n 'loss': loss.item(),\n 'acc': acc.item()\n }\n progress_bar.set_postfix(**info)\n # back propagation\n loss.backward()\n optimizer.step()\n progress_bar.update(1)\n\n # validation, inlcudes evaluation on validation set\n def test_on_dataset(self, val_loader, checkpoint=None):\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n\n if checkpoint is not None:\n model_state = torch.load(checkpoint)\n self.load_state_dict(model_state)\n # Need this line for 
things like dropout etc.\n # evalution\n self.eval()\n preds = []\n targets = []\n cost = nn.CrossEntropyLoss()\n losses = []\n\n with torch.no_grad():\n # calculate a series of variables for evaluation\n for batch_idx, (data, label) in enumerate(val_loader):\n # original data\n data = data.to(device)\n # labels (0,1)\n target = label.clone()\n # predicted labels\n output = self(data)\n preds.append(output.cpu().numpy())\n targets.append(target.cpu().numpy())\n output = output.to(device)\n label = label.to(device)\n # loss\n losses.append(cost(output, label))\n # average loss value\n loss = torch.mean(torch.stack(losses))\n # reshape predicted labels\n preds = np.argmax(np.concatenate(preds), axis=1)\n # reshape original labels\n targets = np.concatenate(targets)\n # accurancy\n acc = accuracy_score(targets, preds)\n\n return loss, acc\n","sub_path":"cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"275419862","text":"\"\"\"\n The MIT License (MIT)\n Copyright (c) 2016 Intel Corporation\n\n Permission is hereby granted, free of charge, to any person obtaining a copy of \n this software and associated documentation files (the \"Software\"), to deal in \n the Software without restriction, including without limitation the rights to \n use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of \n the Software, and to permit persons to whom the Software is furnished to do so, \n subject to the following conditions:\n\n The above copyright notice and this permission notice shall be included in all \n copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS \n FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR \n COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER \n IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN \n CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\"\"\"\n\n\"\"\"create db row table\n\nRevision ID: 50b48155ef53\nRevises: 37ec9fe97cbf\nCreate Date: 2015-10-09 13:21:34.198137\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '50b48155ef53'\ndown_revision = '37ec9fe97cbf'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy import UniqueConstraint\nfrom sqlalchemy.schema import CreateSequence, Sequence\n\n\ndef upgrade():\n op.execute(\n CreateSequence(\n Sequence('db_row_tile_row_id_seq', minvalue=0, start=0, increment=1)\n )\n )\n op.create_table(\n 'db_row',\n sa.Column('id', sa.BigInteger, primary_key=True),\n sa.Column('db_array_id', sa.BigInteger, sa.ForeignKey('db_array.id'), nullable=False),\n sa.Column('tile_row_id', sa.BigInteger, Sequence(\n 'db_row_tile_row_id_seq'), nullable=False)\n )\n\n\ndef downgrade():\n op.execute(DropSequence('db_row_tile_row_id_seq'))\n op.drop_table('db_row')\n","sub_path":"metadb/alembic/versions/50b48155ef53_create_db_row_table.py","file_name":"50b48155ef53_create_db_row_table.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"647924281","text":"'''\r\nCreated on Dec 1, 2012\r\n\r\n@author: Jacob Mokris\r\n\r\nBased on the following grammar: http://science.kennesaw.edu/~dgayler/cs3150/docs/grammar.html\r\n\r\nAll relevant class files based on: http://science.kennesaw.edu/~dgayler/cs3150/examples/java_project_instructor/\r\n'''\r\nfrom LexicalAnalyzer import LexicalAnalyzer\r\nfrom ParserException import ParserException\r\nfrom StatementList import StatementList\r\nfrom BinaryExpression import BinaryExpression\r\nfrom Id import Id\r\nfrom WhileStatement import WhileStatement\r\nfrom DisplayStatement import DisplayStatement\r\nfrom BooleanExpression import BooleanExpression\r\nfrom EQExpression import EQExpression\r\nfrom GEExpression import GEExpression\r\nfrom GTExpression import GTExpression\r\nfrom LEExpression import LEExpression\r\nfrom NEExpression import NEExpression\r\nfrom LTExpression import LTExpression\r\nfrom Operand import Operand\r\nfrom NumberFormatException import NumberFormatException\r\nfrom LiteralInteger import LiteralInteger\r\nfrom IfStatement import IfStatement\r\nfrom ArithmeticExpression import ArithmeticExpression\r\nfrom AddExpression import AddExpression\r\nfrom SubExpression import SubExpression\r\nfrom DivExpression import DivExpression\r\nfrom MulExpression import MulExpression\r\nfrom Program import Program\r\nfrom Statement import Statement\r\nfrom AssignmentStatement import AssignmentStatement\r\nfrom UnaryExpression import UnaryExpression\r\n\r\nclass Parser(object):\r\n \r\n lex = LexicalAnalyzer\r\n \r\n def __init__(self, fileName):\r\n # global lex\r\n Parser.lex = LexicalAnalyzer(fileName)\r\n \r\n def parse(self):\r\n # global lex\r\n self.match(\"main\")\r\n self.match(\"(\")\r\n self.match(\")\")\r\n stmntList = self.getStatementList()\r\n tok = Parser.lex.getToken()\r\n if (tok != \"$\"):\r\n raise ParserException(\"garbage at end of program\")\r\n return Program(stmntList)\r\n \r\n def getStatementList(self):\r\n # global lex\r\n stmntList = StatementList\r\n stmnt = self.getStatement()\r\n stmntList.add(stmnt)\r\n tok 
= Parser.lex.getLookaheadToken()\r\n while (tok == \";\"):\r\n tok = Parser.lex.getToken()\r\n stmnt = self.getStatement()\r\n stmntList.add(stmnt)\r\n tok = Parser.lex.getLookaheadToken()\r\n return stmntList\r\n \r\n def getStatement(self):\r\n # global lex\r\n stmnt = Statement\r\n tok = Parser.lex.getLookaheadToken()\r\n if (tok == \"if\"):\r\n stmnt = self.getIfStatement()\r\n elif (tok == \"while\"):\r\n stmnt = self.getWhileStatement()\r\n elif (tok == \"display\"):\r\n stmnt = self.getDisplayStatement()\r\n else:\r\n stmnt = self.getAssignmentStatement()\r\n return stmnt\r\n \r\n def getAssignmentStatement(self):\r\n var = self.getId()\r\n self.match(\"<-\")\r\n expr = self.getArithmeticExpression()\r\n return AssignmentStatement(var, expr)\r\n \r\n def getArithmeticExpression(self):\r\n # global lex\r\n expr = ArithmeticExpression\r\n tok = Parser.lex.getLookaheadToken()\r\n if (self.isValidArithmeticOp(tok)):\r\n op = self.getArithmeticOperator()\r\n op1 = self.getOperand()\r\n op2 = self.getOperand()\r\n expr = self.createBinaryExpression(op, op1, op2)\r\n else:\r\n op = self.getOperand()\r\n expr = UnaryExpression(op)\r\n return expr \r\n \r\n \r\n def createBinaryExpression(self, op, op1, op2):\r\n expr = ArithmeticExpression \r\n if (op == BinaryExpression.ArithmeticOperator.add):\r\n expr = AddExpression(op1, op2)\r\n elif (op == BinaryExpression.ArithmeticOperator.sub):\r\n expr = SubExpression(op1, op2)\r\n elif (op == BinaryExpression.ArithmeticOperator.mul):\r\n expr = MulExpression(op1, op2)\r\n else:\r\n expr = DivExpression(op1, op2)\r\n return expr \r\n \r\n def getArithmeticOperator(self): \r\n # global lex \r\n op = BinaryExpression.ArithmeticOperator\r\n tok = Parser.lex.getToken()\r\n if (tok == \"+\"):\r\n op = BinaryExpression.ArithmeticOperator.add\r\n elif (tok == \"-\"):\r\n op = BinaryExpression.ArithmeticOperator.sub\r\n elif (tok == \"*\"):\r\n op = BinaryExpression.ArithmeticOperator.mul\r\n elif (tok == \"/\"):\r\n op = BinaryExpression.ArithmeticOperator.div\r\n else:\r\n raise ParserException(\"arithmetic operator expected\")\r\n \r\n return op\r\n \r\n def isValidArithmeticOp(self, s):\r\n return (s == \"+\") or (s == \"-\") or (s == \"*\") or (s == \"/\")\r\n \r\n def getDisplayStatement(self):\r\n self.match(\"display\")\r\n self.match(\"(\")\r\n var = self.getId()\r\n self.match(\")\")\r\n return DisplayStatement(var)\r\n \r\n def getId(self):\r\n # global lex\r\n tok = Parser.lex.getToken() \r\n if (tok is None) or (len(tok) != 1):\r\n raise ParserException(\"id expected\")\r\n return Id(tok[0])\r\n \r\n def getWhileStatement(self):\r\n self.match(\"while\")\r\n expr = self.getBooleanExpression() \r\n self.match(\"do\")\r\n stmntList = self.getStatementList()\r\n self.match(\"end\")\r\n return WhileStatement(expr, stmntList) \r\n \r\n def getBooleanExpression(self):\r\n op = self.getRelativeOp()\r\n op1 = self.getOperand()\r\n op2 = self.getOperand()\r\n return self.createBooleanExpression(op, op1, op2)\r\n \r\n def createBooleanExpression(self, op, op1, op2):\r\n if (op == BooleanExpression.RelativeOperator.EQ):\r\n expr = EQExpression (op1, op2)\r\n elif (op == BooleanExpression.RelativeOperator.NE):\r\n expr = NEExpression (op1, op2)\r\n elif (op == BooleanExpression.RelativeOperator.GT):\r\n expr = GTExpression (op1, op2)\r\n elif (op == BooleanExpression.RelativeOperator.GE):\r\n expr = GEExpression (op1, op2)\r\n elif (op == BooleanExpression.RelativeOperator.LT):\r\n expr = LTExpression (op1, op2)\r\n else:\r\n expr = LEExpression (op1, 
op2)\r\n return expr\r\n \r\n def getRelativeOp(self):\r\n # global lex\r\n op = BooleanExpression.RelativeOperator\r\n tok = Parser.lex.getToken()\r\n if (tok == \"<\"):\r\n op = BooleanExpression.RelativeOperator.LT\r\n elif (tok == \"<=\"):\r\n op = BooleanExpression.RelativeOperator.LE\r\n elif (tok == \">\"):\r\n op = BooleanExpression.RelativeOperator.GT\r\n elif (tok == \">=\"):\r\n op = BooleanExpression.RelativeOperator.GE\r\n elif (tok == \"=\"):\r\n op = BooleanExpression.RelativeOperator.EQ\r\n elif (tok == \"/=\"):\r\n op = BooleanExpression.RelativeOperator.NE\r\n else:\r\n raise ParserException (\"relative operator expected\");\r\n return op\r\n \r\n def getOperand(self):\r\n # global lex\r\n op = Operand\r\n try:\r\n tok = Parser.lex.getLookaheadToken()\r\n if (tok == None):\r\n raise ParserException(\"operand expected\")\r\n if (tok[0].isdigit()):\r\n op = self.getLiteralInteger()\r\n else:\r\n op = self.getId() \r\n except ParserException:\r\n print (\"operand expected\")\r\n return op\r\n \r\n def getLiteralInteger(self):\r\n # global lex\r\n tok = Parser.lex.getToken()\r\n n = 0\r\n try: \r\n n = int(tok)\r\n except NumberFormatException:\r\n print(\"literal integer expected\")\r\n return LiteralInteger(n)\r\n \r\n def getIfStatement(self):\r\n self.match(\"if\")\r\n expr = self.getBooleanExpression()\r\n self.match(\"then\")\r\n stmntList1 = self.getStatementList()\r\n self.match(\"else\")\r\n stmntList2 = self.getStatementList()\r\n self.match(\"end\")\r\n return IfStatement(expr, stmntList1, stmntList2) \r\n \r\n def match(self, expected):\r\n tok = Parser.lex.getToken() \r\n if (expected != tok):\r\n raise ParserException(\"expected: \", expected, \" got: \", tok) \r\n","sub_path":"Parser.py","file_name":"Parser.py","file_ext":"py","file_size_in_byte":8309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"376256164","text":"import flask\nimport os\n\napp = flask.Flask(__name__)\n\n@app.route('/')\ndef index():\n return flask.render_template(\n \"index.html\",)\n \napp.run(\n port = int(os.getenv(\"PORT\", 8080)), \n host = os.getenv(\"IP\", \"0.0.0.0\"),\n debug = True\n)","sub_path":"launchpage.py","file_name":"launchpage.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"427671607","text":"from sklearn.cluster import KMeans\nfrom sklearn.externals import joblib\nimport numpy as np\nimport os\n\nif __name__ == \"__main__\":\n print(\"Load model\")\n kmeans = joblib.load(\"models/k_means.joblib\")\n for file in os.listdir(\"features/vgg16_fc2/test\"):\n data = np.load(os.path.join(\"features/vgg16_fc2/test\", file))\n group = kmeans.predict(data)\n token = file.split(\"+\")\n class_name = token[0]\n file_name = token[1].replace(\".npy\",\"\")\n print(\"File \", file_name, \" | class : \", class_name, \" | group : \", group)\n ","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"601552950","text":"from .common import EWSService\nfrom ..properties import AlternateId, AlternatePublicFolderId, AlternatePublicFolderItemId, ID_FORMATS\nfrom ..util import create_element, set_xml_value\nfrom ..version import EXCHANGE_2007_SP1\n\n\nclass ConvertId(EWSService):\n \"\"\"Take a list of IDs to convert. 
Returns a list of converted IDs or exception instances, in the same order as the\n input list.\n\n MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/convertid-operation\n \"\"\"\n\n SERVICE_NAME = 'ConvertId'\n supported_from = EXCHANGE_2007_SP1\n\n def call(self, items, destination_format):\n if destination_format not in ID_FORMATS:\n raise ValueError(\"'destination_format' %r must be one of %s\" % (destination_format, ID_FORMATS))\n return self._elems_to_objs(\n self._chunked_get_elements(self.get_payload, items=items, destination_format=destination_format)\n )\n\n def _elems_to_objs(self, elems):\n cls_map = {cls.response_tag(): cls for cls in (\n AlternateId, AlternatePublicFolderId, AlternatePublicFolderItemId\n )}\n for elem in elems:\n if isinstance(elem, Exception):\n yield elem\n continue\n yield cls_map[elem.tag].from_xml(elem, account=None)\n\n def get_payload(self, items, destination_format):\n supported_item_classes = AlternateId, AlternatePublicFolderId, AlternatePublicFolderItemId\n convertid = create_element('m:%s' % self.SERVICE_NAME, attrs=dict(DestinationFormat=destination_format))\n item_ids = create_element('m:SourceIds')\n for item in items:\n if not isinstance(item, supported_item_classes):\n raise ValueError(\"'item' value %r must be an instance of %r\" % (item, supported_item_classes))\n set_xml_value(item_ids, item, version=self.protocol.version)\n if not len(item_ids):\n raise ValueError('\"items\" must not be empty')\n convertid.append(item_ids)\n return convertid\n\n @classmethod\n def _get_elements_in_container(cls, container):\n # We may have other elements in here, e.g. 'ResponseCode'. Filter away those.\n return container.findall(AlternateId.response_tag()) \\\n + container.findall(AlternatePublicFolderId.response_tag()) \\\n + container.findall(AlternatePublicFolderItemId.response_tag())\n","sub_path":"exchangelib/services/convert_id.py","file_name":"convert_id.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"424861149","text":"\"\"\"Add last_modified and previous_price columns\n\nRevision ID: 2118d6930b75\nRevises: 5d22ed705ac\nCreate Date: 2015-06-05 10:15:45.137611\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '2118d6930b75'\ndown_revision = '5d22ed705ac'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n op.add_column('costco_booking', sa.Column('last_modified', sa.TIMESTAMP(), nullable=True))\n op.add_column('costco_booking', sa.Column('previous_price', sa.INTEGER(), nullable=True))\n op.execute(\"UPDATE costco_booking SET last_modified = NOW();\")\n op.execute(\"ALTER TABLE costco_booking ALTER COLUMN last_modified SET NOT NULL;\")\n op.execute(\"UPDATE costco_booking SET previous_price = original_price;\")\n op.execute(\"ALTER TABLE costco_booking ALTER COLUMN previous_price SET NOT NULL;\")\n\n\ndef downgrade():\n op.drop_column('costco_booking', 'previous_price')\n op.drop_column('costco_booking', 'last_modified')\n\n","sub_path":"migrations/versions/2118d6930b75_.py","file_name":"2118d6930b75_.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"262216549","text":"import numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as nnf\nfrom torchvision.models import resnet18, resnet34, resnet50, resnet101, resnet152\n\nfrom ...core.logging import 
log_important, log_warning\nfrom ..blocks.blocks import _DecoderBlock, _PredictionOutput, _Refine\nfrom .base import _DenseBase\nfrom .pspnet import PyramidModule\n\n\nclass _ResNetDense(_DenseBase):\n \"\"\"\n decoder shape defines different factors for the number of maps in the decoder layers.\n \"\"\"\n\n def __init__(self, base_network, decoder_shape='m', out_channels=10, out_channel_weights=None, with_mask=False,\n binary=False, init_state_dict=None, pretrained=False, multipredict=False, thresholds=None,\n pyramid=False, class_mean=False, dropout=None, small_resnet=False, transfer_mode=False):\n super().__init__(3, out_channels, out_channel_weights, binary, with_mask, class_mean=class_mean,\n transfer_mode=transfer_mode)\n\n self.transfer_exclude_parameters = ['post_conv.weight', 'post_conv.bias', 'thresholds']\n self.pyramid = pyramid\n self.dropout = dropout\n self.multipredict = multipredict\n self.resnet = base_network(pretrained=pretrained)\n self.pretrained = pretrained\n\n if not self.pretrained:\n log_warning('You are not using the ImageNet-pretrained weights!')\n\n if init_state_dict is not None:\n log_important('using init state dict', init_state_dict)\n\n state_ext = torch.load(init_state_dict)['state_dict'] # external state dict\n state_int = self.state_dict() # internal state dict\n\n external_states = {key: v for key, v in state_ext.items() if key in state_int and v.size() == state_int[key].size()}\n log_important('no weights found for', set(state_int.keys()).difference(external_states.keys()))\n\n # if external key is available use it, otherwise use internal\n common_state_dict = {k: external_states[k] if k in external_states else state_int[k] for k, v in state_ext.items()}\n self.load_state_dict(common_state_dict)\n\n self.decoder_shape = decoder_shape\n K = 16\n if decoder_shape == 's': dec_factors = (16, 8, 4, 1, 1)\n elif decoder_shape == 'm': dec_factors = (16, 8, 4, 2, 2)\n elif decoder_shape == 'm+': dec_factors = (24, 16, 4, 2, 2)\n elif decoder_shape == 'l': dec_factors = (16, 8, 4, 3, 3)\n elif decoder_shape == 'l+': dec_factors = (24, 16, 4, 3, 3)\n elif decoder_shape == 'xl': dec_factors = (16, 8, 4, 4, 4)\n elif decoder_shape == 'xl+': dec_factors = (24, 16, 4, 4, 4)\n elif decoder_shape == 'xxl': dec_factors = (16, 8, 6, 6, 6)\n elif decoder_shape == 'xxl+': dec_factors = (24, 16, 6, 6, 6)\n elif decoder_shape == 'bottleneck': dec_factors = (16, 8, 1, 1, 1)\n else:\n raise ValueError('Invalid decoder_shape')\n\n # decoder_factors = (16, 8, 2, 1, 1)\n # decoder_factors = (16, 8, 2, 1, 1)\n\n\n\n\n if not small_resnet:\n enc_factors = [192 * K, 32 * K, 16 * K, 4 * K, 3]\n else:\n enc_factors = [48 * K, 8 * K, 4 * K, 4 * K, 3]\n\n if pyramid:\n # old: 2048\n feats = {'d3': 512, 'd4': 1024, 'd5': 2048, 'u2': 256, 'u3': 128, 'u4': 64}[pyramid]\n self.pyramid = PyramidModule(in_features=feats, mid_features=512, out_features=feats)\n self.pyramid_layout = pyramid\n else:\n self.pyramid = None\n self.pyramid_layout = None\n\n self.decoder2 = _DecoderBlock(enc_factors[0], dec_factors[0]*K, dropout=dropout)\n self.decoder3 = _DecoderBlock(dec_factors[0]*K + enc_factors[1], dec_factors[1]*K, dropout=dropout)\n self.decoder4 = _DecoderBlock(dec_factors[1]*K + enc_factors[2], dec_factors[2]*K, dropout=dropout)\n self.decoder5 = _DecoderBlock(dec_factors[2]*K + enc_factors[3], dec_factors[3]*K, dropout=dropout)\n self.decoder6 = _DecoderBlock(dec_factors[3]*K + enc_factors[4], dec_factors[4]*K, dropout=dropout)\n self.decoders = [self.decoder2, self.decoder3, 
self.decoder4, self.decoder5, self.decoder6]\n self.post_conv = nn.Conv2d(dec_factors[4]*K, self.out_channels, (1, 1))\n\n\n\n ######## Refination test ###########\n\n# if not small_resnet:\n# k_r=3\n# enc_factors_refine_extra = [512 * k_r, 256 * k_r, 128 * k_r, 64 * k_r, 0 , 0 ]\n# else:\n# k_r=1\n# #enc_factors_refine = [512 * k_r, 256 * k_r, 128 * k_r, 64 * k_r, 64, 3]\n# enc_factors_refine_extra = [ 0, 0, 0, 0, 0, 0]\n#\n# \n# enc_factors_refine = [512 , 256 , 128 , 64 , 64, 3]#\n#\n# self.post_conv_refination = nn.Conv2d(16,12,(1,1))\n# self.dec2 = _Refine(enc_factors_refine[1] + enc_factors_refine_extra[1],enc_factors_refine[0] + enc_factors_refine_extra[0],1)\n# self.dec3 = _Refine(enc_factors_refine[2] + enc_factors_refine_extra[2], enc_factors_refine[1], 2)\n# self.dec4 = _Refine(enc_factors_refine[3] + enc_factors_refine_extra[3], enc_factors_refine[2], 3)\n# self.dec5 = _Refine(enc_factors_refine[4] + enc_factors_refine_extra[4], enc_factors_refine[3], 4)\n# self.dec6 = _Refine(enc_factors_refine[5] + enc_factors_refine_extra[5], int(enc_factors_refine[4] * 0.5) ,5)\n\n #self.post_conv_refination = nn.Conv2d(16,12,(1,1))\n #self.dec2 = _Refine(256,512,1)\n #self.dec3 = _Refine(128,256,2)\n #self.dec4 = _Refine(64,128,3)\n #self.dec5 = _Refine(64,64,4)\n #self.dec6 = _Refine(3,32,5)\n ######## Refination test ###########\n\n if self.multipredict:\n self.pred_out = nn.ModuleList([\n _PredictionOutput(dec_factors[i] * K, self.out_channels, with_softmax=True, dropout=dropout) for i in range(1, 4)\n ])\n\n if thresholds:\n self.set_thresholds(thresholds)\n\n def name(self):\n return '{}-{}{}{}{}{}{}{}{}'.format(self.__class__.__name__, self.decoder_shape,\n '-pyr' + self.pyramid_layout if self.pyramid else '',\n '-pretrain' if self.pretrained else '',\n '-mulpred' if self.multipredict else '',\n '-clsmean' if self.class_mean else '',\n '-mask' if self.with_mask else '',\n '-drop' + str(self.dropout) if self.dropout is not None else '',\n '-cw' if self.out_channel_weights is not None else '')\n\n def n_parameters(self):\n return sum([np.prod(p.size()) for p in self.parameters()])\n\n # def get_features(self, x, decode_steps=None):\n # x0 = self.normalize(x)\n #\n # x1 = self.resnet.conv1(x0)\n # x1 = self.resnet.bn1(x1)\n # x1 = self.resnet.relu(x1)\n # x1 = self.resnet.maxpool(x1)\n #\n # x2 = self.resnet.layer1(x1)\n # x3 = self.resnet.layer2(x2)\n # x4 = self.resnet.layer3(x3)\n # x5 = self.resnet.layer4(x4)\n #\n # # if self.pyramid is not None:\n # # x5 = self.pyramid(x5)\n #\n # x_up = x5\n #\n # for decoder, x_skip in list(zip(self.decoders, [x4, x3, x2, x1, x0]))[:decode_steps]:\n # x_up = decoder(x_up, x_skip)\n #\n # # x_up = self.decoder6(x_up, x0)\n #\n # return x_up,\n\n def forward(self, x):\n\n\n x0 = self.normalize(x)\n #print('size x0: ',x0.shape)\n x1 = self.resnet.conv1(x0)\n x1 = self.resnet.bn1(x1)\n x1 = self.resnet.relu(x1)\n x1 = self.resnet.maxpool(x1)\n #print('size x1: ',x1.shape)\n\n x2 = self.resnet.layer1(x1)\n #print('size x2: ',x2.shape)\n x3 = self.resnet.layer2(x2)\n\n if self.pyramid_layout == 'd3': x3 = self.pyramid(x3)\n x4 = self.resnet.layer3(x3)\n #print('size x3: ',x3.shape)\n if self.pyramid_layout == 'd4': x4 = self.pyramid(x4)\n x5 = self.resnet.layer4(x4)\n #print('size x4: ',x4.shape)\n #print('size x5: ',x5.shape)\n if self.pyramid_layout == 'd5': x5 = self.pyramid(x5)\n\n # if self.pyramid is not None:\n # x5 = self.pyramid(x5)\n\n \n\n ######## Refination test ###########\n# x_up2 = self.dec2(x5,x4)\n# #print(\"x_up2: \",x_up2.shape)\n# 
x_up3 = self.dec3(x_up2,x3)\n# #print(\"x_up3: \",x_up3.shape)\n# x_up4 = self.dec4(x_up3,x2)\n# #print(\"x_up4: \",x_up4.shape)\n# x_up5 = self.dec5(x_up4,x1)\n# #print(\"x_up5: \",x_up5.shape)\n# x_up6 = self.dec6(x_up5,x0) \n# #print(\"x_up6: \",x_up6.shape)\n# \n# x_up = self.post_conv_refination(x_up6)\n# #print('x_up : ',x_up.shape)\n\n ######## Refination test ###########\n\n\n ######## Original ###########\n\n x_up2 = self.decoder2(x5, x4)\n #print('size x_up2: ',x_up2.shape)\n if self.pyramid_layout == 'u2': x_up2 = self.pyramid(x_up2)\n x_up3 = self.decoder3(x_up2, x3)\n #print('size x_up3: ',x_up3.shape)\n if self.pyramid_layout == 'u3': x_up3 = self.pyramid(x_up3)\n x_up4 = self.decoder4(x_up3, x2)\n # print('size x_up4: ',x_up4.shape)\n if self.pyramid_layout == 'u4': x_up4 = self.pyramid(x_up4)\n x_up5 = self.decoder5(x_up4, x1)\n #print('size x_up5: ',x_up5.shape)\n x_up6 = self.decoder6(x_up5, x0)\n #print('size x_up6: ',x_up6.shape)\n\n x_up = self.post_conv(x_up6)\n #print('x_up--> ' , x_up.shape)\n\n ######## Original ###########\n\n # x_up1 = self.decoder1(x5)\n # x_up2 = self.decoder2(torch.cat((x_up1, x4), dim=1))\n # x_up3 = self.decoder3(torch.cat((x_up2, x3), dim=1))\n # x_up4 = self.decoder4(torch.cat((x_up3, x2), dim=1))\n # x_up5 = self.decoder5(torch.cat((x_up4, x), dim=1))\n\n # sizes\n # x torch.Size([1, 3, 550, 824])\n # x0 torch.Size([1, 3, 550, 824])\n # x1 torch.Size([1, 64, 138, 206])\n # x2 torch.Size([1, 256, 138, 206])\n # x3 torch.Size([1, 512, 69, 103])\n # x4 torch.Size([1, 1024, 35, 52])\n # x5 torch.Size([1, 2048, 18, 26])\n\n # x_up2 torch.Size([1, 256, 35, 52])\n # x_up3 torch.Size([1, 128, 69, 103])\n # x_up4 torch.Size([1, 64, 138, 206])\n # x_up5 torch.Size([1, 32, 138, 206])\n # x_up6 torch.Size([1, 32, 550, 824])\n # x_up torch.Size([1, 12, 550, 824])\n\n \n # x_up *= 0.01 # prevents extreme values at the beginning of the training\n\n if self.multipredict:\n x_up *= 0.4\n for i, x_up_in in enumerate([x_up3, x_up4, x_up5]):\n x_up += 0.2*self.pred_out[i](x_up_in, (x_up.size(2), x_up.size(3)))\n\n if self.binary:\n class_pred = nnf.sigmoid(x_up)\n else:\n # not necessary because loss does it\n class_pred = nnf.log_softmax(x_up, dim=1)\n\n return class_pred,\n\n\nclass ResNet18Dense(_ResNetDense):\n\n def __init__(self, **kwargs):\n super().__init__(resnet18, **kwargs, small_resnet=True)\n\n\nclass ResNet34Dense(_ResNetDense):\n\n def __init__(self, **kwargs):\n super().__init__(resnet34, **kwargs,small_resnet=True)\n\n\nclass ResNet50Dense(_ResNetDense):\n\n def __init__(self, **kwargs):\n super().__init__(resnet50, **kwargs)\n\n\nclass ResNet101Dense(_ResNetDense):\n\n def __init__(self, **kwargs):\n super().__init__(resnet101, **kwargs)\n\n\nclass ResNet152Dense(_ResNetDense):\n\n def __init__(self, **kwargs):\n super().__init__(resnet152, **kwargs)\n\n","sub_path":"src/ava/models/dense/dense_resnet.py","file_name":"dense_resnet.py","file_ext":"py","file_size_in_byte":11755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"272558242","text":"import torch\nimport torch.nn as nn\n\n\nclass Flatten(nn.Module):\n def forward(self, x):\n N, C, H, W = x.size() # read in N, C, H, W\n return x.view(N, -1) # \"flatten\" the C * H * W values into a single vector per image\n\nclass Unflatten(nn.Module):\n \"\"\"\n An Unflatten module receives an input of shape (N, C*H*W) and reshapes it\n to produce an output of shape (N, C, H, W).\n \"\"\"\n def __init__(self, N=-1, C=128, H=7, W=7):\n 
super(Unflatten, self).__init__()\n self.N = N\n self.C = C\n self.H = H\n self.W = W\n\n def forward(self, x):\n return x.view(self.N, self.C, self.H, self.W)\n\nclass Initializer:\n def __init__(self):\n pass\n\n @staticmethod\n def initialize(model, initialization, **kwargs):\n\n def weights_init(m):\n if isinstance(m, nn.Conv2d):\n initialization(m.weight.data, **kwargs)\n try:\n initialization(m.bias.data)\n except:\n pass\n\n elif isinstance(m, nn.Linear):\n initialization(m.weight.data, **kwargs)\n try:\n initialization(m.bias.data)\n except:\n pass\n\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1.0)\n m.bias.data.fill_(0)\n\n elif isinstance(m, nn.BatchNorm1d):\n m.weight.data.fill_(1.0)\n m.bias.data.fill_(0)\n\n model.apply(weights_init)\n\nclass VAEGAN_Gen(nn.Module):\n def __init__(self, device=None, latent_size=100, num_channels=3):\n super().__init__()\n\n self.device = device if device != None else torch.device('cpu')\n self.latent_size = latent_size\n\n # 3 x 64 x 64\n self.encoder = nn.Sequential(\n nn.Conv2d(num_channels, 32, 4, stride=2, padding=1), # 32 x 32 x 32\n nn.BatchNorm2d(32),\n nn.LeakyReLU(),\n\n nn.Conv2d(32, 64, 4, stride=2, padding=1), # 64 x 16 x 16 \n nn.BatchNorm2d(64),\n nn.LeakyReLU(),\n\n nn.Conv2d(64, 128, 4, stride=2, padding=1), # 128 x 8 x 8\n nn.BatchNorm2d(128),\n nn.LeakyReLU(),\n\n\n nn.Conv2d(128, 256, 4, stride=2, padding=1), # 256 x 4 x 4\n nn.BatchNorm2d(256),\n nn.LeakyReLU(),\n\n Flatten(), # 256 * 4 * 4\n\n# nn.Linear(256* 4 * 4, self.latent_size),\n# nn.BatchNorm1d(self.latent_size),\n# nn.LeakyReLU(),\n#\n\n# nn.Linear(128 * 4 * 4, 100)\n )\n\n self.fc_mu = nn.Linear(256 * 4 * 4, self.latent_size)\n self.fc_logvar = nn.Linear(256 * 4 * 4, self.latent_size)\n\n self.decoder = nn.Sequential(\n nn.Linear(self.latent_size, 256 * 4 * 4),\n# nn.BatchNorm1d(self.latent_size),\n# nn.PReLU(),\n\n# nn.Linear(self.latent_size, 128 * 8 * 8),\n# nn.BatchNorm1d(128 * 8 * 8),\n# nn.LeakyReLU(),\n Unflatten(C=256, H=4, W=4),\n\n nn.Upsample(scale_factor=2, mode='nearest'), # 256, 8, 8\n nn.Conv2d(256, 128, 3, stride=1, padding=1), # 128, 8, 8\n nn.BatchNorm2d(128),\n nn.LeakyReLU(),\n\n nn.Upsample(scale_factor=2, mode='nearest'), # 128, 16, 16\n nn.Conv2d(128, 64, 3, stride=1, padding=1), # 64, 16, 16\n nn.BatchNorm2d(64),\n nn.LeakyReLU(),\n\n nn.Upsample(scale_factor=2, mode='nearest'), # 64, 32, 32\n nn.Conv2d(64, 32, 3, stride=1, padding=1), # 32, 32, 32\n nn.BatchNorm2d(32),\n nn.LeakyReLU(),\n\n nn.Upsample(scale_factor=2, mode='nearest'), # 32, 64, 64\n nn.Conv2d(32, num_channels, 3, stride=1, padding=1), # 3, 64, 64\n nn.Tanh()\n )\n\n self.discriminator = nn.Sequential(\n nn.Conv2d(num_channels, 32, 4, stride=2, padding=1), # 32 x 32 x 32\n nn.BatchNorm2d(32),\n nn.LeakyReLU(),\n\n nn.Conv2d(32, 64, 4, stride=2, padding=1), # 64 x 16 x 16 \n nn.BatchNorm2d(64),\n nn.LeakyReLU(),\n\n nn.Conv2d(64, 128, 4, stride=2, padding=1), # 128 x 8 x 8\n nn.BatchNorm2d(128),\n nn.LeakyReLU(),\n\n\n nn.Conv2d(128, 256, 4, stride=2, padding=1), # 256 x 4 x 4\n nn.BatchNorm2d(256),\n nn.LeakyReLU(),\n\n Flatten(), # 256 * 4 * 4\n\n nn.Linear(256 * 4 * 4, self.latent_size),\n nn.BatchNorm1d(self.latent_size),\n nn.LeakyReLU(),\n\n nn.Linear(self.latent_size, 1) # no sigmoid applied, use BCEWithLogitsLoss\n \n \n# nn.Linear(256* 4 * 4, self.latent_size),\n# nn.BatchNorm1d(self.latent_size),\n# nn.LeakyReLU(),\n#\n\n# nn.Linear(128 * 4 * 4, 100)\n )\n\n \n self.to(device=device)\n\n def sample_latent(self, mu, logvar):\n if self.training:\n std 
= torch.exp(0.5 * logvar)\n eps = torch.randn_like(std)\n return mu + eps*std\n else:\n return mu\n\n def get_latent_z(self, x):\n enc_y = self.encoder(x)\n mu, logvar = self.fc_mu(enc_y), self.fc_logvar(enc_y)\n z = self.sample_latent(mu, logvar) \n return z\n\n def encode(self, x):\n enc_y = self.encoder(x)\n mu, logvar = self.fc_mu(enc_y), self.fc_logvar(enc_y)\n z = self.sample_latent(mu, logvar) \n return z\n\n\n def forward(self, x):\n enc_y = self.encoder(x)\n mu, logvar = self.fc_mu(enc_y), self.fc_logvar(enc_y)\n z = self.sample_latent(mu, logvar) \n y = self.decoder(z)\n\n return y, mu, logvar \n\n\nclass VAEGAN_Disc(nn.Module):\n def __init__(self, device=None, latent_size=100, num_channels=3):\n super().__init__()\n\n self.device = device if device != None else torch.device('cpu')\n self.latent_size = latent_size\n\n self.discriminator = nn.Sequential(\n nn.Conv2d(num_channels, 32, 4, stride=2, padding=1), # 32 x 32 x 32\n nn.BatchNorm2d(32),\n nn.LeakyReLU(),\n\n nn.Conv2d(32, 64, 4, stride=2, padding=1), # 64 x 16 x 16 \n nn.BatchNorm2d(64),\n nn.LeakyReLU(),\n\n nn.Conv2d(64, 128, 4, stride=2, padding=1), # 128 x 8 x 8\n nn.BatchNorm2d(128),\n nn.LeakyReLU(),\n\n\n nn.Conv2d(128, 256, 4, stride=2, padding=1), # 256 x 4 x 4\n nn.BatchNorm2d(256),\n nn.LeakyReLU(),\n\n Flatten(), # 256 * 4 * 4\n\n nn.Linear(256 * 4 * 4, self.latent_size),\n nn.BatchNorm1d(self.latent_size),\n nn.LeakyReLU(),\n\n nn.Linear(self.latent_size, 1) # no sigmoid applied, use BCEWithLogitsLoss\n \n )\n\n self.to(device=device)\n\n def forward(self, x):\n logits = self.discriminator(x)\n return logits\n","sub_path":"src/models/vaegan.py","file_name":"vaegan.py","file_ext":"py","file_size_in_byte":7014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"137410322","text":"import binascii\n\nimport synapse.exc as s_exc\nimport synapse.common as s_common\n\nimport synapse.lib.stormtypes as s_stormtypes\n\n@s_stormtypes.registry.registerLib\nclass HexLib(s_stormtypes.Lib):\n '''\n A Storm library which implements helpers for hexadecimal encoded strings.\n '''\n _storm_locals = (\n {'name': 'encode', 'desc': 'Encode bytes into a hexadecimal string.',\n 'type': {'type': 'function', '_funcname': 'encode',\n 'args': (\n {'name': 'valu', 'type': 'bytes', 'desc': 'The bytes to be encoded into a hex string.'},\n ),\n 'returns': {'type': 'str', 'desc': 'The hex encoded string.', }\n }},\n {'name': 'decode', 'desc': 'Decode a hexadecimal string into bytes.',\n 'type': {'type': 'function', '_funcname': 'decode',\n 'args': (\n {'name': 'valu', 'type': 'str', 'desc': 'The hex string to be decoded into bytes.'},\n ),\n 'returns': {'type': 'bytes', 'desc': 'The decoded bytes.', }\n }},\n {'name': 'toint', 'desc': 'Convert a big endian hexadecimal string to an integer.',\n 'type': {'type': 'function', '_funcname': 'toint',\n 'args': (\n {'name': 'valu', 'type': 'str', 'desc': 'The hex string to be converted.'},\n {'name': 'signed', 'type': 'bool', 'default': False,\n 'desc': 'If true, convert to a signed integer.'},\n ),\n 'returns': {'type': 'int', 'desc': 'The resulting integer.', }\n }},\n {'name': 'fromint', 'desc': 'Convert an integer to a big endian hexadecimal string.',\n 'type': {'type': 'function', '_funcname': 'fromint',\n 'args': (\n {'name': 'valu', 'type': 'int', 'desc': 'The integer to be converted.'},\n {'name': 'length', 'type': 'int', 'desc': 'The number of bytes to use to represent the integer.'},\n {'name': 'signed', 'type': 'bool', 'default': 
False,\n 'desc': 'If true, convert as a signed value.'},\n ),\n 'returns': {'type': 'str', 'desc': 'The resulting hex string.', }\n }},\n {'name': 'trimext', 'desc': 'Trim sign extension bytes from a hexadecimal encoded signed integer.',\n 'type': {'type': 'function', '_funcname': 'trimext',\n 'args': (\n {'name': 'valu', 'type': 'str', 'desc': 'The hex string to trim.'},\n ),\n 'returns': {'type': 'str', 'desc': 'The trimmed hex string.', }\n }},\n {'name': 'signext', 'desc': 'Sign extension pad a hexadecimal encoded signed integer.',\n 'type': {'type': 'function', '_funcname': 'signext',\n 'args': (\n {'name': 'valu', 'type': 'str', 'desc': 'The hex string to pad.'},\n {'name': 'length', 'type': 'int', 'desc': 'The number of characters to pad the string to.'},\n ),\n 'returns': {'type': 'str', 'desc': 'The sign extended hex string.', }\n }},\n )\n\n _storm_lib_path = ('hex',)\n\n def getObjLocals(self):\n return {\n # TODO 'dump': self.dump,\n 'toint': self.toint,\n 'encode': self.encode,\n 'decode': self.decode,\n 'fromint': self.fromint,\n 'trimext': self.trimext,\n 'signext': self.signext,\n }\n\n async def encode(self, valu):\n if not isinstance(valu, bytes):\n raise s_exc.BadArg(mesg='$lib.hex.encode() requires a bytes argument.')\n return s_common.ehex(valu)\n\n async def decode(self, valu):\n valu = await s_stormtypes.tostr(valu)\n try:\n return s_common.uhex(valu)\n except binascii.Error as e:\n raise s_exc.BadArg(mesg=f'$lib.hex.decode(): {e}')\n\n async def toint(self, valu, signed=False):\n valu = await s_stormtypes.tostr(valu)\n signed = await s_stormtypes.tobool(signed)\n\n try:\n byts = s_common.uhex(valu)\n except binascii.Error as e:\n raise s_exc.BadArg(mesg=f'$lib.hex.toint(): {e}')\n\n return int.from_bytes(byts, 'big', signed=signed)\n\n async def fromint(self, valu, length, signed=False):\n valu = await s_stormtypes.toint(valu)\n length = await s_stormtypes.toint(length)\n signed = await s_stormtypes.tobool(signed)\n\n try:\n byts = valu.to_bytes(length, 'big', signed=signed)\n return s_common.ehex(byts)\n except OverflowError as e:\n raise s_exc.BadArg(mesg=f'$lib.hex.fromint(): {e}')\n\n async def trimext(self, valu):\n valu = await s_stormtypes.tostr(valu)\n\n try:\n s_common.uhex(valu)\n except binascii.Error as e:\n raise s_exc.BadArg(mesg=f'$lib.hex.trimext(): {e}')\n\n while len(valu) >= 4:\n bits = int(valu[:4], 16) >> 7\n if bits == 0b111111111 or bits == 0b000000000:\n valu = valu[2:]\n continue\n break\n return valu\n\n async def signext(self, valu, length):\n valu = await s_stormtypes.tostr(valu)\n length = await s_stormtypes.toint(length)\n\n try:\n if int(valu[0], 16) >> 3 == 0:\n return valu.rjust(length, '0')\n return valu.rjust(length, 'f')\n except ValueError as e:\n raise s_exc.BadArg(mesg=f'$lib.hex.signext(): {e}')\n","sub_path":"synapse/lib/stormlib/hex.py","file_name":"hex.py","file_ext":"py","file_size_in_byte":5591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"363924874","text":"from equal.marks import marks\nfrom equal.spread import spreadSheet\n\nwhile True:\n print(\"\"\"Επιλέξτε Λειτουργία:\n 1. Εισαγωγή Βαθμολογίας Μαθημάτων\n 2. Παραγωγή Λογιστικού Φύλλου Excel με τις Αντίστοιχες Βαθμολογίες\n 3. 
Έξοδος\"\"\")\n\n choice = int(input())\n if choice == 1: marks()\n elif choice == 2: spreadSheet()\n elif choice == 3: exit()\n else: print(\"Unknown Choice\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"73102429","text":"import tensorflow as tf\nimport sys\nimport numpy as np\n\nbin_size = 128\nnum_features = 15\n\ndef load_graph(frozen_graph_filename):\n # We load the protobuf file from the disk and parse it to retrieve the\n # unserialized graph_def\n with tf.compat.v1.gfile.GFile(frozen_graph_filename, \"rb\") as f:\n graph_def = tf.compat.v1.GraphDef()\n graph_def.ParseFromString(f.read())\n\n # Then, we can use again a convenient built-in function to import a graph_def into the\n # current default Graph\n with tf.Graph().as_default() as graph:\n tf.import_graph_def(\n graph_def,\n input_map=None,\n return_elements=None,\n name=\"\",\n op_dict=None,\n producer_op_list=None\n )\n return graph \n\ngraph = load_graph(sys.argv[1])\n\nwith tf.compat.v1.Session(graph=graph) as sess:\n out = sess.run(\"Identity:0\", feed_dict={\"x:0\": np.random.randn(1, 39*bin_size, num_features)})\n print(out)\n","sub_path":"scripts/test_load_tfmodel.py","file_name":"test_load_tfmodel.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"332655922","text":"from extractFeature import extractFeature\nfrom fancymodel import *\nfrom extractFeature_Pandas import extractFeature_Pandas\nfrom damnrule import ruleFile\n\npwd = 'z:\\\\theblueisland\\\\'\nruleScore = 1\ntrainthreshold = 3\nonlineThreshold = 3\n\ndef updateFeature(begin = '11_18', days = 1, target = 0):\n f = open('update_feature_log.log', 'w')\n if (target):\n f.write('target updating... ')\n extractFeature_Pandas('target ', random = 0, target = 1)\n f.write('update complete\\n')\n f.flush()\n daySet = getDates(begin, days)\n for d in daySet:\n f.write(d + ' updating... 
')\n extractFeature_Pandas(d)\n f.write(d + ' update complete\\n')\n f.flush()\n f.close()\n\ndef train(begin = '11_18', days = 1):\n X, y = generateXy(begin, days) \n gridTrain(X, y)\n \ndef localTest(): \n 'test on the local set'\n X_test = np.load(pwd + 'X_test.npy')\n y_test = np.load(pwd + 'y_test.npy')\n y = pd.DataFrame(index = range(len(X_test)))\n clf, score= modelFactory('predict')[0]\n y_pred = clf.predict(X_test)\n y_pred = np.logical_and(y_pred, y_pred)\n i = 1\n y.loc[y_pred, i] = score\n print (clf)\n print ('y_pred: ' + str(y_pred.sum()))\n print (classification_report(y_test, y_pred))\n print ('')\n for clf, score in modelFactory('predict')[1 : ]:\n y_pred = clf.predict(X_test)\n y_pred = np.logical_and(y_pred, y_pred)\n i += 1\n y.loc[y_pred, i] = score\n print (clf)\n print ('y_pred: ' + str(y_pred.sum()))\n print (classification_report(y_test, y_pred))\n print ('')\n y.fillna(value = 0, inplace = True)\n y = y.sum(axis = 1) >= trainthreshold\n print ('final predict:')\n print ('y_pred: ' + str(y.sum()))\n print (classification_report(y_test, y))\n \ndef onlineSet(norule = 1):\n index = ['user_id', 'item_id']\n #1 predict the y_pred\n X = pd.read_csv(prefix + 'feature_target.csv', header = None)\n X = MinMaxScaler().fit_transform(X)\n # y = pd.DataFrame(index = range(len(X)))\n y = pd.read_csv(prefix + 'example_target.csv', names = index)\n clf, score = modelFactory('predict')[0]\n y_pred = clf.predict(X)\n y_pred = np.logical_and(y_pred, y_pred)\n i = 1\n y.loc[y_pred, i] = score\n print (y_pred.sum())\n for clf, score in modelFactory('predict')[1 : ]:\n y_pred = clf.predict(X)\n y_pred = np.logical_and(y_pred, y_pred)\n i += 1\n y.loc[y_pred, i] = score\n print (y_pred.sum())\n y.fillna(value = 0, inplace = True)\n print ('online set before rule: ' \n + str((y.ix[ : , 1 :].sum(axis = 1) >= onlineThreshold).sum()))\n # add rule\n if (norule == 0):\n rule = pd.read_csv(ruleFile, names = ['user_id', 'item_id'])\n rule['rule'] = ruleScore\n y = pd.merge(y, rule, how = 'outer')\n y.fillna(value = 0, inplace = True)\n # threshold, get online\n online = y[y.ix[ : , 1 :].sum(axis = 1) >= onlineThreshold]\n online = online.ix[ : , 'user_id' : 'item_id']\n print ('online set before cross: ' + str(len(online)))\n # cross the subItem set\n l = pd.read_csv(pwd + 'data_version2\\\\subItem.csv', \n names = ['item_id', 'item_category'])\n online = pd.merge(online, l)\n # remove the repeat (user_id, item_id)\n online.drop_duplicates(inplace = True)\n print ('online set final: ' + str(len(online)))\n # into file\n online.ix[ :, : -1].to_csv(pwd + 'tianchi_mobile_recommendation_predict.csv', \n na_rep = '0', index = False, header = True)\n\ndef test():\n updateFeature('11_18', 10)\n extractFeature_Pandas('12_8', 0)\n extractFeature_Pandas('target', target = 1)\n train('11_18', 10)\n localTest()\n onlineSet()\n \ndef main():\n updateFeature('11_18', 21)\n extractFeature_Pandas('12_8', 0)\n extractFeature_Pandas('target', target = 1)\n train('11_18', 20)\n localTest()\n train('11_18', 21)\n onlineSet()\n\nif __name__ == '__main__': test()\n","sub_path":"rockup.py","file_name":"rockup.py","file_ext":"py","file_size_in_byte":4003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"594457356","text":"# Darrell S Powe III\n# This is simple list based program that builds on the previous\n# This is a practice exercise from the book 'Python Crash Course' by Eric Matthes\n\nguests = ['Frank', 'Bob', 'Doug', 'Justin']\n\nfor i in 
range(len(guests)):\n print('Hello ' + guests[i] + ' I am writing you all to inform you that since Zack couldn\\'t make it to dinner ' +\n guests[3] + ' will be taking his place instead. I have also found a bigger dinner table, so will be inviting '\n 'more guests shortly')\n\nguests.insert(0, 'Steve')\nguests.insert(3, 'Robert')\nguests.append('Gregory')\nprint('\\n')\nfor i in range(len(guests)):\n print('Here are new initiations for you all, ' + guests[i] + ', I hope you all will be able to attend my birthday')\n\n","sub_path":"MoreGuests.py","file_name":"MoreGuests.py","file_ext":"py","file_size_in_byte":768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"627875676","text":"\"\"\"\nThis sphinx extension allows the user to include wavedrom waveform diagrams\nin its documentation, by just using the textual description, rather than\ngenerating and including static images.\n\nAt the moment, the extension doesn't generate static images itself, but\nrelies on the js scripts provided by wavedrom to generate them in the browser.\n\nBy default, the script will use the js hosted on the wavedrom servers.\nThis is the easiest setup, but relies on an active internet connection and a\nstable hosting on wavedrom's side. In the near future, I aim to add the possibility\nto use a local version of the js scripting, to allow for offline reading.\n\"\"\"\n\nfrom docutils import nodes\nfrom docutils.parsers.rst import Directive\nfrom sphinx.util import copy_static_entry\nfrom os import path\n\nONLINE_SKIN_JS = \"http://wavedrom.com/skins/default.js\"\nONLINE_WAVEDROM_JS = \"http://wavedrom.com/WaveDrom.js\"\n\nWAVEDROM_HTML = \"\"\"\n
<div>\n<script type=\"WaveDrom\">\n{content}\n</script>\n</div>
\n\"\"\"\n\nclass WavedromDirective(Directive):\n \"\"\"\n Directive to declare items and their traceability relationships.\n Syntax::\n\n .. wavedrom::\n\n [wavedrom_description]\n\n This directive will trigger the generation of a \"raw\" docutils node.\n The raw html content will be the same as the one passed on to the directive,\n but surrounded by some HTML tags that allow rendering of the image through javascript.\n\n \"\"\"\n # Setting has_content to true marks that this directive has content (stored in self.content)\n has_content = True\n\n def run(self):\n env = self.state.document.settings.env\n text = WAVEDROM_HTML.format(content=\"\\n\".join(self.content))\n content = nodes.raw(text=text, format='html')\n return [content]\n\ndef builder_inited(app):\n \"\"\"\n We instruct sphinx to include some javascript files in the output html.\n Depending on the settings provided in the configuration, we take either\n the online files from the wavedrom server, or the locally provided wavedrom\n javascript files\n \"\"\"\n if app.config.offline_skin_js_path is not None:\n app.add_javascript(path.basename(app.config.offline_skin_js_path))\n else:\n app.add_javascript(ONLINE_SKIN_JS)\n if app.config.offline_wavedrom_js_path is not None:\n app.add_javascript(path.basename(app.config.offline_wavedrom_js_path))\n else:\n app.add_javascript(ONLINE_WAVEDROM_JS)\n\ndef build_finished(app, exception):\n \"\"\"\n When the build is finished, we copy the javascript files (if specified)\n to the build directory (the static folder)\n \"\"\"\n if app.config.offline_skin_js_path is not None:\n copy_static_entry(path.join(app.builder.srcdir, app.config.offline_skin_js_path), path.join(app.builder.outdir, '_static'), app.builder)\n if app.config.offline_wavedrom_js_path is not None:\n copy_static_entry(path.join(app.builder.srcdir, app.config.offline_wavedrom_js_path), path.join(app.builder.outdir, '_static'), app.builder)\n\ndef doctree_resolved(app, doctree, fromdocname):\n \"\"\"\n When the document, and all the links are fully resolved, we inject one\n raw html element for running the command for processing the wavedrom\n diagrams at the onload event.\n \"\"\"\n text = \"\"\"\n <script type=\"text/javascript\">\n function init() {\n WaveDrom.ProcessAll();\n }\n window.onload = init;\n </script>\n \"\"\"\n doctree.append(nodes.raw(text=text, format='html'))\n# -----------------------------------------------------------------------------\n# Extension setup\n\ndef setup(app):\n app.add_config_value('offline_skin_js_path', None, 'html')\n app.add_config_value('offline_wavedrom_js_path', None, 'html')\n app.add_directive('wavedrom', WavedromDirective)\n app.connect('build-finished', build_finished)\n app.connect('builder-inited', builder_inited)\n app.connect('doctree-resolved', doctree_resolved)\n\n","sub_path":"sphinxcontrib/wavedrom.py","file_name":"wavedrom.py","file_ext":"py","file_size_in_byte":3950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"285800733","text":"flag = False\r\n\r\ndef search(game_list, sub_list, size, start_list, end_list, index, x1, y1):\r\n global flag\r\n if x1 == end_list[index][0] and y1 == end_list[index][1]:\r\n if index == len(end_list) - 1:\r\n for each in game_list:\r\n print(each)\r\n flag = True\r\n else:\r\n search(game_list, sub_list, size, start_list, end_list, index + 1, start_list[index + 1][0], start_list[index + 1][1])\r\n return\r\n\r\n if flag:\r\n return\r\n\r\n for i in range(len(end_list)):\r\n if game_list[end_list[i][0]][end_list[i][1]] != 0 and game_list[end_list[i][0]][end_list[i][1]] != i + 1:\r\n return\r\n\r\n 
direction_list = [[-1, 0], [1, 0], [0, -1], [0, 1]]\r\n for x0, y0 in direction_list:\r\n x = x1 + x0\r\n y = y1 + y0\r\n if 0 <= x < size and 0 <= y < size and not game_list[x][y]:\r\n sub_list[index].append((x, y))\r\n game_list[x][y] = index + 1\r\n search(game_list, sub_list, size, start_list, end_list, index, x, y)\r\n sub_list[index].pop()\r\n game_list[x][y] = 0\r\n\r\n\r\nif __name__ == '__main__':\r\n size = 6\r\n game_list = [[0 for _ in range(size)] for _ in range(size)]\r\n start_list = [(3, 0), (4, 0), (1, 1), (1, 2), (1, 3), (0, 3)]\r\n end_list = [(0, 2), (5, 5), (4, 5), (3, 5), (2, 5), (0, 5)]\r\n\r\n sub_list = [[] for i in range(len(start_list))]\r\n for i in range(len(start_list)):\r\n sub_list[i].append(start_list[i])\r\n game_list[start_list[i][0]][start_list[i][1]] = i + 1\r\n\r\n search(game_list, sub_list, size, start_list, end_list, 0, start_list[0][0], start_list[0][1])\r\n","sub_path":"python/star-star.py","file_name":"star-star.py","file_ext":"py","file_size_in_byte":1655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"467406023","text":"import argparse\nimport cv2\nimport os\nimport numpy as np\nimport tensorflow as tf\n\nCROP_SIZE = 256\n\ndef load_graph(frozen_graph_filename):\n \"\"\"Load a (frozen) Tensorflow model into memory.\"\"\"\n graph = tf.Graph()\n with graph.as_default():\n od_graph_def = tf.GraphDef()\n with tf.gfile.GFile(frozen_graph_filename, 'rb') as fid:\n serialized_graph = fid.read()\n od_graph_def.ParseFromString(serialized_graph)\n tf.import_graph_def(od_graph_def, name='')\n return graph\n\n\ndef main():\n # TensorFlow\n graph = load_graph(args.frozen_model_file)\n image_tensor = graph.get_tensor_by_name('image_tensor:0')\n output_tensor = graph.get_tensor_by_name('generate_output/output:0')\n sess = tf.Session(graph=graph)\n\n # determine image size automatically from trained model\n CROP_SIZE = int(image_tensor.shape[0])\n print(\"CROP_SIZE:\", CROP_SIZE)\n\n # OpenCV\n cap = cv2.VideoCapture(args.video_source)\n fps = cap.get(cv2.CAP_PROP_FPS)\n print(\"FPS:\", fps)\n\n # Define the codec and create VideoWriter object\n fourcc = cv2.VideoWriter_fourcc(*'mp4v')\n out = cv2.VideoWriter(args.video_output, fourcc, fps, (CROP_SIZE, CROP_SIZE))\n\n while cap.isOpened():\n try:\n ret, frame = cap.read()\n except Exception as e:\n print(\"Failed to grab\", e)\n break\n\n if not ret or frame is None:\n break\n\n # generate prediction\n image_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # OpenCV uses BGR instead of RGB\n image_rgb = np.concatenate([image_rgb, image_rgb], axis=1)\n generated_image = sess.run(output_tensor, feed_dict={image_tensor: image_rgb})\n image_bgr = cv2.cvtColor(np.squeeze(generated_image), cv2.COLOR_RGB2BGR)\n out.write(image_bgr)\n\n sess.close()\n cap.release()\n out.release()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('video_source', help='Device index of the camera.')\n parser.add_argument('video_output', help='Output video file.')\n parser.add_argument('frozen_model_file', help='Frozen TensorFlow model file.')\n args = parser.parse_args()\n main()\n","sub_path":"pose_generation.py","file_name":"pose_generation.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"85984281","text":"import requests\n\nfrom bs4 import BeautifulSoup\nimport csv\n\nfile_name = 'jokbal.csv'\nfile = open(file_name, mode=\"w\", 
encoding=\"utf-8\")\nwriter = csv.writer(file)\nwriter.writerow(['title', 'location', 'work_time', 'payment', 'regDate'])\n\n# print(url)\n# print(name)\n# r = requests.get(url)\n# soup = BeautifulSoup(r.text, 'html.parser')\n# f = open(\"eng.html\") # simplified for the example (no urllib)\n# f = open(\"jokbal.html\", encoding='utf8') # simplified for the example (no urllib)\n# f = open(\"/home/jeonghn/dev/python/scraping-challenge/day7/jokbal.html\",'rb')\n# soup = BeautifulSoup(f)\n# f.close()\n\n# with open(\"/home/jeonghn/dev/python/scraping-challenge/day7/jokbal.html\", encoding=\"utf8\") as f:\n# contents = f.read()\n# soup = BeautifulSoup(contents, \"html.parser\")\n# soup=BeautifulSoup(open(\"/home/jeonghn/dev/python/scraping-challenge/day7/jokbal.html\"))\nsoup=BeautifulSoup(open(\"/home/jeonghn/dev/python/scraping-challenge/day7/jokbal.html\", encoding='utf8'),'html.parser')\nlists = soup.find('div', {\"class\": \"goodsList\"})\ntable = lists.find('tbody')\njobs = table.find_all(\"tr\", {\"class\": [\"divide\", \"\"]})\n# trs=table.find_all(\"tr\")\n# result = [tr for tr in trs if 'summaryView' not in tr['class']]\n# print(jobs[:1])\n\njob_list = []\nfor job in jobs[:]:\n # print(job)\n job_detail = job.find_all('td')\n # print(job_detail[0].text)\n # print(job_detail[1].find('a').find('span', {\"class\":\"company\"}).text)\n # # print(job_detail[1].find('a').find_all('span').select('.company'))\n # print(job_detail[2].find('span',{\"class\":\"time\"}).text)\n # print(job_detail[3].find('span', {\"class\":'payIcon'}).text)\n # print(job_detail[3].find('span', {\"class\": 'number'}).text)\n # print(job_detail[4].text)\n if job_detail is not None:\n job = {'title': job_detail[1].find('a').find('span', {\"class\": \"company\"}).text,\n 'location': job_detail[0].text, 'work_time': job_detail[2].find('span', {\"class\": \"time\"}).text,\n 'payment': job_detail[3].find('span', {\"class\": 'payIcon'}).text + job_detail[3].find('span', {\n \"class\": 'number'}).text, 'regDate': job_detail[4].text}\n # print(job.values)\n # print([job['title'],job['location']])\n writer.writerow([job['title'], job['location'], job['work_time'], job['payment'], job['regDate']])\n else:\n writer.writerow('No available jobs')\n\nfile.close()\n#\n# for job in jobs[:2]:","sub_path":"day7/parsing_local_file.py","file_name":"parsing_local_file.py","file_ext":"py","file_size_in_byte":2437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"631775777","text":"# encoding: utf-8\r\n\r\nimport random\r\nfrom PIL import Image, ImageDraw, ImageFont\r\nimport string\r\n\r\nclass Captcha(object):\r\n # number of characters in the captcha\r\n number = 4\r\n # width and height of the captcha image\r\n size = (100,30)\r\n # captcha font size\r\n frontsize = 25\r\n # number of interference lines to add\r\n line_number = 2\r\n\r\n # build the captcha source character set\r\n SOURCE = list(string.ascii_letters)\r\n for index in range(0,10):\r\n SOURCE.append(str(index))\r\n\r\n # generate a random color\r\n @classmethod\r\n def __gene_random_color(cls,start=0,end=255):\r\n # random.seed()\r\n return (random.randint(start,end),random.randint(start,end),random.randint(start,end))\r\n\r\n # randomly pick a font\r\n @classmethod\r\n def __gene_random_font(cls):\r\n fonts = [\r\n 'Courgette-Regular.ttf',\r\n 'LHANDW.TTF',\r\n 'Lobster-Regular.ttf',\r\n 'verdana.ttf'\r\n ]\r\n font = random.choice(fonts)\r\n return 'utils/captcha/'+font\r\n\r\n # generate a random string\r\n @classmethod\r\n def gene_text(cls, number):\r\n return ''.join(random.sample(cls.SOURCE,number))\r\n\r\n # draw an interference line\r\n @classmethod\r\n def 
__gene_line(cls,draw,width,height):\r\n begin = (random.randint(0,width),random.randint(0,height))\r\n end = (random.randint(0,width),random.randint(0,height))\r\n draw.line([begin,end], fill=cls.__gene_random_color(),width=2)\r\n\r\n # draw interference (noise) points\r\n @classmethod\r\n def __gene_points(cls,draw,point_chance,width,height):\r\n chance = min(100,max(0,int(point_chance)))\r\n for w in range(width):\r\n for h in range(height):\r\n tmp = random.randint(0,100)\r\n if tmp > 100 - chance:\r\n draw.point((w,h),fill=cls.__gene_random_color())\r\n\r\n ### generate the captcha ###\r\n @classmethod\r\n def gene_graph_captcha(cls):\r\n # width and height of the captcha image\r\n width,height = cls.size\r\n # create the image\r\n image = Image.new('RGBA',(width,height),cls.__gene_random_color(0,100))\r\n # captcha font\r\n font = ImageFont.truetype(cls.__gene_random_font(),cls.frontsize)\r\n # create the draw object\r\n draw = ImageDraw.Draw(image)\r\n # generate the text\r\n text = cls.gene_text(cls.number)\r\n # get the size of the rendered text\r\n font_width, font_height = font.getsize(text)\r\n # draw the text\r\n draw.text(((width-font_width)/2, (height-font_height)/2),text,fill=cls.__gene_random_color(150,255),font=font)\r\n # draw interference lines\r\n for x in range(0,cls.line_number):\r\n cls.__gene_line(draw,width,height)\r\n # add noise points\r\n cls.__gene_points(draw,10,width,height)\r\n return (text,image)","sub_path":"utils/captcha/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"138491906","text":"import random\nimport re\nimport time\n\nfrom Downloader.Downloader import *\nfrom Utils import Utils\nfrom Utils.RedisUtils import RedisUtils\n\nname = 'Spider58'\nrootUrls = [\"http://gz.58.com/chuzu/\",\n \"http://sz.58.com/chuzu/\",\n \"http://bj.58.com/chuzu/\",\n \"http://sh.58.com/chuzu/\"\n ]\n\nredis = RedisUtils()\n# Parse all listing links on one page and get the link of the next page\ndef parsePage(page_url):\n response = downloadHttpResponse(page_url)\n lis = response.xpath('//ul[@class=\"listUl\"]/li')\n total = len(lis)\n new_urls = 0\n old_urls = 0\n for li in lis:\n try:\n url_ = li.xpath('./div[@class=\"img_list\"]/a/@href').extract()[0]\n if re.match(r'^http', url_):\n url = url_\n else:\n url = \"http:%s\"%url_\n\n if redis.add_to_redis(name, url):\n print('%s %s new listing link: %s' % (Utils.getCurrentTime(), name, url))\n new_urls += 1\n else:\n print('%s %s old listing link: %s' % (Utils.getCurrentTime(), name, url))\n old_urls += 1\n except:\n pass\n print('new listings: %d, old listings: %d' % (new_urls, old_urls))\n if new_urls / total <= 0.4:\n print('%s too many old listings, stop crawling %s' % (Utils.getCurrentTime(), page_url))\n return None\n\n try:\n next_url = response.xpath('//div[@class=\"pager\"]/a[@class=\"next\"]/@href').extract()[0]\n return next_url\n except:\n return None\n\n\n# Keep parsing all listing links of one city\ndef crawlRoot(root):\n index = 1\n nextpage = parsePage(root)\n while nextpage is not None:\n time.sleep(random.randint(10,20))\n nextpage = parsePage(nextpage)\n index += 1\n if index > 30:\n break\n\n\ndef crawl58Urls():\n for item in rootUrls:\n crawlRoot(item)\n\n\n\nif __name__ == '__main__':\n crawlRoot(rootUrls[0])\n","sub_path":"UrlsSpiders/Spider58.py","file_name":"Spider58.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"90854547","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pywikibot, re, sys, argparse\n\nimport blib\nfrom blib import getparam, rmparam, msg, site, tname, pname\n\ntemplates_with_sc = {\n \"t\": [\"alt\", \"2\"],\n \"t+\": [\"alt\", \"2\"],\n \"t-\": [\"alt\", 
\"2\"],\n \"t+check\": [\"alt\", \"2\"],\n \"t-check\": [\"alt\", \"2\"],\n \"l\": [\"3\", \"2\"],\n \"link\": [\"3\", \"2\"],\n \"l-self\": [\"3\", \"2\"],\n \"ll\": [\"3\", \"2\"],\n \"m\": [\"3\", \"2\"],\n \"mention\": [\"3\", \"2\"],\n \"m-self\": [\"3\", \"2\"],\n \"m+\": [\"3\", \"2\"],\n}\n\ndef process_text_on_page(index, pagetitle, text):\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n\n pagemsg(\"Processing\")\n\n notes = []\n\n global args\n\n def expand_text(tempcall):\n return blib.expand_text(tempcall, pagetitle, pagemsg, args.verbose)\n\n parsed = blib.parse_text(text)\n for t in parsed.filter_templates():\n tn = tname(t)\n origt = str(t)\n if tn in templates_with_sc:\n if not t.has(\"sc\"):\n continue\n lang = getparam(t, \"1\")\n sc = getparam(t, \"sc\")\n if not sc:\n rmparam(t, \"sc\")\n notes.append(\"remove blank sc= from {{%s}}\" % tn)\n else:\n params_to_check = templates_with_sc[tn]\n if type(params_to_check) is not list:\n params_to_check = [params_to_check]\n for param in params_to_check:\n value_to_check = getparam(t, param)\n if value_to_check:\n break\n if not value_to_check:\n pagemsg(\"WARNING: For lang=%s, no displayable value, not removing sc=%s: %s\" % (lang, sc, str(t)))\n continue\n detected_sc = expand_text(\"{{#invoke:scripts/templates|findBestScript|%s|%s}}\" % (value_to_check, lang))\n if not detected_sc:\n continue\n if detected_sc == \"ms-Arab\" and sc == \"Arab\" and lang == \"ms\":\n pagemsg(\"Detected script ms-Arab for lang=ms, saw explicit sc=Arab, which is probably wrong, removing sc=: %s\" % (str(t)))\n if detected_sc != sc:\n if len(detected_sc) >= 4 and len(sc) >= 4 and detected_sc[-4:] == sc[-4:]:\n pagemsg(\"For lang=%s, detected script %s, saw explicit sc=%s, both are variants of the same script, removing sc=: %s\" % (lang, detected_sc, sc, str(t)))\n elif detected_sc == \"None\":\n pagemsg(\"WARNING: For lang=%s, detected script %s but saw explicit sc=%s, which may be right: %s\" % (lang, detected_sc, sc, str(t)))\n continue\n else:\n force_detected_sc = expand_text(\"{{#invoke:scripts/templates|findBestScript|%s|%s|true}}\" % (value_to_check, lang))\n if force_detected_sc == detected_sc:\n pagemsg(\"WARNING: For lang=%s, force-detected script %s but saw explicit sc=%s, explicit sc= probably wrong: %s\" % (lang, detected_sc, sc, str(t)))\n else:\n pagemsg(\"WARNING: For lang=%s, detected script %s but force-detected %s and saw explicit sc=%s, which may be right: %s\" % (lang, detected_sc, force_detected_sc, sc, str(t)))\n continue\n rmparam(t, \"sc\")\n notes.append(\"remove redundant sc=%s from {{%s}}\" % (sc, tn))\n if str(t) != origt:\n pagemsg(\"Replaced %s with %s\" % (origt, str(t)))\n return parsed, notes\n\nparser = blib.create_argparser(\"Remove redundant sc=\", include_pagefile=True, include_stdin=True)\nargs = parser.parse_args()\nstart, end = blib.parse_start_end(args.start, args.end)\n\nblib.do_pagefile_cats_refs(args, start, end, process_text_on_page, edit=True, stdin=True)\n","sub_path":"remove_extraneous_sc.py","file_name":"remove_extraneous_sc.py","file_ext":"py","file_size_in_byte":3438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"630698599","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# File: base.py\n# Author: Qian Ge \n\nimport os\nimport numpy as np \nimport lib.utils.utils as utils\n# from tensorcv.dataflow.common import get_file_list\nfrom tensorcv.dataflow.base import RNGDataFlow\n\ndef get_file_list(file_dir, 
file_ext, sub_name=None):\n # assert file_ext in ['.mat', '.png', '.jpg', '.jpeg']\n re_list = []\n\n if sub_name is None:\n return np.array([os.path.join(root, name)\n for root, dirs, files in os.walk(file_dir) \n for name in sorted(files) if name.endswith(file_ext)])\n else:\n return np.array([os.path.join(root, name)\n for root, dirs, files in os.walk(file_dir) \n for name in sorted(files) if name.endswith(file_ext) and sub_name in name])\n\nclass DataFlow(RNGDataFlow):\n def __init__(self,\n data_name_list,\n # data_type_list,\n # n_channel_list,\n data_dir='',\n shuffle=True,\n batch_dict_name=None,\n load_fnc_list=None,\n # pf_list=None,\n ):\n data_name_list = utils.make_list(data_name_list)\n # data_type_list = utils.make_list(data_type_list)\n # n_channel_list = utils.make_list(n_channel_list)\n load_fnc_list = utils.make_list(load_fnc_list)\n # pf_list = utils.make_list(pf_list)\n utils.assert_len([data_name_list, load_fnc_list])\n self._n_dataflow = len(data_name_list)\n # self._data_name_list = data_name_list\n # self._data_type_list = data_type_list\n # self._n_channel_list = n_channel_list\n # pf_list = [pf if pf is not None else identity for pf in pf_list]\n # self._pf_list = pf_list\n self._load_fnc_list = load_fnc_list\n\n self._data_dir = data_dir\n self._shuffle = shuffle\n self._batch_dict_name = batch_dict_name\n\n self._data_id = 0\n self.setup(epoch_val=0, batch_size=1)\n self._load_file_list(data_name_list)\n\n def size(self):\n return len(self._file_name_list[0])\n\n def _load_file_list(self, data_name_list):\n data_dir = self._data_dir\n self._file_name_list = []\n for data_name in data_name_list:\n self._file_name_list.append(get_file_list(data_dir, data_name))\n if self._shuffle:\n self._suffle_file_list()\n\n def _suffle_file_list(self):\n idxs = np.arange(self.size())\n self.rng.shuffle(idxs)\n for idx, file_list in enumerate(self._file_name_list):\n self._file_name_list[idx] = file_list[idxs]\n\n def next_batch(self):\n assert self._batch_size <= self.size(), \\\n \"batch_size cannot be larger than data size\"\n\n if self._data_id + self._batch_size > self.size():\n start = self._data_id\n end = self.size()\n else:\n start = self._data_id\n self._data_id += self._batch_size\n end = self._data_id\n batch_data = self._load_data(start, end)\n\n if end == self.size():\n self._epochs_completed += 1\n self._data_id = 0\n if self._shuffle:\n self._suffle_file_list()\n return batch_data\n\n def _load_data(self, start, end):\n data_list = [[] for i in range(0, self._n_dataflow)]\n for k in range(start, end):\n for read_idx, read_fnc in enumerate(self._load_fnc_list):\n data = read_fnc(self._file_name_list[read_idx][k])\n data_list[read_idx].append(data)\n\n for idx, data in enumerate(data_list):\n data_list[idx] = np.array(data)\n\n return data_list\n\n def next_batch_dict(self):\n batch_data = self.next_batch()\n return {key: data for key, data in zip(self._batch_dict_name, batch_data)} \n","sub_path":"src/dataflow/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"105242852","text":"import pygame as pg\n\n# from constants import \nfrom classes.entities import Player\nfrom scenes.base import Scene\nfrom scenes.game.beginning import TowerFloor1\n\nclass NewFileMenu(Scene):\n \"\"\"Menu for choosing the save file.\"\"\"\n\n def __init__(self, name, manager):\n super(NewFileMenu, self).__init__(manager)\n self.name = name\n self.filename = []\n 
self.is_clearing = False\n self.clearing_time = 0\n # NOTE: self.chars is used in handle_events below but was never defined; assuming a basic filename charset\n self.chars = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-\"\n self.title = self.font.render(\"Gamer Project\", True, (255,255,255))\n\n def render(self, screen):\n screen.fill((0,0,0))\n pos = self.get_title_pos(screen, self.title)\n screen.blit(self.title, pos)\n field = self.sfont.render(\"Save file: %s\" % \"\".join(self.filename), True, (255,255,255))\n field = self.surround_with_rect(field)\n pos = self.get_mid_surf_pos(screen, field)\n screen.blit(field, pos)\n\n def update(self, delta):\n if self.is_clearing:\n self.clearing_time += delta\n # \"unpack\" all the clearing time\n while self.clearing_time > 500:\n if len(self.filename):\n self.filename.pop()\n self.clearing_time -= 100\n else:\n self.clearing_time = 0\n\n def handle_events(self, events):\n for e in events:\n if e.type == pg.KEYDOWN:\n if e.key == pg.K_ESCAPE:\n self.manager.go_back()\n elif e.unicode in self.chars:\n self.filename.append(e.unicode)\n elif e.key == pg.K_BACKSPACE:\n if len(self.filename):\n self.filename.pop()\n self.is_clearing = True\n elif e.key == pg.K_RETURN:\n player = Player(self.name)\n self.save(player, \"\".join(self.filename))\n self.manager.go_to(TowerFloor1(self.manager, player))\n elif e.type == pg.KEYUP:\n if e.key == pg.K_BACKSPACE:\n self.is_clearing = False\n self.clearing_time = 0\n","sub_path":"python/scenes/menus/newfile.py","file_name":"newfile.py","file_ext":"py","file_size_in_byte":2092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} {"seq_id":"513977663","text":"#!/usr/bin/python3\nimport socket\n\nip=\"13.233.66.67\"\nport=8888\n\ns=socket.socket(socket.AF_INET,socket.SOCK_DGRAM)\n\nmsg=input(\"enter the message to be sent\")\nn=msg.encode('ascii')\n\ns.sendto(n,(ip,port))\nprint(s.recvfrom(1000))\n","sub_path":"sender.py","file_name":"sender.py","file_ext":"py","file_size_in_byte":223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} {"seq_id":"131481839","text":"import logging\n\nclass Log:\n log = None\n INFO = logging.INFO\n \n @staticmethod\n def registraLog(arqLog, formato, DIC):\n logging.basicConfig(filename=arqLog, format = formato)\n Log.log = logging.getLogger()\n Log.log.setLevel(Log.INFO)\n logging.info(None, extra = DIC)","sub_path":"log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} {"seq_id":"416973714","text":"import tensorflow as tf\nimport keras\nimport sys\nimport csv\nimport math\nimport numpy as np\nfrom keras.models import Sequential \nfrom keras.layers import Dense,Dropout,Flatten,Conv2D,MaxPooling2D ,Activation\nfrom keras.optimizers import SGD,Adam\nfrom keras.utils import np_utils\nfrom keras.datasets import mnist\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom keras.layers.normalization import BatchNormalization\nfrom keras.initializers import he_normal\nfrom keras import regularizers\nfrom keras.callbacks import LearningRateScheduler\nimport keras.backend as K\ndef scheduler(epoch):\n lr = 0.01\n if epoch > 80:\n frac = (epoch - 80) //5\n decay_factor = 0.9 ** frac\n K.set_value(model.optimizer.lr, lr *decay_factor)\n print(\"lr changed to {}\".format(K.get_value(model.optimizer.lr)))\n return K.get_value(model.optimizer.lr)\n\ntypecount=7\nlabel=[]\ndata=[]\ncount=0\nwith open(sys.argv[1], newline='') as csvFile:\n#with open('/Users/peter yang/Downloads/train.csv', newline='') as csvFile:\n\trows = csv.reader(csvFile, delimiter=',')\n\tfor row in 
rows:\n\t\tif count==0:\n\t\t\tcount+=1\n\t\t\tcontinue\n\t\tdata.append([])\n\t\tlabel.append([])\n\t\tlabel[count-1].append(row[0])\n\t\tfor j in range(1,len(row)):\t\t\n\t\t\tdata[count-1].append(row[j])\n\t\tcount+=1\nsdata=[]\nfor i in range(len(data)):\n\tsdata.append(data[i][0].split())\n\nsdata=np.array(sdata,dtype=float)\nsdata=np.reshape(sdata,(len(data),48,48))\naugmantdata=[]\nauglabel=[]\ncrosssize=[[0,0],[0,6],[6,0],[6,6],[3,3]]\ncount1=0\nfor i in range(len(sdata)):\n\tfor j in range(5):\n\t\taugmantdata.append([])\n\t\tauglabel.append(label[i][0])\n\t\tfor x in range(crosssize[j][0],42+crosssize[j][0]):\n\t\t\tfor y in range(crosssize[j][1],42+crosssize[j][1]):\n\t\t\t\taugmantdata[count1].append(sdata[i][x][y])\n\t\tcount1+=1\ntrainlabel=[]\nfor i in range(len(augmantdata)):\n\ttrainlabel.append([])\n\tfor j in range(typecount):\n\t\tif(j==int(auglabel[i])):\n\t\t\ttrainlabel[i].append(1)\n\t\telse:\n\t\t\ttrainlabel[i].append(0)\naugmantdata=np.array(augmantdata,dtype=float)\naugmantdata/=255.0\ndel sdata\ndel label\ndel auglabel\ntrainlabel=np.array(trainlabel,dtype=int)\naugmantdata=np.reshape(augmantdata,(5*len(data),42,42,1))\n\nvdata=[]\nvlabel=[]\ncount=0\n\n\n\n\"\"\"\n#################################################################################################\nwith open(sys.argv[2], newline='') as csvFile:\n#with open('/Users/peter yang/Downloads/train.csv', newline='') as csvFile:\n\trows = csv.reader(csvFile, delimiter=',')\n\tfor row in rows:\n\t\tif count==0:\n\t\t\tcount+=1\n\t\t\tcontinue\n\t\tvdata.append([])\n\t\tvlabel.append([])\n\t\tvlabel[count-1].append(row[0])\n\t\tfor j in range(1,len(row)):\t\t\n\t\t\tvdata[count-1].append(row[j])\n\t\tcount+=1\nsvdata=[]\ntrainvlabel=[]\nleng=len(vdata)\nleng=int(leng)\n#############################\n##########################\nfor i in range(leng):\n\tsvdata.append(vdata[i][0].split())\nsvdata=np.array(svdata,dtype=float)\nsvdata=np.reshape(svdata,(leng,48,48))\ncount1=0\naugvdata=[]\naugvlabel=[]\nfor i in range(len(svdata)):\n\tfor j in range(5):\n\t\taugvdata.append([])\n\t\taugvlabel.append(vlabel[i][0])\n\t\tfor x in range(crosssize[j][0],42+crosssize[j][0]):\n\t\t\tfor y in range(crosssize[j][1],42+crosssize[j][1]):\n\t\t\t\taugvdata[count1].append(svdata[i][x][y])\n\t\tcount1+=1\n\naugvdata=np.array(augvdata,dtype=float)\naugvdata/=255.0\nfor i in range(len(augvdata)):\n\ttrainvlabel.append([])\n\tfor j in range(typecount):\n\t\tif(j==int(augvlabel[i][0])):\n\t\t\ttrainvlabel[i].append(1)\n\t\telse:\n\t\t\ttrainvlabel[i].append(0)\ntrainvlabel=np.array(trainvlabel,dtype=int)\naugvdata=np.reshape(augvdata,(5*leng,42,42,1))\n\"\"\"\ndrop=0.5\nmodel = Sequential()\nmodel.add(Conv2D(64, (3, 3), padding=\"same\",kernel_initializer='he_normal', input_shape=(42,42,1)))\nmodel.add(BatchNormalization()) \nmodel.add(Activation('relu'))\nmodel.add(Conv2D(64, (3, 3), padding=\"same\",kernel_initializer='he_normal'))\nmodel.add(BatchNormalization()) \nmodel.add(Activation('relu'))\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(128, (3, 3), padding=\"same\",kernel_initializer='he_normal'))\nmodel.add(BatchNormalization()) \nmodel.add(Activation('relu'))\nmodel.add(Conv2D(128, (2, 2), padding=\"same\", kernel_initializer='he_normal'))\nmodel.add(BatchNormalization()) \nmodel.add(Activation('relu'))\n\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(256, (3, 3), padding=\"same\", kernel_initializer='he_normal'))\nmodel.add(BatchNormalization()) 
\nmodel.add(Activation('relu'))\nmodel.add(Conv2D(256, (3, 3), padding=\"same\", kernel_initializer='he_normal'))\nmodel.add(BatchNormalization()) \nmodel.add(Activation('relu'))\nmodel.add(Conv2D(256, (3, 3), padding=\"same\", kernel_initializer='he_normal'))\nmodel.add(BatchNormalization()) \nmodel.add(Activation('relu'))\nmodel.add(Conv2D(256, (3, 3), padding=\"same\", kernel_initializer='he_normal'))\nmodel.add(BatchNormalization()) \nmodel.add(Activation('relu'))\n\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\n\nmodel.add(Conv2D(512, (3, 3), padding=\"same\", kernel_initializer='he_normal'))\nmodel.add(BatchNormalization()) \nmodel.add(Activation('relu'))\nmodel.add(Conv2D(512, (3, 3), padding=\"same\", kernel_initializer='he_normal'))\nmodel.add(BatchNormalization()) \nmodel.add(Activation('relu'))\nmodel.add(Conv2D(512, (3, 3), padding=\"same\", kernel_initializer='he_normal'))\nmodel.add(BatchNormalization()) \nmodel.add(Activation('relu'))\nmodel.add(Conv2D(512, (3, 3), padding=\"same\", kernel_initializer='he_normal'))\nmodel.add(BatchNormalization()) \nmodel.add(Activation('relu'))\n\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Conv2D(512, (3, 3), padding=\"same\", kernel_initializer='he_normal'))\nmodel.add(BatchNormalization()) \nmodel.add(Activation('relu'))\nmodel.add(Conv2D(512, (3, 3), padding=\"same\", kernel_initializer='he_normal'))\nmodel.add(BatchNormalization()) \nmodel.add(Activation('relu'))\nmodel.add(Conv2D(512, (3, 3), padding=\"same\", kernel_initializer='he_normal'))\nmodel.add(BatchNormalization()) \nmodel.add(Activation('relu'))\nmodel.add(Conv2D(512, (3, 3), padding=\"same\", kernel_initializer='he_normal'))\nmodel.add(BatchNormalization()) \nmodel.add(Activation('relu'))\n\nmodel.add(MaxPooling2D(pool_size=(2, 2)))\n\nmodel.add(Flatten())\n\n\nmodel.add(Dropout(drop))\n\n\nmodel.add(Dense(units=512,\n\t\t\t\tkernel_initializer='he_normal'))\nmodel.add(BatchNormalization())\nmodel.add(Activation('relu'))\nmodel.add(Dropout(drop))\n\nmodel.add(Dense(units=7))\nmodel.add(Activation('softmax'))\nsgd=SGD(lr=0.01,momentum=0.9,decay=5e-4,nesterov=False)\nmodel.compile(loss='categorical_crossentropy',optimizer=sgd,metrics=['accuracy'])\n\n\n\n\n\n\ngen = ImageDataGenerator( horizontal_flip=True )\n\ngen.fit(augmantdata)\n\ntrain_generator = gen.flow(augmantdata, trainlabel, batch_size=128)\n\ncw={0:1.8,\n\t1:1,\n\t2:1.7,\n\t3:1,\n\t4:1.5,\n\t5:2.3,\n\t6:1.5}\n\n\"\"\"\ntest_gen = ImageDataGenerator()\ntest_gen.fit(augvdata)\ntest_generator = test_gen.flow(augvdata, trainvlabel, batch_size=128)\n\"\"\"\nfrom keras.callbacks import ReduceLROnPlateau\nlearning_rate_function = ReduceLROnPlateau(monitor='val_acc', \n patience=1,\n min_delta=0.00001, \n verbose=1, \n factor=0.1)\n\n\nlrate = LearningRateScheduler(scheduler)\ncallbacklist=[lrate]\nmodel.fit_generator(train_generator,steps_per_epoch=120,\n epochs=250,\n callbacks=callbacklist,\n verbose=1,\n shuffle=True,\n # class_weight=cw,\n # validation_data=(augvdata,trainvlabel)\n )\n\n#model.fit(sdata,trainlabel,batch_size=100,epochs=50)\nmodel.save('my_model.h5')\nresult=model.evaluate(augmantdata,trainlabel,batch_size=1200)\nprint(\"result\",result[1])\n#\tpython HW3.py train.csv test1.csv","sub_path":"hw3/HW3.py","file_name":"HW3.py","file_ext":"py","file_size_in_byte":7855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"378518626","text":"\"\"\"Update PieceCommentVote.\n\nRevision ID: 35c0d4e5f47\nRevises: 60d61fb9582\nCreate Date: 2015-04-02 
10:37:19.831205\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '35c0d4e5f47'\ndown_revision = '60d61fb9582'\n\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('piece_comment_vote',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.Column('piece_comment_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['piece_comment_id'], ['piece_comment.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.drop_table('piece_comment_like')\n op.add_column('piece_comment', sa.Column('votes_count', sa.Integer(), nullable=True))\n op.drop_column('piece_comment', 'likes_count')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('piece_comment', sa.Column('likes_count', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True))\n op.drop_column('piece_comment', 'votes_count')\n op.create_table('piece_comment_like',\n sa.Column('id', mysql.INTEGER(display_width=11), nullable=False),\n sa.Column('created_at', mysql.DATETIME(), nullable=True),\n sa.Column('user_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n sa.Column('piece_comment_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),\n sa.ForeignKeyConstraint(['piece_comment_id'], [u'piece_comment.id'], name=u'piece_comment_like_ibfk_1'),\n sa.ForeignKeyConstraint(['user_id'], [u'user.id'], name=u'piece_comment_like_ibfk_2'),\n sa.PrimaryKeyConstraint('id'),\n mysql_default_charset=u'utf8',\n mysql_engine=u'InnoDB'\n )\n op.drop_table('piece_comment_vote')\n ### end Alembic commands ###\n","sub_path":"migrations/versions/20150402103719_35c0d4e5f47_update_piececommentvote.py","file_name":"20150402103719_35c0d4e5f47_update_piececommentvote.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"55999254","text":"import os\nfrom setuptools import setup\n\n# version (i.e., cme.firmware) is stored in the VERSION file in package root\nwith open(os.path.join(os.getcwd(), 'VERSION')) as f:\n\tversion = f.readline().strip()\n\nsetup (\n\tname\t\t\t\t\t= \"cmeapi\",\n\tversion\t\t\t\t\t= version,\n\tdescription \t\t\t= \"CME Application Programming Interface (API)\",\n\tpackages\t\t\t\t= ['cmeapi', 'cmeapi.api_routes', 'cmeapi.common'],\n\tinclude_package_data\t= True,\n\tzip_safe\t\t\t\t= False,\n\tinstall_requires\t\t= [\"CherryPy\", \"Paste\", \"Flask\", \"rrdtool==0.1.4\" ],\n\tentry_points\t\t\t= {'console_scripts': ['cmeapi = cmeapi.__main__:main'] }\n)\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"651986914","text":"from sklearn.manifold import TSNE\nfrom gensim.models.doc2vec import Doc2Vec\nimport pickle\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import manifold\n\ndef get_vector(path, label):\n model = Doc2Vec.load(path)\n RES = []\n for i in range(len(label)):\n RES.append(model.docvecs['g_'+str(i)])\n return np.array(RES)\n \nif __name__ == '__main__':\n path_dbow = './models/graph-dbow.model' \n Graph_Label_list = 
pickle.load(open('Graph_Label_list', 'rb'), encoding='bytes')\n embeds = get_vector(path_dbow, Graph_Label_list)\n tsne = manifold.TSNE(n_components = 2, init = 'pca', random_state = 0)\n Y = tsne.fit_transform(embeds)\n plt.figure(figsize=(8, 8))\n plt.scatter(Y[:, 0], Y[:, 1], c=np.array(Graph_Label_list).reshape(-1))\n plt.legend()\n","sub_path":"Notebook/data&code/COX2/Evaluate_by_visualization.py","file_name":"Evaluate_by_visualization.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"489131144","text":"from BRQueue import Queue\n\nfrom itertools import chain, combinations\n\"\"\"\npowerset([1,2,3]) --> Returns Dictionary where each value is a new distinct subset\n\"\"\"\ndef powersets(arr):\n xs=[*arr];D = {}\n for i,x in enumerate(list(chain.from_iterable(combinations(xs,n) for n in range(4)))):\n D[i]=x\n return D\n\nimport requests\nimport lxml.html as lh\n\ndef getTable(msg):\n url='https://www.x-rates.com/table/?from=' + msg + '&amount=1'\n page = requests.get(url)\n doc = lh.fromstring(page.content)\n #Parse data that are stored between .. of HTML\n tr_elements = doc.xpath('//tr')\n\n headers=[]\n #Parse Titles of Columns\n for i,ele in enumerate(tr_elements[0]):\n name=ele.text_content()\n headers.append(name)\n #print('headers', headers)\n\n ##Fill in rest of table\n table=[]\n for j in range(1,len(tr_elements)):\n row=tr_elements[j]\n \n if len(row)!=3:\n break\n #Iterate through each element of the row\n tds=row.iterchildren()\n data=[td.text_content() for td in tds]\n for i,td in enumerate(data):\n try:\n data[i]=float(td)\n except:\n pass\n data.insert(0, headers[0])\n table.append(data)\n return table\n\n##GENERATE TABLES\nallTables = [];arbitrages=[]\ncurrencies=['USD', 'JPY', 'EUR', 'INR', 'AUD']\nfor currency in currencies:\n allTables+=getTable(currency)\n\n#FIND ARBITRAGES\nfor trade in allTables:\n try:\n if trade[2]*trade[3]>1.0099:\n arbitrages.append(trade)\n except:\n pass\nprint('Calculating...')\nfor x in arbitrages:\n print(x, x[2]*x[3])\n\nprint('Generating Subpaths...')\nD=powersets(allTables);subpaths=[]\ni=0\nprint('Looking at subpaths...')\nfor subpath in D:\n while i == 0:\n subqueue=Queue();valid=True\n subqueue.enqueue(D[subpath])\n print(subqueue.getList())\n for i in range(len(D[subpath])-1):\n first = subqueue.dequeue()\n if not(first and first[1] == subqueue.front[0]):\n valid=False\n i+=1\n\nprint('done')","sub_path":"Python/Daily Coding Challenges/armitage.py","file_name":"armitage.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"519617162","text":"for x in range(1, 6):\n for y in range(1, 6):\n print(x, \" * \", y, \" = \", x * y)\n\nprint(\"\\n\")\n\n# wyswietlenie x i y w liniach by bardzie przypominaly tabelke\nfor x in range(1, 6):\n line = str(x)\n for y in range(1, 6):\n line += \"\\t\" + str(x*y)\n print(line)\n\nprint(\"\\n\")\n\n# przesuniecie kolumn w tabeli\nfor x in range(1, 6):\n line = str(x)\n for y in range(1, 6):\n line += (\"\\t%3d\" % (x * y)) # do linii dodany jest odstep jako tabulacja z %3d jako trzy miejsca na wartosc\n print(line)\n\nprint(\"\\n\")\n\n# cwiczenie\ni = 10\nfor i in range(10, 11):\n j = 1\n line = str(i) + \"! 
= 1\"\n for y in range(2, i + 1):\n line += \" * \" + str(y)\n j *= y\n line += \" = \" + str(j)\n print(line)\n\nprint(\"\\n\")\n\n# cwiczenie 2\ni = 10\nfor i in range(2, 11):\n j = 1\n line = str(i) + \"! = 1\"\n for y in range(2, i + 1):\n line += \" * \" + str(y)\n j *= y\n line += \" = \" + str(j)\n print(line)\n\nprint(\"\\n\")\n\n# inny zapis\nx = 10\nfor i in range(1, x + 1):\n result = 1\n for j in range(1, i + 1):\n result *= j\n print(i, \"!\", result)\n\nprint(\"\\n\")\n\n# cwiczenie 3\nlist_noun = [\"dog\", \"potato\", \"meal\", \"icecream\", \"car\"]\nlist_adj = [\"dirty\", \"big\", \"hot\", \"colorful\", \"fast\"]\n\nfor noun in list_noun:\n for adj in list_adj:\n print(adj, \"-\", noun)\n","sub_path":"nauka/kurs_podstawowy/cwicz7_7_for_zagniezdzony/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"23048319","text":"#!/usr/bin/env python\n\nimport subprocess\nimport gzip\nimport itertools\nimport struct\nimport array\nimport math\nimport os\n\ntest_dir = 'tests'\ntests = ['image1'];\n\ndef readResults(filename):\n with gzip.open(filename) as stream:\n results = []\n\n while True:\n headerBytes = stream.read(8)\n if len(headerBytes) == 0:\n return results\n\n scale, height, width = struct.unpack(' 1e-3:\n print ('Mismatched scales for %s (%f, %f)'%(prefix, oscale, cscale))\n exit()\n\n if oheight != cheight:\n print ('Mismatched heights for %s'%prefix)\n exit()\n\n if owidth != cwidth:\n print ('Mismatched widths for %s'%prefix)\n exit()\n\n diffs = [(math.fabs(a - b) if b > -1 else 0) for a, b in itertools.izip(ovalues, cvalues)]\n maxdiff = max(diffs)\n if maxdiff > 1e-3:\n print ('Found max diff of %f for %s'%(maxdiff, prefix))\n print ('entry %d'%pos)\n# exit()\n\n print('@@@ Result is of size (%d x %d) at a scale of %f'%(oheight, owidth, oscale))\n print('@@@ Greatest difference was %f'%maxdiff)\n\nif __name__ == '__main__':\n subprocess.check_call(['make'])\n\n for test in tests:\n tester(os.path.join(test_dir,test)) \n","sub_path":"test_large.py","file_name":"test_large.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"582753106","text":"from telegram.ext import ConversationHandler\nfrom c_utils import c_keyboard, get_c_exchange_rate\nfrom db import db, get_or_create_user, save_currency\nimport settings\n\nc_list = settings.available_currencies\n\n\ndef с_scenario_start(update, context):\n get_or_create_user(db, update.effective_user, update.message.chat.id)\n update.message.reply_text(\n 'Доступные валюты:',\n reply_markup=c_keyboard(*c_list, ['На главную'])\n )\n return 'user_currency'\n\n\ndef c_subscribe(update, context):\n user = get_or_create_user(db, update.effective_user, update.message.chat.id)\n save_currency(db, user['user_id'], context.user_data['default_currency'])\n\n\ndef set_exch_currency(update, context):\n user = get_or_create_user(db, update.effective_user, update.message.chat.id)\n default_exch_currency = update.message.text\n update.message.reply_text('Готово')\n return default_exch_currency\n\n\ndef c_scenario_rate(update, context):\n context.user_data['default_currency'] = update.message.text\n get_or_create_user(db, update.effective_user, update.message.chat.id)\n user_currency = update.message.text\n user_cur_rate = get_c_exchange_rate(user_currency)\n update.message.reply_text(f\"{user_cur_rate} руб за 1 
{user_currency}\",\n reply_markup=c_keyboard(['Подписаться на курс этой валюты'],\n ['Назад'], ['На главную']))\n return 'c_rate'\n\n\ndef c_cancel(update, context):\n get_or_create_user(db, update.effective_user, update.message.chat.id)\n update.message.reply_text('Возвращаемся в главное меню',\n reply_markup=c_keyboard(*settings.main))\n return ConversationHandler.END\n","sub_path":"c_scenario.py","file_name":"c_scenario.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"440986157","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import JsonResponse\nfrom django.views.decorators.http import require_POST\nfrom django.views.decorators.csrf import csrf_exempt\nfrom .forms import ImageForm\nfrom .models import Image\n\n\n@login_required(login_url=\"/account/login/\")\n@csrf_exempt\n@require_POST\ndef upload_image(request):\n form = ImageForm(data=request.POST)\n if form.is_valid():\n try:\n new_item = form.save(commit=False)\n new_item.user = request.user\n new_item.save()\n return JsonResponse({'status': '1'})\n except:\n return JsonResponse({'status': '0'})\n else:\n print(\"tijiao buhefa \")\n\n\n@login_required(login_url='/account/login/')\ndef list_images(request):\n images = Image.objects.filter(user=request.user)\n return render(request,'image/list_images.html',{\"images\":images})\n\n\n@login_required(login_url='/account/login/')\n@require_POST\n@csrf_exempt\ndef del_image(request):\n image_id = request.POST['image_id']\n try:\n image = Image.objects.get(id=image_id)\n image.delete()\n return JsonResponse({\"status\":'1'})\n except:\n return JsonResponse({\"status\":'2'})\n\n\ndef falls_images(request):\n images = Image.objects.all()\n return render(request,'image/falls_images.html',{\"images\":images})\n","sub_path":"mysite/image/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"616407752","text":"import numpy as np\ndef leven_dist(str1, str2):\n size_x = len(str1) + 1\n size_y = len(str2) + 1\n matrix = np.zeros ((size_x, size_y))\n for x in range(size_x):\n matrix [x, 0] = x\n for y in range(size_y):\n matrix [0, y] = y\n for i in range(1, size_x):\n for j in range(1, size_y):\n diff = 1 if str1[i - 1] != str2[j - 1] else 0\n matrix[i, j] = min(matrix[i - 1, j] + 1, matrix[i, j - 1] + 1, matrix[i - 1, j - 1] + diff)\n return int(matrix[size_x - 1, size_y - 1])\n\nstr1 = input()\nstr2 = input()\ndist = leven_dist(str1, str2)\nprint(dist)","sub_path":"Practise/levenshtein_dist.py","file_name":"levenshtein_dist.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"387217781","text":"__author__ = 'MinterS'\n\n# Refresh Access Token with Refresh Token from tokens.py\n# Collects user and client details from credentials.py\n# Calls the Auth API to return Access Token to file\n\nfrom Authentication import tokens, credentials\nfrom Resources import statusCodes\nimport requests\nimport json\n\n\ndef getAuthRefresh():\n # Collect Refresh Token and Build URL\n url = 'https://accounts.zoho.com/oauth/v2/token?refresh_token={refresh_token}&client_id={client_id}&client_secret={client_secret}&grant_type=refresh_token'\n url = url.replace(\"{refresh_token}\", tokens.getRefresh())\n 
url = url.replace(\"{client_id}\", credentials.client_id)\n url = url.replace(\"{client_secret}\", credentials.client_secret)\n\n # Call API and Return Status\n response = requests.post(url)\n status = response.status_code\n message = statusCodes.statusCode(status)\n\n # Print Input Data\n print('')\n print('---- Token Request ----')\n print('URL:', url)\n\n # Parse Response data\n json_data = json.loads(response.text)\n try:\n error = json_data[\"error\"]\n except:\n error = 'None'\n try:\n access = json_data[\"access_token\"]\n except:\n access = 'No Token'\n\n # Print Output Data\n print('')\n print('---- Token Results ----')\n if error != 'None':\n print('Error:', error)\n print(response)\n print(response.text)\n else:\n print('Access:', access)\n # Write Token to File\n tokens.saveAccess(access)\n\n\ntoken = getAuthRefresh()\n","sub_path":"Python/Zoho_CRM_API/2.0/getAuthRefresh.py","file_name":"getAuthRefresh.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"38859471","text":"import copy\nfrom flask import request\nfrom brown.exceptions import ValidationException\nfrom brown.lib import fields, utils\nfrom brown.database import db\nfrom brown.models import Network, Channel, Attempt\n\nclass Form(object):\n def __init__(self, *args, **kwargs):\n self.errors = {}\n self.cleaned_data = {}\n self.fields = copy.deepcopy(self._get_fields())\n\n # Set the field values passed in kwargs.\n for key, value in kwargs.iteritems():\n self.fields[key].set_value(value)\n\n self.validate()\n\n def _get_fields(self):\n \"\"\"Get all fields declared as the attributes of the form class.\"\"\"\n return {attr: getattr(self, attr) for attr\n in dir(self)\n if isinstance(getattr(self, attr), fields.Field)\n }\n\n def _add_error(self, field_name, e):\n \"\"\"Appends an error to the error list.\"\"\"\n if not field_name in self.errors:\n self.errors[field_name] = []\n self.errors[field_name].append(str(e))\n\n def get_error_list(self):\n errors = []\n for key, value in self.errors.iteritems():\n errors.extend(value)\n return errors\n\n def validate(self):\n \"\"\"Validates the form.\"\"\"\n self.errors = {}\n try:\n self.clean()\n except ValidationException as e:\n self._add_error('__custom__', e)\n for field_name, field in self.fields.iteritems():\n try:\n field.validate(self.cleaned_data[field_name])\n except ValidationException as e:\n self._add_error(field_name, e)\n\n def clean(self):\n \"\"\"Cleans all fields.\n Define the custom validation which depends on multiple fields here.\n Remember to call the base function first.\n \"\"\"\n self.cleaned_data = {}\n for field_name, field in self.fields.iteritems():\n self.cleaned_data[field_name] = field.clean(field.get_value())\n\n def is_valid(self, *args, **kwargs):\n \"\"\"Indicates whether this form is valid.\"\"\"\n return not bool(self.errors)\n\n def save(self, *args, **kwargs):\n \"\"\"Call this to save the form.\"\"\"\n pass\n\nclass AddChannelForm(Form):\n channel = fields.ChannelField()\n password = fields.StringField(required=False)\n network = fields.StringField()\n captcha = fields.BooleanField()\n\n def __init__(self, *args, **kwargs):\n super(AddChannelForm, self).__init__(*args, **kwargs)\n\n def clean(self):\n super(AddChannelForm, self).clean()\n self.network_instance = None\n self.channel_instance = None\n\n # Check if the network exists.\n if self.cleaned_data['network']:\n self.network_instance = Network.query.filter(\n 
Network.name==self.cleaned_data['network']\n ).first()\n if not self.network_instance:\n raise ValidationException('Network does not exist.')\n\n # Check if the channel exists.\n if self.cleaned_data['channel']:\n self.channel_instance = Channel.query.filter(\n Channel.network==self.network_instance,\n db.func.lower(Channel.name)==db.func.lower(self.cleaned_data['channel'])\n ).first()\n if self.channel_instance:\n raise ValidationException('Channel already exists.')\n\n # Captcha.\n if not self.cleaned_data['captcha']:\n raise ValidationException('Invalid captcha.')\n\n def save(self):\n # Add a channel.\n self.channel_instance = Channel(\n network=self.network_instance,\n name=self.cleaned_data['channel'],\n password=self.cleaned_data['password']\n )\n db.session.add(self.channel_instance)\n\n # Store the time to display the CAPTCHA later.\n attempt = Attempt(\n ip=utils.ip2long(request.remote_addr),\n type=Attempt.ADD_CHANNEL\n )\n db.session.add(attempt)\n\n db.session.commit()\n","sub_path":"brown/lib/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"269187712","text":"\nimport os\nimport pwd\nfrom pickle import dumps as pickle_dumps\nfrom pickle import loads as pickle_loads\nfrom typing import Union\nfrom ..argparsing import CommandlineParsingHelper\nfrom ..api.syntax import TaskDeclaration, GroupDeclaration\nfrom ..api.contract import TaskInterface\nfrom ..api.contract import ExecutorInterface\nfrom ..api.contract import ExecutionContext\nfrom ..context import ApplicationContext\nfrom ..inputoutput import IO\nfrom ..inputoutput import SystemIO\nfrom ..inputoutput import output_formatted_exception\nfrom .results import ProgressObserver\nfrom ..exception import InterruptExecution\nfrom ..audit import decide_about_target_log_files\nfrom ..api.temp import TempManager\nfrom .serialization import FORKED_EXECUTOR_TEMPLATE\nfrom .serialization import get_unpicklable\n\n\nclass OneByOneTaskExecutor(ExecutorInterface):\n \"\"\" Executes tasks one-by-one, providing a context that includes eg. parsed arguments \"\"\"\n\n _ctx: ApplicationContext\n _observer: ProgressObserver\n io: SystemIO\n\n def __init__(self, ctx: ApplicationContext):\n self._ctx = ctx\n self.io = ctx.io\n self._observer = ProgressObserver(ctx.io)\n\n def execute(self, declaration: TaskDeclaration, task_num: int, parent: Union[GroupDeclaration, None] = None,\n args: list = []):\n\n \"\"\" Executes a single task passing the arguments, redirecting/capturing the output and handling the errors \"\"\"\n\n result = False\n is_exception = False\n\n # 1. notify\n self._observer.task_started(declaration, parent, args)\n\n # 2. parse arguments\n parsed_args, defined_args = CommandlineParsingHelper.parse(declaration, args)\n log_level: str = parsed_args['log_level']\n log_to_file: str = parsed_args['log_to_file']\n is_silent: bool = parsed_args['silent']\n keep_going: bool = parsed_args['keep_going']\n cmdline_become: str = parsed_args['become']\n\n # 3. 
execute\n temp = TempManager()\n\n try:\n io = IO()\n io.set_log_level(log_level if log_level else self.io.get_log_level())\n\n if is_silent:\n io.silent = is_silent\n else:\n io.inherit_silent(self.io) # fallback to system-wide\n\n where_to_store_logs = decide_about_target_log_files(self._ctx, log_to_file, declaration, task_num)\n\n with io.capture_descriptors(target_files=where_to_store_logs):\n\n task = declaration.get_task_to_execute()\n task.internal_inject_dependencies(io, self._ctx, self, temp)\n\n result = self._execute_directly_or_forked(cmdline_become, task, temp, ExecutionContext(\n declaration=declaration,\n parent=parent,\n args=parsed_args,\n env=declaration.get_env(),\n defined_args=defined_args\n ))\n\n # 4. capture result\n except Exception as e:\n # allows to keep going on, even if task fails\n if not keep_going:\n output_formatted_exception(e, str(task.get_full_name()), self.io)\n raise InterruptExecution()\n\n self._observer.task_errored(declaration, e)\n is_exception = True\n\n finally:\n temp.finally_clean_up()\n\n if result is True:\n self._observer.task_succeed(declaration, parent)\n else:\n if not is_exception: # do not do double summary\n self._observer.task_failed(declaration, parent)\n\n # break the whole pipeline only if not --keep-going\n if not keep_going:\n raise InterruptExecution()\n\n def _execute_directly_or_forked(self, cmdline_become: str, task: TaskInterface, temp: TempManager, ctx: ExecutionContext):\n \"\"\"Execute directly or pass to a forked process\n \"\"\"\n\n if task.should_fork() or cmdline_become:\n task.io().debug('Executing task as separate process')\n return self._execute_as_forked_process(cmdline_become, task, temp, ctx)\n\n return task.execute(ctx)\n\n @staticmethod\n def _execute_as_forked_process(become: str, task: TaskInterface, temp: TempManager, ctx: ExecutionContext):\n \"\"\"Execute task code as a separate Python process\n\n The communication between processes is with serialized data and text files.\n One text file is a script, the task code is passed with stdin together with a whole context\n Second text file is a return from executed task - it can be a boolean or exeception.\n\n When an exception is returned by a task, then it is reraised there - so the original exception is shown\n without any proxies.\n \"\"\"\n\n if not become:\n become = task.get_become_as()\n\n # prepare file with source code and context\n communication_file = temp.assign_temporary_file()\n task.io().debug('Assigning communication temporary file at \"%s\"' % communication_file)\n\n context_to_pickle = {'task': task, 'ctx': ctx, 'communication_file': communication_file}\n\n try:\n task.io().debug('Serializing context')\n with open(communication_file, 'wb') as f:\n f.write(pickle_dumps(context_to_pickle))\n\n except (AttributeError, TypeError) as e:\n task.io().error('Cannot fork, serialization failed. 
' +\n 'Hint: Tasks that are using internally inner-methods and ' +\n 'lambdas cannot be used with become/fork')\n task.io().error(str(e))\n\n if task.io().is_log_level_at_least('debug'):\n task.io().error('Pickle trace: ' + str(get_unpicklable(context_to_pickle)))\n\n return False\n\n # set permissions to temporary file\n if become:\n task.io().debug('Setting temporary file permissions')\n os.chmod(communication_file, 0o777)\n\n try:\n pwd.getpwnam(become)\n except KeyError:\n task.io().error('Unknown user \"%s\"' % become)\n return False\n\n task.io().debug('Executing python code')\n task.py(code=FORKED_EXECUTOR_TEMPLATE, become=become, capture=False, arguments=communication_file)\n\n # collect, process and pass result\n task.io().debug('Parsing subprocess results from a serialized data')\n with open(communication_file, 'rb') as conn_file:\n task_return = pickle_loads(conn_file.read())\n\n if isinstance(task_return, Exception):\n task.io().debug('Exception was raised in subprocess, re-raising')\n raise task_return\n\n return task_return\n\n def get_observer(self) -> ProgressObserver:\n return self._observer\n","sub_path":"rkd/execution/executor.py","file_name":"executor.py","file_ext":"py","file_size_in_byte":6852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"138767432","text":"#!/usr/bin/python\n\"\"\"\nCopyright (C) 2020 onokatio\n\nThis program is free software: you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\nこのプログラムはフリーソフトウェアです。あなたはこれを、フリーソフトウェ\nア財団によって発行された GNU 一般公衆利用許諾契約書(バージョン3か、希\n望によってはそれ以降のバージョンのうちどれか)の定める条件の下で再頒布\nまたは改変することができます。\n\nこのプログラムは有用であることを願って頒布されますが、*全くの無保証*\nです。商業可能性の保証や特定の目的への適合性は、言外に示されたものも含\nめ全く存在しません。詳しくはGNU 一般公衆利用許諾契約書をご覧ください。\n\nあなたはこのプログラムと共に、GNU 一般公衆利用許諾契約書の複製物を一部\n受け取ったはずです。もし受け取っていなければ、\n<http://www.gnu.org/licenses/>を確認してください。\n\"\"\"\nimport numpy as np\n\ndef hakidasi(matrix):\n loop = 0\n (height, width) = matrix.shape\n\n for i in range(height):\n matrix[i] = matrix[i] / matrix[i][i]\n\n for k in range(height):\n if k==i:\n continue\n div = matrix[k][i]\n matrix[k] -= div * matrix[i]\n loop+=1\n ans = matrix[:,width-1]\n return (ans,loop)\n\ndef gauss(matrix):\n (height, width) = matrix.shape\n loop = 0\n for i in range(height):\n matrix[i] = matrix[i] / matrix[i][i]\n\n for k in range(i+1,height):\n div = matrix[k][i]\n matrix[k] -= div * matrix[i]\n loop+=1\n\n ans = np.zeros(width-1)\n\n for i in range(height-1,-1,-1):\n ans[i] = matrix[i][width-1] - sum(ans * matrix[i][0:-1])\n loop+=1\n return (ans,loop)\n\nA = np.loadtxt('./A100.csv', delimiter=',')\nB = np.loadtxt('./b100.csv', delimiter=',')\nB = np.reshape(B,(len(B),1))\nmatrix = np.concatenate([A,B],1)\n\n(ans1g,loop1g) = gauss(matrix)\n(ans1h,loop1h) = hakidasi(matrix)\n\nA = np.loadtxt('./A1000.csv', delimiter=',')\nB = np.loadtxt('./b1000.csv', delimiter=',')\nB = np.reshape(B,(len(B),1))\nmatrix = np.concatenate([A,B],1)\n\n(ans2g,loop2g) = gauss(matrix)\n(ans2h,loop2h) = hakidasi(matrix)\n\nprint(ans1g)\nprint(ans1h)\nprint(ans2g)\nprint(ans2h)\nprint(loop1g)\nprint(loop1h)\nprint(loop2g)\nprint(loop2h)\n","sub_path":"5/simulation/LU/gauss.py","file_name":"gauss.py","file_ext":"py","file_size_in_byte":2947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} {"seq_id":"530675733","text":"import os\n#import subprocess\nfrom subprocess import Popen, PIPE\n#import pexpect\nimport sys\nSTDERR = sys.stderr\ndef excepthook(*args):\n print >> STDERR, 'caught'\n print >> STDERR, args\n\nsys.excepthook = excepthook\n\ndef main():\n #os.chdir(\"/opt/openbaton/scripts/prasu\")\n #os.system(\"mv bugzilla /etc/ansible\")\n #os.system(\"mv bugzilla_ans.yml /etc/ansible\")\n os.chdir(\"/etc/ansible\")\n \n #proc = subprocess.Popen(\"ansible 172.19.77.161 -m ping -u root -k\".split(),stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n proc=Popen('ansible 172.19.77.161 -m ping -u root -k'.split(), stdin=PIPE, stdout=PIPE)\n # read from / write to the child's pipes (sys.stdout/sys.stdin were used here by mistake)\n proc.stdout.readline()\n proc.stdin.write(\"abc123\\n\")\n proc.stdin.flush()\n #proc.communicate(\"abc123\") \n #outQueue = Queue()\n #errQueue = Queue()\n #proc.stdin.write(\"abc123\\n\")\n #proc.stdin.flush()\n #proc.stdin.close()\n #proc = subprocess.Popen(\"hostname\".split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n #errors = getOutput(errQueue)\n #output = getOutput(outQueue)\n #out1, err1 = proc.communicate()\n #print out1\n #proc.stdin.write(\"abc123\\n\")\n\n #child = pexpect.spawn(\"ansible 172.19.77.161 -m ping -u root -k\")\n #child.expect(pexpect.EOF)\n #child.expect(\"SSH Password:\")\n #child.sendline(\"abc123\")\n #output = child.before\n #print output\n \n #print os.system(\"ansible 172.19.77.161 -m ping -u root\")\n #print os. 
system(\"ansible-playbook bugzilla_ans.yml\")\nmain()\n\n\n","sub_path":"TOSCA/prasu/Definitions/Scripts/bugplaybook.py","file_name":"bugplaybook.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"261566884","text":"# ######################################################################\n# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #\n# National Laboratory. All rights reserved. #\n# #\n# Redistribution and use in source and binary forms, with or without #\n# modification, are permitted provided that the following conditions #\n# are met: #\n# #\n# * Redistributions of source code must retain the above copyright #\n# notice, this list of conditions and the following disclaimer. #\n# #\n# * Redistributions in binary form must reproduce the above copyright #\n# notice this list of conditions and the following disclaimer in #\n# the documentation and/or other materials provided with the #\n# distribution. #\n# #\n# * Neither the name of the Brookhaven Science Associates, Brookhaven #\n# National Laboratory nor the names of its contributors may be used #\n# to endorse or promote products derived from this software without #\n# specific prior written permission. #\n# #\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #\n# \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #\n# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #\n# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #\n# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #\n# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #\n# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #\n# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #\n# POSSIBILITY OF SUCH DAMAGE. #\n########################################################################\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\nfrom itertools import product\nfrom metadatastore.api import (insert_run_start, insert_event,\n insert_descriptor)\nfrom filestore.api import register_handler, insert_resource, insert_datum\nimport filestore.file_writers as fw\nfrom filestore.handlers import HDFMapsSpectrumHandler as HDFM\nfrom databroker.examples.sample_data import common\n\nimport six\nimport h5py\nimport os.path as op\nimport numpy as np\nimport datetime\nimport uuid\n\nimport logging\nlogger = logging.getLogger(__name__)\nnoisy = common.noisy\n\n\nregister_handler('hdf_maps', HDFM)\n\n\ndef save_syn_data(eid, data, base_path=None):\n \"\"\"\n Save a array as hdf format to disk.\n Defaults to saving files in :path:`~/.fs_cache/YYYY-MM-DD.h5`\n\n Parameters\n ----------\n eid : unicode\n id for file name\n data : ndarray\n The data to be saved\n base_path : str, optional\n The base-path to use for saving files. If not given\n default to `~/.fs_cache`. 
Will add a sub-directory for\n each day in this path.\n \"\"\"\n\n if base_path is None:\n base_path = op.join(op.expanduser('~'), '.fs_cache',\n str(datetime.date.today()))\n fw._make_sure_path_exists(base_path)\n fpath = op.join(base_path, str(eid) + '.h5')\n\n with h5py.File(fpath, 'w') as f:\n # create a group for maps to hold the data\n mapsGrp = f.create_group('MAPS')\n # now set a comment\n mapsGrp.attrs['comments'] = 'MAPS group'\n\n entryname = 'mca_arr'\n comment = 'These are raw spectrum data.'\n ds_data = mapsGrp.create_dataset(entryname, data=data)\n ds_data.attrs['comments'] = comment\n return fpath\n\n\ndef get_data(ind_v, ind_h, rows, cols):\n \"\"\"\n Get data for given x, y index.\n\n Parameters\n ----------\n ind_v : int\n vertical index\n ind_h : int\n horizontal index\n\n Returns\n -------\n unicode:\n id number of event\n \"\"\"\n\n uid = str(uuid.uuid1())\n\n # generate 3D random number with a given shape\n syn_data = np.ones((20, 1, 10)) * (ind_v * cols + ind_h)\n file_path = save_syn_data(uid, syn_data)\n\n custom = {'dset_path': 'mca_arr'}\n\n fb = insert_resource('hdf_maps', file_path, resource_kwargs=custom)\n evl = insert_datum(fb, uid, datum_kwargs={'x': ind_v, 'y': ind_h})\n return evl.datum_id\n\n\ndef hdf_data_io(rows, cols):\n \"\"\"\n Save data to db and run test when data is retrieved.\n \"\"\"\n run_start_uid = insert_run_start(time=0., scan_id=1, beamline_id='csx',\n uid=str(uuid.uuid4()))\n\n # data keys entry\n data_keys = {'v_pos': dict(source='MCA:pos_y', dtype='number'),\n 'h_pos': dict(source='MCA:pos_x', dtype='number'),\n 'xrf_spectrum': dict(source='MCA:spectrum', dtype='array',\n shape=(20, 1, 10),\n external='FILESTORE:')}\n\n # save the event descriptor\n descriptor_uid = insert_descriptor(\n run_start=run_start_uid, data_keys=data_keys, time=0.,\n uid=str(uuid.uuid4()))\n\n events = []\n for i, (v_pos, h_pos) in enumerate(product(range(rows), range(cols))):\n\n spectrum_uid = get_data(v_pos, h_pos, rows, cols)\n\n # Put in actual ndarray data, as broker would do.\n data1 = {'xrf_spectrum': spectrum_uid,\n 'v_pos': v_pos,\n 'h_pos': h_pos}\n timestamps1 = {k: noisy(i) for k in data1}\n\n event_uid = insert_event(descriptor=descriptor_uid, seq_num=i,\n time=noisy(i), data=data1,\n uid=str(uuid.uuid4()),\n timestamps=timestamps1)\n events.append(event_uid)\n\n return run_start_uid, events\n","sub_path":"databroker/examples/hdf_io.py","file_name":"hdf_io.py","file_ext":"py","file_size_in_byte":6394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"522391489","text":"#!/usr/bin/env python3\n\"\"\"Trigger iCount demultiplexing, sample annotation and primary analysis.\"\"\"\n\nimport argparse\nimport sys\n\nfrom time import sleep\n\nimport resdk\nfrom resdk.resources import Collection\n\n\ndef parse_arguments():\n \"\"\"Parse command line arguments.\"\"\"\n parser = argparse.ArgumentParser(description=\"Annotate and analyse iCount samples.\")\n parser.add_argument('multiplexed_reads', help=\"Multiplexed reads.\")\n parser.add_argument('sample_annotation', help=\"iCount sample annotation file.\")\n parser.add_argument('-u', '--username', type=str, help='Username', default='admin')\n parser.add_argument('-p', '--password', type=str, help='Password', default='admin')\n parser.add_argument('-s', '--server', type=str, help='Server URL', default='http://localhost:8000')\n return parser.parse_args()\n\ndef get_or_create_collection(resolwe, coll_name):\n \"\"\"Check if Collection with a given name 
already exists. Create new Collection if not.\"\"\"\n n_coll = len(resolwe.collection.filter(name=coll_name))\n if n_coll == 1:\n return resolwe.collection.get(name=coll_name)\n if n_coll == 0:\n new_coll = Collection(resolwe=resolwe)\n new_coll.name = coll_name\n new_coll.save()\n return new_coll\n\ndef main():\n \"\"\"Invoke when run directly as a program.\"\"\"\n args = parse_arguments()\n\n # Make a connection to a Resolwe server\n res = resdk.Resolwe(args.username, args.password, args.server)\n resdk.start_logging()\n\n # Upload input data\n multiplexed_reads = res.run('upload-fastq-single', input={'src': args.multiplexed_reads})\n sample_annotation = res.run('upload-iclip-annotation', input={'src': args.sample_annotation})\n\n # trigger demultiplexing, annotation and primary analysis\n demultiplex = res.run(\n 'workflow-icount-demultiplex',\n input = {\n 'reads': multiplexed_reads.id, # reference previously uploaded data by its ID\n 'icount_annotation': sample_annotation.id # reference previously uploaded data by its ID\n }\n )\n\n while demultiplex.status not in ['OK', 'ER']:\n sleep(5)\n demultiplex.update()\n\n if demultiplex.status == 'OK':\n\n demux_object = res.data.filter(parents=demultiplex.id, type='data:demultiplex:icount:')[0]\n annotation_object = res.data.filter(parents=demultiplex.id, type='data:icount:annotsample:')[0]\n\n # wait for sample annotation job to either finish or fail\n while annotation_object.status not in ['OK', 'ER']:\n sleep(5)\n annotation_object.update()\n\n if annotation_object.status == 'OK':\n # get a list of child objects after demultiplexing\n demux_data = res.data.filter(parents=demux_object.id)\n\n # Use experiment_name annotation field to assign samples to collection\n for d_obj in demux_data:\n if d_obj.sample:\n try:\n collection_name = d_obj.descriptor['other']['experiment_name']\n coll = get_or_create_collection(res, collection_name)\n\n if coll:\n coll.add_samples(d_obj.sample)\n print('{} added to collection {}.'.format(d_obj.sample, coll.name))\n except:\n print('{} was not assigned to any collections'.format(d_obj.sample))\n else:\n print('Sample annotation failed. 
Could not assign samples to collection.')\n else:\n print('Demultiplexing job failed')\n sys.exit(1)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ICLIP/iclip_demultiplex.py","file_name":"iclip_demultiplex.py","file_ext":"py","file_size_in_byte":3620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"263410644","text":"import cv2\nimport numpy as np\nfrom PIL import Image\nimport pytesseract\nimport os\n\ndef crop_fps_portion_and_enlarge_it(screenshot, processed_img):\n image = cv2.imread(screenshot)\n # Crop the image\n fps = image[1:12, 310:375]\n # Enlarge it to with = 150\n r = 160.0 / fps.shape[1]\n dim = (160, int(fps.shape[0] * r))\n enlarged_fps = cv2.resize(fps, dim, interpolation = cv2.INTER_AREA)\n cv2.imwrite(processed_img, enlarged_fps)\n\ndef recognize_characters_from_the_image(image):\n img = cv2.imread(image)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n cv2.imwrite(\"Gray-FPS.png\", gray)\n rgbimg = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)\n text = pytesseract.image_to_string(Image.fromarray(rgbimg))\n if (len(text) == 0):\n raise AssertionError(\"There is no character in the image!\")\n\n return \"\\n\" + text\n","sub_path":"webgl-test/lhg-robot-libs.py","file_name":"lhg-robot-libs.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"308351069","text":"#\n# This code is a modified version of CEDR: https://github.com/Georgetown-IR-Lab/cedr\n#\n# (c) Georgetown IR lab & Carnegie Mellon University\n#\n# It's distributed under the MIT License\n# MIT License is compatible with Apache 2 license for the code in this repo.\n#\nimport random\nfrom tqdm import tqdm\nimport torch\nimport pickle\n\nfrom collections import Counter\n\nQUERY_ID_FIELD = 'query_id'\nDOC_ID_FIELD = 'doc_id'\nCAND_SCORE_FIELD = 'cand_score'\nQUERY_TOK_FIELD = 'query_tok'\nDOC_TOK_FIELD = 'doc_tok'\nQUERY_MASK_FIELD = 'query_mask'\nDOC_MASK_FIELD = 'doc_mask'\n\nPAD_CODE=1 # your typical padding symbol\nDEFAULT_MAX_QUERY_LEN=32\nDEFAULT_MAX_DOC_LEN=512 - DEFAULT_MAX_QUERY_LEN - 4\n\n\ndef read_datafiles(files):\n \"\"\"Read train and test files.\n\n :param files: an array of file objects, which represent queries or documents (in any order)\n :return: a dataset, which is tuple of two dictionaries representing queries and documents, respectively.\n \"\"\"\n queries = {}\n docs = {}\n for file in files:\n for line in tqdm(file, desc='loading datafile (by line)', leave=False):\n cols = line.rstrip().split('\\t')\n if len(cols) != 3:\n tqdm.write(f'skipping line: `{line.rstrip()}`')\n continue\n c_type, c_id, c_text = cols\n assert c_type in ('query', 'doc')\n if c_type == 'query':\n queries[c_id] = c_text\n if c_type == 'doc':\n docs[c_id] = c_text\n return queries, docs\n\n\ndef read_pairs_dict(file):\n \"\"\"Read training pairs and scores provided by a candidate generator.\n\n :param file: an open file, not a file name!\n :return: Training pairs in the dictionary of dictionary formats.\n Candidate generator scores are\n values of the inner-most dictionary.\n \"\"\"\n result = {}\n for ln, line in enumerate(tqdm(file, desc='loading pairs (by line)', leave=False)):\n fields = line.split()\n if not len(fields) in [2, 3]:\n raise Exception(f'Wrong # of fields {len(fields)} in file {file}, line #: {ln+1}')\n qid, docid = fields[0: 2]\n if len(fields) == 3:\n score = fields[2]\n else:\n score = 0\n\n result.setdefault(qid, {})[docid] = float(score)\n\n 
return result\n\n\ndef write_pairs_dict(train_pairs, file_name):\n \"\"\"Write training pairs.\n\n :param train_pairs: training data dictionary of dictionaries.\n :param file_name: output file name\n \"\"\"\n with open(file_name, 'w') as outf:\n for qid, docid_dict in train_pairs.items():\n for did, score in docid_dict.items():\n outf.write(f'{qid}\\t{did}\\t{score}\\n')\n\n\ndef create_empty_batch():\n return {QUERY_ID_FIELD: [], DOC_ID_FIELD: [], CAND_SCORE_FIELD: [], QUERY_TOK_FIELD: [], DOC_TOK_FIELD: []}\n\n\ndef iter_train_data(model, device_name, dataset,\n train_pairs, do_shuffle, neg_qty_per_query,\n qrels,\n batch_size, max_query_len, max_doc_len):\n \"\"\"Training data iterator.\n\n :param model: a model object\n :param device_name: a device name\n :param dataset: a dataset object: a tuple returned by read_datafiles\n :param train_pairs: training pairs returned by read_pairs_dict\n :param do_shuffle: True to shuffle\n :param neg_qty_per_query: a number of negative examples in each query\n :param qrels: a QREL dictionary returned by read_qrels_dict\n :param batch_size: the size of the batch\n :param max_query_len: max. query length\n :param max_doc_len: max. document length\n\n :return:\n \"\"\"\n batch = create_empty_batch()\n for qid, did, score, query_tok, doc_tok in _iter_train_data(model, dataset,\n train_pairs=train_pairs,\n do_shuffle=do_shuffle,\n neg_qty_per_query=neg_qty_per_query,\n qrels=qrels):\n batch[QUERY_ID_FIELD].append(qid)\n batch[DOC_ID_FIELD].append(did)\n batch[CAND_SCORE_FIELD].append(score)\n batch[QUERY_TOK_FIELD].append(query_tok)\n batch[DOC_TOK_FIELD].append(doc_tok)\n\n if len(batch[QUERY_ID_FIELD]) // (1 + neg_qty_per_query) == batch_size:\n yield _pack_n_ship(batch, device_name, max_query_len, max_doc_len)\n batch = create_empty_batch()\n\n\ndef train_item_qty_upper_bound(train_pairs):\n return len(list(train_pairs.keys()))\n\n\ndef _iter_train_data(model, dataset,\n train_pairs, do_shuffle, neg_qty_per_query,\n qrels):\n ds_queries, ds_docs = dataset\n while True:\n qids = list(train_pairs.keys())\n if do_shuffle:\n random.shuffle(qids)\n for qid in qids:\n query_train_pairs = train_pairs[qid]\n\n pos_ids = [did for did in query_train_pairs if qrels.get(qid, {}).get(did, 0) > 0]\n if len(pos_ids) == 0:\n continue\n pos_id = random.choice(pos_ids)\n pos_ids_lookup = set(pos_ids)\n\n neg_id_arr = [did for did in query_train_pairs if did not in pos_ids_lookup]\n if len(neg_id_arr) < neg_qty_per_query:\n continue\n\n pos_doc = ds_docs.get(pos_id)\n if pos_doc is None:\n tqdm.write(f'missing doc {pos_id}! Skipping')\n continue\n\n neg_data_arr = []\n sample_fail = False\n\n # sampling *WITHOUT* replacement\n for neg_id in random.sample(neg_id_arr, neg_qty_per_query):\n neg_doc = ds_docs.get(neg_id)\n\n if neg_doc is None:\n tqdm.write(f'missing doc {neg_id}! 
Skipping')\n sample_fail = True\n break\n\n neg_data_arr.append( (neg_id, neg_doc) )\n\n if sample_fail:\n continue\n\n query_tok_ids = model.tokenize_and_encode(ds_queries[qid])\n\n yield qid, pos_id, query_train_pairs[pos_id], \\\n query_tok_ids, model.tokenize_and_encode(pos_doc)\n\n assert len(neg_data_arr) == neg_qty_per_query\n for neg_id, neg_doc in neg_data_arr:\n yield qid, neg_id, query_train_pairs[neg_id], \\\n query_tok_ids, model.tokenize_and_encode(neg_doc)\n\n\ndef iter_valid_records(model, device_name, dataset, run,\n batch_size, max_query_len, max_doc_len):\n batch = create_empty_batch()\n for qid, did, score, query_tok, doc_tok in _iter_valid_records(model, dataset, run):\n batch[QUERY_ID_FIELD].append(qid)\n batch[DOC_ID_FIELD].append(did)\n batch[CAND_SCORE_FIELD].append(score)\n batch[QUERY_TOK_FIELD].append(query_tok)\n batch[DOC_TOK_FIELD].append(doc_tok)\n if len(batch[QUERY_ID_FIELD]) == batch_size:\n yield _pack_n_ship(batch, device_name, max_query_len, max_doc_len)\n batch = create_empty_batch()\n # final batch\n if len(batch[QUERY_ID_FIELD]) > 0:\n yield _pack_n_ship(batch, device_name, max_query_len, max_doc_len)\n\n\ndef _iter_valid_records(model, dataset, run):\n ds_queries, ds_docs = dataset\n for qid in run:\n query_tok_ids = model.tokenize_and_encode(ds_queries[qid])\n for did, score in run[qid].items():\n doc = ds_docs.get(did)\n if doc is None:\n tqdm.write(f'missing doc {did}! Skipping')\n continue\n doc_tok_ids = model.tokenize_and_encode(doc)\n yield qid, did, score, query_tok_ids, doc_tok_ids\n\n\ndef _pack_n_ship(batch, device_name, max_query_len, max_doc_len):\n dlen = min(max_doc_len, max(len(b) for b in batch[DOC_TOK_FIELD]))\n return {\n QUERY_ID_FIELD: batch[QUERY_ID_FIELD],\n DOC_ID_FIELD: batch[DOC_ID_FIELD],\n CAND_SCORE_FIELD: torch.FloatTensor(batch[CAND_SCORE_FIELD]).to(device_name),\n QUERY_TOK_FIELD: _pad_crop(device_name, batch[QUERY_TOK_FIELD], max_query_len),\n DOC_TOK_FIELD: _pad_crop(device_name, batch[DOC_TOK_FIELD], dlen),\n QUERY_MASK_FIELD: _mask(device_name, batch[QUERY_TOK_FIELD], max_query_len),\n DOC_MASK_FIELD: _mask(device_name, batch[DOC_TOK_FIELD], dlen),\n }\n\n\ndef _pad_crop(device_name, items, l):\n result = []\n for item in items:\n if len(item) < l:\n item = item + [PAD_CODE] * (l - len(item))\n if len(item) > l:\n item = item[:l]\n result.append(item)\n res = torch.tensor(result).long()\n\n return res.to(device_name)\n\n\ndef _mask(device_name, items, max_len):\n result = []\n for e in items:\n elen = min(len(e), max_len)\n result.append([1.] * elen + [0.]*(max_len - elen))\n\n res = torch.tensor(result).float()\n\n return res.to(device_name)\n\n\nclass VocabBuilder:\n \"\"\"Compile a vocabulary together with token stat. 
from *WHITE-SPACE* tokenized text.\"\"\"\n    def __init__(self):\n        self.total_counter = Counter()\n        self.doc_counter = Counter()\n        self.doc_qty = 0\n        self.tot_qty = 0\n\n    def proc_doc(self, text):\n        \"\"\"White-space tokenize the document, update counters.\"\"\"\n        toks = text.strip().split()\n        self.total_counter.update(toks)\n        self.doc_counter.update(list(set(toks)))\n        self.tot_qty += len(toks)\n        self.doc_qty += 1\n\n    def save(self, file_name):\n        with open(file_name, 'wb') as f:\n            dt = [self.total_counter, self.doc_counter, self.doc_qty, self.tot_qty]\n            pickle.dump(dt, f)\n\n    @staticmethod\n    def load(file_name):\n        with open(file_name, 'rb') as f:\n            dt = pickle.load(f)\n            res = VocabBuilder()\n            res.total_counter, res.doc_counter, res.doc_qty, res.tot_qty = dt\n            return res\n\n\n","sub_path":"flexneuart/models/train/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":10005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"141096682","text":"\"\"\"\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\nPROBLEM: Find the kth smallest or kth largest element in a list\n\nBF approach is to sort and return the element at index k\nComplexity is O(nlogn)\n\nWe don't have to sort all the elements to get the kth smallest.\nWe just need to find the position of the kth element in the sorted order.\nThis can be done by partitioning the array and checking if the partitioned\nelement is the kth element\n\nFor kth largest you just need to modify partitioning to reverse the\norder and return the kth element\n\nNOTES:\n\nCOMPLEXITY:\n    Time: O(n) on average, O(n^2) worst case\n\nSOURCE:\nNone\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\"\"\"\n\n\ndef swap(arr, i, j):\n    temp = arr[i]\n    arr[i] = arr[j]\n    arr[j] = temp\n\n\ndef partition(arr, left, right, asc):\n    \"\"\"\n    partition the array around a pivot element,\n    with elements < pivot to the left of pivot\n    and elements >= pivot to the right of pivot\n    \"\"\"\n    # can randomize pivot idx\n    pivot_idx = right\n    pivot = arr[right]\n    part_idx = left\n    for i in range(left, right):\n        if asc:\n            # ascending order\n            if arr[i] < pivot:\n                # move to the left of part_idx\n                swap(arr, part_idx, i)\n                part_idx += 1\n        else:\n            if arr[i] > pivot:\n                swap(arr, part_idx, i)\n                part_idx += 1\n\n    # part_idx is the new position of pivot element\n    swap(arr, part_idx, pivot_idx)\n    return part_idx\n\n\ndef quick_select(arr, left, right, k, asc=True):\n    p = partition(arr, left, right, asc)\n    if p == k-1:\n        return arr[p]\n    elif p > k-1:\n        return quick_select(arr, left, p-1, k, asc)\n    else:\n        return quick_select(arr, p+1, right, k, asc)\n\ndef kth_smallest(arr, left, right, k):\n    \"\"\"\n    This uses the quickselect algorithm to find the\n    kth smallest element\n    \"\"\"\n    while left <= right:\n        p = partition(arr, left, right, True)\n        if p == k-1:\n            return arr[p]\n        if p > k-1:\n            right = p-1\n        else:\n            left = p+1\n\ndef kth_largest(arr, left, right, k):\n    \"\"\"\n    This uses the quickselect algorithm to find the\n    kth largest element\n    \"\"\"\n    while left <= right:\n        p = partition(arr, left, right, False)\n        if p == k-1:\n            return arr[p]\n        if p > k-1:\n            right = p-1\n        else:\n            left = p+1\n\nif __name__ == '__main__':\n    arr = [4, 9, 2, 8, 6, 5, 3]\n    for i in range(1, len(arr)+1):\n        print(\"{} smallest: {}\".format(i, kth_smallest(arr, 0, len(arr)-1, i)))\n    for i in range(1, len(arr)+1):\n        print(\"{} largest: {}\".format(i, kth_largest(arr, 0, len(arr)-1, 
i)))\n","sub_path":"kthsmallest.py","file_name":"kthsmallest.py","file_ext":"py","file_size_in_byte":2763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"381310642","text":"from heppy.framework.analyzer import Analyzer\nfrom heppy.statistics.tree import Tree\nfrom heppy.analyzers.ntuple import *\nfrom heppy.particles.tlv.jet import Jet\n\nfrom ROOT import TFile, TLorentzVector\n\n\nclass JetConeTreeProducer(Analyzer):\n\n def beginLoop(self, setup):\n super(JetConeTreeProducer, self).beginLoop(setup)\n self.rootfile = TFile('/'.join([self.dirName,\n 'jet_tree.root']),\n 'recreate')\n self.tree = Tree( self.cfg_ana.tree_name,\n self.cfg_ana.tree_title )\n bookJet(self.tree, 'papasjet')\n bookJet(self.tree, 'cmsjet')\n bookJet(self.tree, 'papas_control_jet')\n bookJet(self.tree, 'cms_control_jet')\n bookJet(self.tree, 'gen_jet')\n bookJet(self.tree, 'simtrack')\n var(self.tree, 'simtrack_len')\n for i in range(20):\n bookParticle(self.tree, 'simtrack_ptc'+str(i))\n bookParticle(self.tree, 'PFCandidate')\n\n def process(self, event):\n self.tree.reset()\n papasjet = getattr(event, self.cfg_ana.rec_jet, None)\n cmsjet = getattr(event, self.cfg_ana.pf_jet, None)\n papas_control_jet = getattr(event, self.cfg_ana.rec_control_jet, None)\n cms_control_jet = getattr(event, self.cfg_ana.pf_control_jet, None)\n gen_jet = getattr(event, self.cfg_ana.gen_jet, None)\n if papasjet:\n fillJet(self.tree, 'papasjet', papasjet)\n if cmsjet:\n fillJet(self.tree, 'cmsjet', cmsjet)\n if papas_control_jet:\n fillJet(self.tree, 'papas_control_jet', papas_control_jet)\n if cms_control_jet:\n fillJet(self.tree, 'cms_control_jet', cms_control_jet)\n if gen_jet:\n fillJet(self.tree, 'gen_jet', gen_jet)\n sim_track_jet = getattr(event, self.cfg_ana.sim_track_jet, None)\n if sim_track_jet:\n fillJet(self.tree, 'simtrack', sim_track_jet)\n sim_track_ptcs = getattr(event, self.cfg_ana.sim_track, None)\n if sim_track_ptcs:\n for i in range(min(len(sim_track_ptcs), 20)):\n fillParticle(self.tree, 'simtrack_ptc'+str(i), sim_track_ptcs[i])\n fill(self.tree, 'simtrack_len', len(sim_track_ptcs))\n pfcandidates = getattr(event, self.cfg_ana.pfcandidates, None)\n if pfcandidates:\n fillParticle(self.tree, 'PFCandidate', pfcandidates[0])\n self.tree.tree.Fill()\n\n def write(self, setup):\n self.rootfile.Write()\n self.rootfile.Close()\n","sub_path":"analyzers/JetConeTreeProducer.py","file_name":"JetConeTreeProducer.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"63626603","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\n\n\ndef input():\n return sys.stdin.readline()\n\n\ndef all_search(s, i, i_sum: int, temp):\n n = int(s[i])\n\n if i == len(s)-1:\n # ベースケース\n i_sum += temp + n\n return i_sum\n else:\n ret = all_search(s, i+1, i_sum, (temp+n)*10) + all_search(s, i+1, i_sum+temp+n, 0)\n return ret\n\n\n\n\ndef resolve():\n s = input().rstrip()\n\n print(all_search(s, 0, 0, 0))\n\n\n\nif __name__ == \"__main__\":\n resolve()\n\n","sub_path":"Python_codes/p04001/s471086750.py","file_name":"s471086750.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"283336805","text":"import multiprocessing\nimport json\nimport os, glob\nimport pandas as pd\nimport pandas.io.json as pdjson\nimport seaborn as sns\n\nsns.set(style=\"whitegrid\")\n\n\ndef run(sz):\n data_frames 
= []\n\n for file in glob.glob(\"benchfiles/*+{}+*.orunchrt.summary.bench\".format(sz)):\n print(file)\n with open(file) as f:\n data = []\n for l in f:\n j = json.loads(l)\n j['name'] = j['name'].split('.orunchrt')[0]\n data.append(j)\n df = pd.json_normalize(data)\n df['variant'] = file.replace(\".summary.bench\",\"\").replace(\"benchfiles/\", \"\")\n data_frames.append(df)\n\n # print(data_frames)\n df = pd.concat (data_frames, sort=False)\n df = df.sort_values(['name'])\n # Uncomment the following to display all the lines in pandas output\n pd.set_option('display.max_rows', df.shape[0]+1)\n\n df.filter(['name','variant','time_secs'])\n\n g = sns.catplot (x='name', y='time_secs', hue='variant', data = df, kind ='bar', aspect=16)\n g.set_xticklabels(rotation=90)\n g.savefig(\"time_{}.jpg\".format(sz))\n\n # returns a dictionary with {key = name : value : metric}\n # the metric corresponds to the variant\n def create_column(df, variant, metric):\n df = pd.DataFrame.copy(df)\n variant_metric_name = list([ zip(df[metric], df[x], df['name'])\n for x in df.columns.array if x == \"variant\" ][0])\n name_metric = {n:t for (t, v, n) in variant_metric_name if v == variant}\n return name_metric\n\n def add_display_name(df, variant, metric):\n name_metric = create_column(pd.DataFrame.copy(df), variant, metric)\n disp_name = [name+\" (\"+str(round(name_metric[name], 2))+\")\" for name in df[\"name\"]]\n df[\"display_name\"] = pd.Series(disp_name, index=df.index)\n return df\n\n def normalise(df, variant, topic, additionalTopics=[]):\n df = add_display_name(df, variant, topic)\n df = df.sort_values([\"name\",\"variant\"])\n grouped = df.filter(items=['name',topic,'variant','display_name']+additionalTopics).groupby('variant')\n ndata_frames = []\n for group in grouped:\n (v,data) = group\n if(v != variant):\n data['b'+topic] = grouped.get_group(variant)[topic].values\n data[['n'+topic]] = data[[topic]].div(grouped.get_group(variant)[topic].values, axis=0)\n for t in additionalTopics:\n # print(variant, t)\n data[[t]] = grouped.get_group(variant)[t].values\n ndata_frames.append(data)\n df = pd.concat (ndata_frames)\n return df\n\n def plot_normalised(df, variant, topic):\n df = pd.DataFrame.copy(df)\n df.sort_values(by=[topic],inplace=True)\n df[topic] = df[topic] - 1\n g = sns.catplot (x=\"display_name\", y=topic, hue='variant', data = df, kind ='bar', aspect=16, bottom=1)\n g.set_xticklabels(rotation=90)\n g.ax.legend(loc=8)\n g._legend.remove()\n g.ax.set_xlabel(\"Benchmarks\")\n return g\n # g.ax.set_yscale('log')\n\n ndf = normalise(df, '4.12.0+domains+effects+{}+8_1.orunchrt'.format(sz), 'time_secs')\n g = plot_normalised(ndf, '4.12.0+domains+effects+{}+8_1.orunchrt'.format(sz), 'ntime_secs')\n g.savefig(\"time_normalised_{}.jpg\".format(sz))\n\n g = sns.catplot (x='name', y='gc.major_collections', hue='variant', data = df, kind ='bar', aspect=4)\n g.set_xticklabels(rotation=90)\n g.savefig(\"major_collections_{}.jpg\".format(sz))\n\n ndf = normalise(df, '4.12.0+domains+effects+{}+8_1.orunchrt'.format(sz), 'gc.major_collections')\n g = plot_normalised(ndf, '4.12.0+domains+effects+{}+8_1.orunchrt'.format(sz), 'ngc.major_collections')\n g.savefig(\"major_collections_normalised_{}.jpg\".format(sz))\n\n g = sns.catplot (x='name', y='gc.minor_collections', hue='variant', data = df, kind ='bar', aspect=4)\n g.set_xticklabels(rotation=90)\n g.savefig(\"minor_collections_{}.jpg\".format(sz))\n\n ndf = normalise(df, '4.12.0+domains+effects+{}+8_1.orunchrt'.format(sz), 'gc.minor_collections')\n g = 
plot_normalised(ndf, '4.12.0+domains+effects+{}+8_1.orunchrt'.format(sz), 'ngc.minor_collections')\n g.savefig(\"minor_collections_normalised_{}.jpg\".format(sz))\n\npool = multiprocessing.Pool(4)\npool.map(run, [\"256k\", \"512k\", \"1M\", \"2M\", \"4M\"])","sub_path":"dlabs_bench_results/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"72645349","text":"\n\nfrom xai.brain.wordbase.nouns._hotbed import _HOTBED\n\n#calss header\nclass _HOTBEDS(_HOTBED, ):\n\tdef __init__(self,): \n\t\t_HOTBED.__init__(self)\n\t\tself.name = \"HOTBEDS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"hotbed\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_hotbeds.py","file_name":"_hotbeds.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"555569836","text":"from OpenGL.GL import *\r\nfrom OpenGL.GLU import *\r\nfrom OpenGL.GLUT import *\r\nimport numpy as np\r\nfrom math import cos, sin\r\n\r\nPI = 3.141592\r\n\r\ndef model():\r\n return None\r\n\r\ndef gasket(depth, p1, p2, p3):\r\n if depth == 0:\r\n glVertex2f(*p1)\r\n glVertex2f(*p2)\r\n glVertex2f(*p3)\r\n return\r\n \r\n m1 = (p1+p2)/2\r\n m2 = (p2+p3)/2\r\n m3 = (p3+p1)/2\r\n \r\n gasket(depth-1, p1, m1, m3)\r\n gasket(depth-1, m1, p2, m2)\r\n gasket(depth-1, m3, m2, p3)\r\n\r\ndef init():\r\n glClearColor(0.0, 0.0, 0.0, 1.0)\r\n\r\ndef display():\r\n glClear(GL_COLOR_BUFFER_BIT)\r\n glColor3f(1.0, 1.0, 1.0)\r\n glBegin(GL_TRIANGLES)\r\n gasket(4, np.array([cos(0), sin(0)]), np.array([cos(2*PI/3), sin(2*PI/3)]), np.array([cos(-2*PI/3), sin(-2*PI/3)]))\r\n glEnd()\r\n glutSwapBuffers()\r\n\r\ndef main():\r\n glutInit()\r\n glutInitWindowPosition(50, 50)\r\n glutInitWindowSize(640, 640)\r\n glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA)\r\n glutCreateWindow(\"sierpinski triangle\")\r\n init()\r\n # glOrtho(-2.0, 2.0, -2.0, 2.0, -2.0, 2.0)\r\n glutDisplayFunc(display)\r\n glutMainLoop()\r\n \r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"chap1-samples/CG-w1/06-sierpinski-triangle.py","file_name":"06-sierpinski-triangle.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"569713411","text":"import sys\n\ndef theta(a, b):\n if a == '-' or b == '-' or a != b: # gap or mismatch\n return -1\n elif a == b: # match\n return 1\n\ndef make_score_matrix(seq1, seq2):\n \"\"\"\n return score matrix and map(each score from which direction)\n 0: from diagonal\n 1: from left\n 2: from up\n \"\"\"\n seq1 = '-' + seq1\n seq2 = '-' + seq2\n score_mat = {}\n trace_mat = {}\n\n for i,p in enumerate(seq1):\n score_mat[i] = {} # matrix with two layer dict\n trace_mat[i] = {}\n for j,q in enumerate(seq2):\n if i == 0: # first row, gap in seq1\n score_mat[i][j] = -j\n trace_mat[i][j] = 1\n continue\n if j == 0: # first column, gap in seq2\n score_mat[i][j] = -i\n trace_mat[i][j] = 2\n continue\n ul = score_mat[i-1][j-1] + theta(p, q) # from up-left, mark 0\n l = score_mat[i][j-1] + theta('-', q) # from left, mark 1, gap in seq1\n u = score_mat[i-1][j] + theta(p, '-') # from up, mark 2, gap in seq2\n picked = max([ul,l,u])\n score_mat[i][j] = picked # record value\n trace_mat[i][j] = [ul, l, u].index(picked) # record direction\n return score_mat, trace_mat\n\ndef traceback(seq1, seq2, trace_mat):\n '''\n find one optimal traceback 
path from trace matrix, return path code\n -!- CAUTIOUS: if multiple equally possible path exits, only return one of them -!-\n '''\n seq1, seq2 = '-' + seq1, '-' + seq2\n i, j = len(seq1) - 1, len(seq2) - 1\n path_code = ''\n while i > 0 or j > 0:\n direction = trace_mat[i][j]\n if direction == 0: # from diagonal\n i = i-1\n j = j-1\n path_code = '0' + path_code\n elif direction == 1: # from left\n j = j-1\n path_code = '1' + path_code\n elif direction == 2: # from up\n i = i-1\n path_code = '2' + path_code\n return path_code\n\ndef print_m(seq1, seq2, m):\n \"\"\"print score matrix or trace matrix\"\"\"\n seq1 = '-' + seq1; seq2 = '-' + seq2\n print()\n print(' '.join(['%3s' % i for i in ' '+seq2]))\n for i, p in enumerate(seq1):\n line = [p] + [m[i][j] for j in range(len(seq2))]\n print(' '.join(['%3s' % i for i in line]))\n print()\n return\n\ndef pretty_print_align(seq1, seq2, path_code):\n '''\n return pair alignment result string from\n path code: 0 for match, 1 for gap in seq1, 2 for gap in seq2\n '''\n align1 = ''\n middle = ''\n align2 = ''\n for p in path_code:\n if p == '0':\n align1 = align1 + seq1[0]\n align2 = align2 + seq2[0]\n if seq1[0] == seq2[0]:\n middle = middle + '|'\n else:\n middle = middle + ' '\n seq1 = seq1[1:]\n seq2 = seq2[1:]\n elif p == '1':\n align1 = align1 + '-'\n align2 = align2 + seq2[0]\n middle = middle + ' '\n seq2 = seq2[1:]\n elif p == '2':\n align1 = align1 + seq1[0]\n align2 = align2 + '-'\n middle = middle + ' '\n seq1 = seq1[1:]\n\n print(' %s\\n %s \\n %s' % (align1, middle, align2))\n return\n\ndef main():\n try:\n seq1, seq2 = sys.argv[1:3]\n except:\n print('Usage: python align_nw.py seq1 seq2')\n print('--Demo:--')\n seq1, seq2 = 'TCATC','TCATGGC'\n\n score_mat, trace_mat = make_score_matrix(seq1, seq2)\n\n path_code = traceback(seq1, seq2, trace_mat)\n pretty_print_align(seq1, seq2, path_code)\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"align_nw.py","file_name":"align_nw.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"91213505","text":"import tkinter as tk\nfrom tkinter import ttk\nimport os\nimport sqlite3\nfrom src import primepart\n\n\nclass RelicTab:\n\n def __init__(self, tab):\n\n # Run every time something is typed into the search bar on the top.\n def click(event):\n global search_curr\n search_curr += event.char\n print(search_curr)\n\n # Run whenever an item row is clicked on.\n def onDoubleClick(event):\n rowid = tree.__tree.identify_row(event.y)\n print(str(rowid))\n\n # Find that item in SQL table and swap the owned value\n\n self.tab = tab # Tab is the tab of the workbook parts list presides on\n\n # Create search bar stuffs\n global search_curr\n search_curr = \"\"\n search_label = ttk.Label(self.tab, text=\"Search:\")\n search_label.pack(side=\"top\")\n search_bar = ttk.Entry(self.tab)\n search_bar.bind(\"\", click)\n search_bar.pack(side=\"top\")\n\n # Create frame for the table of parts\n l_frame = ttk.Labelframe(self.tab, text=\"Current parts:\")\n\n # Grab all prime parts info from the SQL database\n os.chdir(\"../data/relics\")\n connection = sqlite3.connect('Relics.db')\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM PRIMEPARTS ORDER BY NAME ASC\")\n result = cursor.fetchall()\n\n # Initialize the tree\n columns = (\"part\", \"owned\", \"relics\")\n tree = ttk.Treeview(l_frame, columns=columns, show=\"headings\")\n tree.heading(\"part\", text=\"Part\")\n tree.heading(\"owned\", 
text=\"Owned (y/n)\")\n tree.heading(\"relics\", text=\"Relics\")\n tree.column(\"part\", anchor=\"center\")\n tree.column(\"owned\", anchor=\"center\")\n tree.column(\"relics\", anchor=\"center\")\n tree.bind(\"<>\", onDoubleClick)\n\n # Fill the tree by stepping through each part in the table\n for part in result:\n curr_part = part[0][2:-2]\n tree.insert(\"\", tk.END, values=(curr_part, \"0\", \"filler\"))\n\n # Pack it up\n tree.pack(fill=\"both\", expand=True)\n l_frame.pack(fill=\"both\", expand=True)\n search_bar.focus() # Set cursor to active on the search bar\n","sub_path":"src/relictab.py","file_name":"relictab.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"617321395","text":"def minimumHours(rows, columns, grid):\n # holds (x, y) pairs for every server\n # that might be able to deliver the file to neighbors\n servers = set()\n for x in range(rows):\n for y in range(columns):\n if grid[x][y] == 1:\n servers.add((x, y))\n\n hours = -1\n\n # iterating while there are servers\n # that might be able to deliver the file to neighbors\n while len(servers) > 0:\n hours += 1\n\n # duplicating grid for atomic update\n new_grid = grid.copy()\n new_servers = set()\n\n for (x, y) in servers:\n adjacent = [\n (x + 1, y),\n (x - 1, y),\n (x, y + 1),\n (x, y - 1)\n ]\n\n # checking every adjacent cell of this server\n for (ax, ay) in adjacent:\n if ax < 0 or ax >= rows or ay < 0 or ay >= columns:\n continue\n\n if grid[ax][ay] == 0:\n # delivering the file here\n # and saving the adjacent server for next step\n new_grid[ax][ay] = 1\n new_servers.add((ax, ay))\n\n # updating the grid in one step\n grid = new_grid\n servers = new_servers\n\n return hours\n\n\nprint(minimumHours(2, 2, [[1, 0], [0, 0]]))\nprint(minimumHours(4, 4, [[0, 1, 1, 0, 1], [0, 1, 0, 1, 0], [0, 0, 0, 0, 1], [0, 1, 0, 0, 0]]))","sub_path":"amazon/server_delivery.py","file_name":"server_delivery.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"261569589","text":"import env\nimport os\nimport pantherine as purr\nimport vehicle\n\n###############################\n# Initilize anything that needs to happen at step 0 here.\n###############################\ndef initialize(traci):\n os.system(\"cls\")\n print(\"Initializing...\")\n \n trips = purr.readXMLtag(purr.mrf(env.map_dir,r\"*.trips.xml\"),\"trip\")\n \n total = len(trips) - 1\n print('Number of trips:',len(trips))\n for i,trip in enumerate(trips):\n veh = vehicle.Vehicle(\"veh%d\" % (i))\n try:\n veh.add(traci,trip['from'],trip['to'],trip['depart'])\n except:\n pass\n env.vehicles.append(veh)\n purr.update(i,total)\n\n print(\"Initialization complete!\")\n return\n# end def intialize\n\n\n###############################\n# Anything that happens within the TraCI control loop goes here.\n# One pass of the loop == 1 timestep.\n# Return False to finalize the simulation\n###############################\ndef timestep(traci,n_step):\n if traci.simulation.getTime() > 28799 and traci.vehicle.getIDCount() == 0:\n return False\n return True\n# end timestep\n\n###############################\n# Finalize the Simulation\n###############################\ndef finalize():\n \n return\n# End finalize\n","sub_path":"ex0/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"442471627","text":"# coding: utf-8\n\nimport itertools\nfrom oslo_config import cfg, types\n\nCONF = cfg.CONF\nPortType = types.Integer(1, 65535)\n\n\nopenstack_sample_opts = [\n cfg.StrOpt('topic',\n default='openstack_sample',\n help='topic'),\n cfg.StrOpt('host',\n default='localhost',\n help='host'),\n cfg.StrOpt('bind_host',\n default='127.0.0.1',\n help='IP address to listen on'),\n cfg.Opt('bind_port',\n type=PortType,\n default=8080,\n help='Port number to listen on'),\n]\n\n\ndef list_opts():\n return [\n ('openstack_sample', itertools.chain(openstack_sample_opts))\n ]\n","sub_path":"openstack_sample/conf/opts.py","file_name":"opts.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"538530081","text":"from django.contrib import admin\nfrom models import Category, App\nfrom adminsortable.admin import SortableAdmin\n\n\nclass CategoryAdmin(SortableAdmin):\n fieldsets = [\n (None, {\n 'fields': ('name',)\n }),\n ]\n list_display = ('name',)\n\nclass AppAdmin(SortableAdmin):\n fieldsets = [\n (None, {\n 'fields': ('name', 'abbr_name', 'url', 'icon', 'category', 'featured_index',),\n }),\n ('Description', {\n 'fields': ('description', 'screenshot', 'founder_description',),\n }),\n ]\n list_display = ('name', 'category', 'featured_index',)\n list_editable = ('featured_index',)\n list_filter = ('category',)\n ordering = ['category__order', 'order']\n\nadmin.site.register(Category, CategoryAdmin)\nadmin.site.register(App, AppAdmin)\n","sub_path":"tigerapps/www/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"512211904","text":"# -*- coding: utf-8 -*-\nfrom station import Station\n\nimport urllib,urllib2\nfrom BeautifulSoup import BeautifulStoneSoup\nimport re\nfrom datetime import datetime\n\nACTION_URL = \"/public/mobapp/maq.asmx/{0}\"\nSTATION_LIST = \"getRacks\"\nSTATION_INFO = \"getRack\"\n\n \ndef get_all(spec, prefix=\"\"):\n url = \"%s%s\" % (spec.url, ACTION_URL.format(STATION_LIST))\n usock = urllib2.urlopen(prefix+url)\n xml_data = usock.read()\n usock.close()\n soup = BeautifulStoneSoup(xml_data, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)\n data = BeautifulStoneSoup(soup.find('string').contents[0])\n markers = data.findAll('station')\n stations = []\n idx = 0\n for marker in markers:\n try:\n number = int(marker.contents[0])\n if (number < 500):\n station = spec(idx, number)\n stations.append(station)\n idx = idx + 1\n except Exception:\n pass\n return stations\n \nclass SmartBikeCCStation(Station):\n prefix = \"\"\n url = \"\"\n\n def __init__(self, idx, internal_id):\n Station.__init__(self, idx)\n self.number = internal_id\n \"\"\"\n Takes too long to instantiate all the stations, but the\n smartbike API does not give any useful information for\n starters.. :(\n \"\"\"\n self.update()\n \n def update(self, prefix = \"\"):\n usock = urllib2.urlopen(prefix+\"%s%s\" % (self.url, ACTION_URL.format(STATION_INFO)), urllib.urlencode({'id': self.number}))\n xml_data = usock.read()\n usock.close()\n soup = BeautifulStoneSoup(xml_data, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)\n self.from_xml(soup)\n self.timestamp = datetime.now()\n return self\n \n def from_xml(self, xml_data):\n \n \"\"\" xml marker object as in\n \n \n 5\n 13\n 1\n 01-Middelthunsgate (vis-a-vis nr. 
21, retning Kirkeveien)\n 10.709009170532226\n 59.92786125852981\n \n \n \"\"\"\n data = BeautifulStoneSoup(xml_data.find('string').contents[0])\n data = data.contents[0]\n self.bikes = int(data.find('ready_bikes').contents[0])\n self.free = int(data.find('empty_locks').contents[0])\n self.name = str(data.find('description').contents[0])\n self.lat = int(float(data.find('latitude').contents[0])*1E6)\n self.lng = int(float(data.find('longitute').contents[0])*1E6)\n return self\n","sub_path":"lib/smartbike.py","file_name":"smartbike.py","file_ext":"py","file_size_in_byte":2512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"514878293","text":"\"\"\"Train a network on top of the network trained on Painters-by-numbers.\n\nAuthor: Lucas David -- \nLicence: MIT License 2016 (c)\n\n\"\"\"\nimport os\n\nimport tensorflow as tf\nfrom PIL import ImageFile\nfrom keras import callbacks, optimizers, backend as K\nfrom keras.preprocessing.image import ImageDataGenerator\nfrom sacred import Experiment\nfrom sacred.utils import apply_backspaces_and_linefeeds\n\nfrom connoisseur import utils\nfrom connoisseur.models import build_siamese_model\nfrom connoisseur.utils.image import BalancedDirectoryPairsSequence\n\nImageFile.LOAD_TRUNCATED_IMAGES = True\n\nex = Experiment('train-top-network')\n\nex.captured_out_filter = apply_backspaces_and_linefeeds\ntf.logging.set_verbosity(tf.logging.ERROR)\ntf_config = tf.ConfigProto(allow_soft_placement=True)\ntf_config.gpu_options.allow_growth = True\nsession = tf.Session(config=tf_config)\nK.set_session(session)\n\n\n@ex.config\ndef config():\n device = \"/gpu:0\"\n\n data_dir = \"/datasets/pbn/random_299/\"\n train_pairs = 1584\n valid_pairs = 1584\n num_classes = 1584\n classes = None\n\n batch_size = 128\n image_shape = [299, 299, 3]\n architecture = 'InceptionV3'\n weights = 'imagenet'\n last_base_layer = None\n use_gram_matrix = False\n dense_layers = ()\n embedding_units = 1024\n joints = 'multiply'\n trainable_limbs = False\n pooling = 'avg'\n\n predictions_activation = 'softmax'\n limb_weights = '/work/painter-by-numbers/ckpt/limb_weights.hdf5'\n\n opt_params = {'lr': .001}\n dropout_rate = 0.2\n ckpt = 'top-network.hdf5'\n resuming_ckpt = None\n epochs = 100\n steps_per_epoch = None\n validation_steps = None\n use_multiprocessing = False\n workers = 1\n initial_epoch = 0\n early_stop_patience = 30\n tensorboard_tag = 'train-top-network/'\n\n\n@ex.automain\ndef run(_run, image_shape, data_dir, train_pairs, valid_pairs, classes,\n num_classes, architecture, weights, batch_size, last_base_layer, pooling, device, predictions_activation,\n opt_params, dropout_rate, resuming_ckpt, ckpt, steps_per_epoch, epochs, validation_steps, joints,\n workers, use_multiprocessing, initial_epoch, early_stop_patience, use_gram_matrix, dense_layers,\n embedding_units, limb_weights, trainable_limbs, tensorboard_tag):\n report_dir = _run.observers[0].dir\n\n if isinstance(classes, int):\n classes = sorted(os.listdir(os.path.join(data_dir, 'train')))[:classes]\n\n g = ImageDataGenerator(horizontal_flip=True, vertical_flip=True, zoom_range=.2, rotation_range=.2,\n height_shift_range=.2, width_shift_range=.2,\n fill_mode='reflect', preprocessing_function=utils.get_preprocess_fn(architecture))\n\n train_data = BalancedDirectoryPairsSequence(os.path.join(data_dir, 'train'), g, target_size=image_shape[:2],\n pairs=train_pairs, classes=classes, batch_size=batch_size)\n valid_data = BalancedDirectoryPairsSequence(os.path.join(data_dir, 
'valid'), g, target_size=image_shape[:2],\n pairs=valid_pairs, classes=classes, batch_size=batch_size)\n if steps_per_epoch is None:\n steps_per_epoch = len(train_data)\n if validation_steps is None:\n validation_steps = len(valid_data)\n\n with tf.device(device):\n print('building...')\n model = build_siamese_model(image_shape, architecture, dropout_rate, weights, num_classes, last_base_layer,\n use_gram_matrix, dense_layers, pooling, include_base_top=False, include_top=True,\n predictions_activation=predictions_activation, limb_weights=limb_weights,\n trainable_limbs=trainable_limbs, embedding_units=embedding_units, joints=joints)\n print('siamese model summary:')\n model.summary()\n if resuming_ckpt:\n print('loading weights...')\n model.load_weights(resuming_ckpt)\n\n model.compile(loss='binary_crossentropy',\n metrics=['accuracy'],\n optimizer=optimizers.Adam(**opt_params))\n\n print('training from epoch %i...' % initial_epoch)\n try:\n model.fit_generator(\n train_data,\n steps_per_epoch=steps_per_epoch, epochs=epochs,\n validation_data=valid_data,\n validation_steps=validation_steps,\n initial_epoch=initial_epoch,\n use_multiprocessing=use_multiprocessing, workers=workers, verbose=2,\n callbacks=[\n callbacks.TerminateOnNaN(),\n callbacks.EarlyStopping(patience=early_stop_patience),\n callbacks.ReduceLROnPlateau(min_lr=1e-10, patience=int(early_stop_patience // 3)),\n callbacks.TensorBoard(os.path.join(report_dir, tensorboard_tag), batch_size=batch_size),\n callbacks.ModelCheckpoint(os.path.join(report_dir, ckpt), save_best_only=True, save_weights_only=True, verbose=1),\n ])\n except KeyboardInterrupt:\n print('interrupted by user')\n else:\n print('done')\n","sub_path":"experiments/3-painter-by-numbers/4a-train-top-network.py","file_name":"4a-train-top-network.py","file_ext":"py","file_size_in_byte":5227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"247558068","text":"'''\nWhen cleaner does nothing, we will update cost and path, and keep state as it\nis, we will not keep another node for do_nothing part\n'''\nfrom status import *\nfrom initial_state_generator import generate_initial_state\nfrom grid import draw_path\n\npath = [] # Consists of actions 'U', 'D', 'L', 'R', 'S', 'N'\nexplored_states = []\n\n\ndef depth_limited_search(node, limit):\n\n if node.goal_check():\n return node\n\n elif limit == 0:\n return 'cutoff'\n\n else:\n\n cutoff = False\n\n for action in node.actions():\n\n child = node.successor(action)\n\n result = depth_limited_search(child, limit - 1)\n\n if result == 'cutoff':\n cutoff = True\n\n elif result != 'failure':\n return result\n\n if cutoff:\n return 'cutoff'\n\n else:\n return 'failure'\n\n\ndef iterative_deepening(node):\n\n depth = 0\n\n while True:\n\n result = depth_limited_search(node, depth)\n\n if result != 'cutoff':\n return result\n\n depth += 1\n\n\nrandom_initial_state = generate_initial_state(5)\n\nnode = Tree([1025, 7], None)\n\nresult = iterative_deepening(node)\n\npath = result.calc_path()\n\nprint(path)\n\ndraw_path(node.state[1], path)\n","sub_path":"explore.py","file_name":"explore.py","file_ext":"py","file_size_in_byte":1256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"258573381","text":"import discord\nfrom discord.ext import commands, tasks\nfrom itertools import cycle\nfrom dotenv import load_dotenv\nimport os\n\n\"\"\"Loads .env file\"\"\"\nload_dotenv()\nTOKEN = os.getenv('OROKIN_DISCORD_TOKEN')\n\n\"\"\"Setting 
up bot\"\"\"\norokin = commands.Bot(command_prefix='>')\nstatus = cycle(['Warframe', 'Discord Bot', 'Living my bot life'])\n\n\"\"\"Loading the Cogs on Startup\"\"\"\norokin.load_extension(f'cogs.warframe')\norokin.load_extension(f'cogs.warframe_calculators')\norokin.load_extension(f'cogs.warframe_search')\n\n@orokin.command()\n@commands.has_permissions(administrator=True)\nasync def orokin_load(ctx, extension):\n    await ctx.message.delete()\n    orokin.load_extension(f'cogs.{extension}')\n    await ctx.send(f'The extension {extension} was loaded!')\n\n@orokin_load.error\nasync def orokin_load_error(ctx, error):\n    embed = discord.Embed(title='Syntax Error',\n                          colour=discord.Colour(0x9013fe),\n                          description='Did you mistype the extension name?')\n    await ctx.send(embed=embed)\n\n@orokin.command()\n@commands.has_permissions(administrator=True)\nasync def orokin_unload(ctx, extension):\n    await ctx.message.delete()\n    orokin.unload_extension(f'cogs.{extension}')\n    await ctx.send(f'The extension {extension} was unloaded!')\n\n@orokin_unload.error\nasync def orokin_unload_error(ctx, error):\n    embed = discord.Embed(title='Syntax Error',\n                          colour=discord.Colour(0x9013fe),\n                          description='Did you mistype the extension name?')\n    await ctx.send(embed=embed)\n\n\n@orokin.command()\n@commands.has_permissions(administrator=True)\nasync def orokin_reload(ctx, extension):\n    await ctx.message.delete()\n    orokin.unload_extension(f'cogs.{extension}')\n    orokin.load_extension(f'cogs.{extension}')\n    await ctx.send(f'The extension {extension} was reloaded!')\n\n@orokin_reload.error\nasync def orokin_reload_error(ctx, error):\n    embed = discord.Embed(title='Syntax Error',\n                          colour=discord.Colour(0x9013fe),\n                          description='Did you mistype the extension name?')\n    await ctx.send(embed=embed)\n\n\n@orokin.event\nasync def on_ready():\n    \"\"\"When the bot connects to Discord\"\"\"\n    print('Username: ' + orokin.user.name)\n    print('ID: ' + str(orokin.user.id))\n    change_status.start()\n    print(f'{orokin.user} has connected to Discord!')\n\n\n@tasks.loop(seconds=60)\nasync def change_status():\n    await orokin.change_presence(activity=discord.Game(next(status)))\n\n\n@orokin.command()\nasync def orokin_ping(ctx):\n    await ctx.message.delete()\n    await ctx.send(f'Pong! 
My latency is: {round(orokin.latency * 1000)}ms')\n\norokin.run(TOKEN)\n","sub_path":"src/bots/orokin_bot.py","file_name":"orokin_bot.py","file_ext":"py","file_size_in_byte":2691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"395100387","text":"import FWCore.ParameterSet.Config as cms\n\n# -- needed for regional unpacking:\n# from L1TriggerConfig.L1GeometryProducers.l1CaloGeometry_cfi import *\n# from L1TriggerConfig.L1GeometryProducers.l1CaloGeomRecordSource_cff import *\n# es_source l1CaloGeomRecordSource = EmptyESSource {\n# string recordName = \"L1CaloGeometryRecord\"\n# vuint32 firstValid = { 1 }\n# bool iovIsRunNotTime = false\n# }\necalRegionalMuonsFEDs = cms.EDProducer(\"EcalListOfFEDSProducer\",\n Muon = cms.untracked.bool(True),\n MuonSource = cms.untracked.InputTag(\"l1extraParticles\"),\n MU_regionPhiMargin = cms.untracked.double(1.0),\n OutputLabel = cms.untracked.string(''),\n Jets = cms.untracked.bool(False),\n # untracked double MU_regionEtaMargin = 1.0\n # untracked double MU_regionPhiMargin = 1.0\n Ptmin_muon = cms.untracked.double(0.0),\n debug = cms.untracked.bool(False),\n EGamma = cms.untracked.bool(False),\n MU_regionEtaMargin = cms.untracked.double(1.0)\n)\n\n\n","sub_path":"RecoEgamma/EgammaHLTProducers/python/ecalRegionalMuonsFEDs_cfi.py","file_name":"ecalRegionalMuonsFEDs_cfi.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"123024929","text":"from PartNLP.models.validation.validator import Validator\nfrom PartNLP.models.helper.constants import SUPPORTED_PROCESSORS\nfrom PartNLP.models.helper.color import Color\n\n\nclass ProcessorsValidator(Validator):\n def __init__(self, config):\n super().__init__(config)\n \n def isvalid(self):\n return self.is_name_valid()\n\n def is_name_valid(self):\n self.prepare_input_value()\n # Check if processors is empty.\n if not self.config['processors']:\n return False, f'{Color.FAIL}Warning{Color.ENDC} No operator selected. List of supported operations:' \\\n f'{Color.HEADER}{SUPPORTED_PROCESSORS}{Color.ENDC}', self.config['processors']\n # Check if whether operators of the processor are supported or not.\n for p in self.config['processors']:\n if p not in SUPPORTED_PROCESSORS:\n return False, f'{Color.FAIL}{p}{Color.ENDC} Operator is not supported. 
' \\\n f'List of supported operators : {Color.HEADER}{SUPPORTED_PROCESSORS}{Color.ENDC}', p\n return True, '', None\n \n def update_config_value(self, name, old_value, new_value):\n # Add new value to empty processors list\n if not self.config['processors']:\n self.config['processors'].append(new_value)\n # Assign new value to the old one\n else:\n idx = self.config['processors'].index(old_value)\n self.config['processors'][idx] = new_value\n\n def prepare_input_value(self):\n for p in self.config['processors']:\n p = p.upper()\n\n def get_dependencies(self):\n dependencies = self.config['processors']\n return dependencies\n","sub_path":"PartNLP/models/validation/processor_validator.py","file_name":"processor_validator.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"446515617","text":"\"\"\"\nReduce INT frames, carrying out following stages:\n- master bias subtraction\n- master flat field correction\n- 2D background model subtraction\n\"\"\"\n\nfrom photometry.int import Imager\nfrom photometry.utils import setupDirectories\nfrom photometry.reduction import (\n getImageList,\n getMasterBias,\n getMasterFlat,\n correctData,\n subtractBackground,\n )\nimport argparse as ap\nfrom astropy.io import fits\n\ntry:\n FileNotFoundError\nexcept NameError:\n FileNotFoundError = IOError\n\ndef argParse():\n \"\"\"\n Argument parser settings\n \n Parameters\n ----------\n None\n \n Returns\n -------\n args : array-like\n Array of command line arguments\n \n Raises\n ------\n None\n \"\"\"\n \n parser = ap.ArgumentParser()\n \n parser.add_argument('directory',\n help='directory containing raw images',\n type=str)\n \n parser.add_argument('bias_path',\n help='path to master bias',\n type=str)\n \n parser.add_argument('flat_path',\n help='path to master flat',\n type=str)\n \n parser.add_argument('mask_path',\n help='path to bad pixel mask',\n type=str)\n \n return parser.parse_args()\n\nif __name__ == \"__main__\":\n \n args = argParse()\n \n dirs = setupDirectories(args.directory)\n \n # generate master bias if needed\n try:\n master_bias = fits.open(args.bias_path)\n except FileNotFoundError:\n while True:\n check = input('Master bias not found. Generate one? [y/n] ')\n if check in ['y', 'yes', '']:\n print('Checking for bias frames...')\n master_bias = getMasterBias(args.directory)\n break\n elif check in ['n', 'no']:\n quit()\n else:\n print(\"Please respond with 'y' or 'n'...\")\n \n # generate master flat if needed\n try:\n master_flat = fits.open(args.flat_path)\n except FileNotFoundError:\n while True:\n check = input('Master flat not found. Generate one? [y/n] ')\n if check in ['y', 'yes', '']:\n print('Checking for flat field frames...')\n master_flat = getMasterFlat(args.directory)\n break\n elif check in ['n', 'no']:\n quit()\n else:\n print(\"Please respond with 'y' or 'n'...\")\n \n # load in bad pixel mask\n try:\n bp_mask = fits.open(args.mask_path)\n except FileNotFoundError:\n while True:\n check = input('No bad pixel mask found. Continue? 
[y/n] ')\n if check in ['y', 'yes', '']:\n print('Warning: Background to be calculated unmasked.')\n break\n elif check in ['n', 'no']:\n quit()\n else:\n print(\"Please respond with 'y' or 'n'...\")\n \n # obtain list of science frames\n print('Obtaining list of science images...')\n sci_filter = {'OBSTYPE' : Imager.sci_kwd,\n 'CCDXBIN' : Imager.binfactor,\n 'CCDSPEED': Imager.rspeed,\n 'WFFBAND' : Imager.band}\n sci_list = getImageList(args.directory, sci_filter)\n \n for s, sci_path in enumerate(sci_list):\n \n print('Calibrating frame', s+1, 'of', len(sci_list))\n \n sci_name = sci_path.split('/')[-1]\n \n # apply calibrations\n image = correctData(sci_path,\n master_bias,\n master_flat,\n out_path=dirs['calib']+sci_name)\n \n # subtract background model\n image = subtractBackground(dirs['calib']+sci_name,\n mask=bp_mask,\n out_path=dirs['bkg_sub']+sci_name)\n \n \n","sub_path":"int/reduceData.py","file_name":"reduceData.py","file_ext":"py","file_size_in_byte":3958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"65057948","text":"import machine\nimport utime\nimport lbsend as lb\nimport SimpleClient as sc\nimport esp\n\n\ndef logger(lgstr):\n f = open('log.txt', 'a+')\n f.write(lgstr+\"\\n\")\n f.close()\n\n\nif machine.reset_cause() == machine.DEEPSLEEP_RESET:\n # logger(\"Deepsleep Reset Count\")\n sData = lb.readLB()\n utime.sleep(1)\n sc.sendData(sData)\n utime.sleep(1)\n rtc = machine.RTC()\n rtc.irq(trigger=rtc.ALARM0, wake=machine.DEEPSLEEP)\n # sleep for 10 min\n rtc.alarm(rtc.ALARM0, 1800000)\n machine.deepsleep()\n \nelse:\n # logger(\"Normal Reset\")\n print('normal rest')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"180947929","text":"import time\n\nfrom abc import ABCMeta, abstractmethod\nfrom attr import attrs, attrib, validators\nfrom onegov.core.crypto import random_token\nfrom onegov.core.utils import rchop\nfrom onegov.user import _, log, UserCollection\nfrom onegov.user.auth.clients import KerberosClient\nfrom onegov.user.auth.clients import LDAPClient\nfrom onegov.user.models.user import User\nfrom translationstring import TranslationString\nfrom typing import Dict\nfrom typing import Optional\nfrom webob import Response\nfrom webob.exc import HTTPClientError\n\n\nAUTHENTICATION_PROVIDERS = {}\n\n\ndef provider_by_name(providers, name):\n return next((p for p in providers if p.metadata.name == name), None)\n\n\nclass Conclusion(object):\n \"\"\" A final answer of :meth:`AuthenticationProvider`. \"\"\"\n\n\n@attrs(slots=True, frozen=True)\nclass Success(Conclusion):\n \"\"\" Indicates a sucessful authentication. \"\"\"\n\n user: User = attrib()\n note: TranslationString = attrib()\n\n def __bool__(self):\n return True\n\n\n@attrs(slots=True, frozen=True)\nclass Failure(Conclusion):\n \"\"\" Indicates a failed authentication. \"\"\"\n\n note: TranslationString = attrib()\n\n def __bool__(self):\n return False\n\n\n@attrs(slots=True, frozen=True)\nclass ProviderMetadata(object):\n \"\"\" Holds provider-specific metadata. \"\"\"\n\n name: str = attrib()\n title: str = attrib()\n\n\n@attrs()\nclass AuthenticationProvider(metaclass=ABCMeta):\n \"\"\" Base class and registry for third party authentication providers. 
\"\"\"\n\n # stores the 'to' attribute for the integration app\n # :class:`~onegov.user.integration.UserApp`.\n to: Optional[str] = attrib(init=False)\n\n @property\n def name(self):\n \"\"\" Needs to be available for the path in the integration app. \"\"\"\n return self.metadata.name\n\n def __init_subclass__(cls, **kwargs):\n metadata = kwargs.pop('metadata', None)\n\n if metadata:\n global AUTHENTICATION_PROVIDERS\n assert metadata.name not in AUTHENTICATION_PROVIDERS\n\n # reserved names\n assert metadata.name not in ('auto', )\n\n cls.metadata = metadata\n AUTHENTICATION_PROVIDERS[metadata.name] = cls\n\n else:\n assert cls.kind in ('separate', 'integrated')\n\n super().__init_subclass__(**kwargs)\n\n @classmethod\n def configure(cls, **kwargs):\n \"\"\" This function gets called with the per-provider configuration\n defined in onegov.yml. Authentication providers may optionally\n access these values.\n\n The return value is either a provider instance, or none if the\n provider is not available.\n\n \"\"\"\n\n return cls()\n\n\n@attrs()\nclass SeparateAuthenticationProvider(AuthenticationProvider):\n \"\"\" Base class for separate authentication providers.\n\n Seperate providers render a button which the user can click to do a\n completely separate request/response handling that eventually should lead\n to an authenticated user.\n\n \"\"\"\n\n kind = 'separate'\n\n @abstractmethod\n def authenticate_request(self, request):\n \"\"\" Authenticates the given request in one or many steps.\n\n Providers are expected to return one of the following values:\n\n * A conclusion (if the authentication was either successful or failed)\n * None (if the authentication failed)\n * A webob response (to perform handshakes)\n\n This function is called whenever the authentication is initiated by\n the user. If the provider returns a webob response, it is returned\n as is by the calling view.\n\n Therefore, `authenticate_request` must take care to return responses\n in a way that eventually end up fulfilling the authentication. At the\n very least, providers should ensure that all parameters of the original\n request are kept when asking external services to call back.\n\n \"\"\"\n\n @abstractmethod\n def button_text(self, request):\n \"\"\" Returns the translatable button text for the given request.\n\n It is okay to return a static text, if the button remains the same\n for all requests.\n\n The translatable text is parsed as markdown, to add weight to\n the central element of the text. 
For example::\n\n Login with **Windows**\n\n \"\"\"\n\n\n@attrs()\nclass IntegratedAuthenticationProvider(AuthenticationProvider):\n \"\"\" Base class for integrated authentication providers.\n\n Integrated providers use the username/password entered in the normal\n login form and perform authentication that way (with fallback to the\n default login mechanism).\n\n \"\"\"\n\n kind = 'integrated'\n\n @abstractmethod\n def hint(self, request):\n \"\"\" Returns the translatable hint shown above the login mask for\n the integrated provider.\n\n It is okay to return a static text, if the hint remains the same\n for all requests.\n\n The translatable text is parsed as markdown.\n\n \"\"\"\n\n @abstractmethod\n def authenticate_user(self, request, username, password):\n \"\"\" Authenticates the given username/password in a single step.\n\n The function is expected to return an existing user record or None.\n\n \"\"\"\n\n\ndef spawn_ldap_client(**cfg):\n \"\"\" Takes an LDAP configuration as found in the YAML and spawns an LDAP\n client that is connected. If the connection fails, an exception is raised.\n\n \"\"\"\n client = LDAPClient(\n url=cfg.get('ldap_url', None),\n username=cfg.get('ldap_username', None),\n password=cfg.get('ldap_password', None))\n\n try:\n client.try_configuration()\n except Exception as e:\n raise ValueError(f\"LDAP config error: {e}\")\n\n return client\n\n\ndef ensure_user(source, source_id, session, username, role, force_role=True):\n \"\"\" Creates the given user if it doesn't already exist. Ensures the\n role is set to the given role in all cases.\n\n \"\"\"\n\n users = UserCollection(session)\n\n # find the user by source and\n if source and source_id:\n user = users.by_source_id(source, source_id)\n else:\n user = None\n\n # fall back to the username\n user = user or users.by_username(username)\n\n if not user:\n user = users.add(\n username=username,\n password=random_token(),\n role=role\n )\n\n # update the username\n user.username = username\n\n # update the role even if the user exists already\n if force_role:\n user.role = role\n\n # the source of the user is always the last provider that was used\n user.source = source\n user.source_id = source_id\n\n return user\n\n\n@attrs(auto_attribs=True)\nclass RolesMapping(object):\n \"\"\" Takes a role mapping and provides access to it.\n\n A role mapping maps a onegov-cloud role to an LDAP role. For example:\n\n admins -> ACC_OneGovCloud_User\n\n The mapping comes in multiple\n levels. 
For example:\n\n * \"__default__\" Fallback for all applications\n * \"onegov_org\" Namespace specific config\n * \"onegov_org/govikon\" Application specific config\n\n Each level contains a group name for admins, editors and members.\n See onegov.yml.example for an illustrated example.\n\n \"\"\"\n\n roles: Dict[str, Dict[str, str]]\n\n def app_specific(self, app):\n if app.application_id in self.roles:\n return self.roles[app.application_id]\n\n if app.namespace in self.roles:\n return self.roles[app.namespace]\n\n return self.roles.get('__default__')\n\n def match(self, roles, groups):\n \"\"\" Takes a role mapping (the fallback, namespace, or app specific one)\n and matches it against the given LDAP groups.\n\n Returns the matched group or None.\n\n \"\"\"\n groups = {g.lower() for g in groups}\n\n if roles['admins'].lower() in groups:\n return 'admin'\n\n if roles['editors'].lower() in groups:\n return 'editor'\n\n if roles['members'].lower() in groups:\n return 'member'\n\n return None\n\n\n@attrs(auto_attribs=True)\nclass LDAPAttributes(object):\n \"\"\" Holds the LDAP server-specific attributes. \"\"\"\n\n # the name of the Distinguished Name (DN) attribute\n name: str\n\n # the name of the e-mails attribute (returns a list of emails)\n mails: str\n\n # the name of the group membership attribute (returns a list of groups)\n groups: str\n\n # the name of the password attribute\n password: str\n\n # the name of the uid attribute\n uid: str\n\n @classmethod\n def from_cfg(cls, cfg):\n return cls(\n name=cfg.get('name_attribute', 'cn'),\n mails=cfg.get('mails_attribute', 'mail'),\n groups=cfg.get('groups_attribute', 'memberOf'),\n password=cfg.get('password_attribute', 'userPassword'),\n uid=cfg.get('uid_attribute', 'uid'),\n )\n\n\n@attrs(auto_attribs=True)\nclass LDAPProvider(\n IntegratedAuthenticationProvider, metadata=ProviderMetadata(\n name='ldap', title=_(\"LDAP\"))):\n\n \"\"\" Generic LDAP Provider that includes authentication via LDAP. \"\"\"\n\n # The LDAP client to use\n ldap: LDAPClient = attrib()\n\n # The authentication method to use\n #\n # * bind => The authentication is made by rebinding the connection\n # to the LDAP server. This is the more typical approach, but\n # also slower. It requires that users that can authenticate\n # may also create a connection to the LDAP server.\n #\n # (not yet implemented)\n #\n # * compare => Uses the existing LDAP client connection and checks the\n # given password using the LDAP COMPARE operation. 
Since\n # this is the first approach we implemented, it is the\n # default.\n #\n auth_method: str = attrib(\n validator=validators.in_(\n ('bind', 'compare')\n )\n )\n\n # The LDAP attributes configuration\n attributes: LDAPAttributes = attrib()\n\n # Roles configuration\n roles: RolesMapping = attrib()\n\n # Custom hint to be shown in the login view\n custom_hint: str = ''\n\n @classmethod\n def configure(cls, **cfg):\n\n # Providers have to decide themselves if they spawn or not\n if not cfg:\n return None\n\n # LDAP configuration\n ldap = spawn_ldap_client(**cfg)\n\n return cls(\n ldap=ldap,\n auth_method=cfg.get('auth_method', 'compare'),\n attributes=LDAPAttributes.from_cfg(cfg),\n custom_hint=cfg.get('hint', None),\n roles=RolesMapping(cfg.get('roles', {\n '__default__': {\n 'admins': 'admins',\n 'editors': 'editors',\n 'members': 'members'\n }\n })),\n )\n\n def hint(self, request):\n return self.custom_hint\n\n def authenticate_user(self, request, username, password):\n if self.auth_method == 'compare':\n return self.authenticate_using_compare(request, username, password)\n\n raise NotImplementedError()\n\n def authenticate_using_compare(self, request, username, password):\n\n # since this is turned into an LDAP query, we want to make sure this\n # is not used to make broad queries\n assert '*' not in username\n assert '&' not in username\n assert '?' not in username\n\n # onegov-cloud uses the e-mail as username, therefore we need to query\n # LDAP to get the designated name (actual LDAP username)\n query = f\"({self.attributes.mails}={username})\"\n attrs = (\n self.attributes.groups,\n self.attributes.mails,\n self.attributes.uid\n )\n\n # we query the groups at the same time, so if we have a password\n # match we are all ready to go\n entries = self.ldap.search(query, attrs)\n\n # as a fall back, we try to query the uid\n if not entries:\n query = f\"({self.attributes.uid}={username})\"\n entries = self.ldap.search(query, attrs)\n\n # if successful we need the e-mail address\n for name, attrs in (entries or {}).items():\n username = attrs[self.attributes.mails][0]\n\n break\n\n # then, we give up\n if not entries:\n log.warning(f\"No LDAP user with uid ore-mail {username}\")\n return\n\n if len(entries) > 1:\n log.warning(f\"Found more than one user for e-mail {username}\")\n log.warning(f\"All but the first user will be ignored\")\n\n for name, attrs in entries.items():\n groups = attrs[self.attributes.groups]\n uid = attrs[self.attributes.uid][0]\n\n # do not iterate over all entries, or this becomes a very\n # handy way to check a single password against multiple\n # (or possibly all) entries!\n break\n\n # We might talk to a very fast LDAP server which an attacker could use\n # to brute force passwords. We already throttle this on the server, but\n # additional measures never hurt.\n time.sleep(0.25)\n\n if not self.ldap.compare(name, self.attributes.password, password):\n log.warning(f\"Wrong password for {username} ({name})\")\n return\n\n # finally check if we have a matching role\n role = self.roles.match(self.roles.app_specific(request.app), groups)\n\n if not role:\n log.warning(f\"Wrong role for {username} ({name})\")\n return\n\n return ensure_user(\n source=self.name,\n source_id=uid,\n session=request.session,\n username=username,\n role=role)\n\n\n@attrs(auto_attribs=True)\nclass LDAPKerberosProvider(\n SeparateAuthenticationProvider, metadata=ProviderMetadata(\n name='ldap_kerberos', title=_(\"LDAP Kerberos\"))):\n\n \"\"\" Combines LDAP with Kerberos. 
LDAP handles authorisation, Kerberos\n handles authentication.\n\n \"\"\"\n\n # The LDAP client to use\n ldap: LDAPClient = attrib()\n\n # The Kerberos client to use\n kerberos: KerberosClient = attrib()\n\n # LDAP attributes configuration\n attributes: LDAPAttributes = attrib()\n\n # Roles configuration\n roles: RolesMapping = attrib()\n\n # Optional suffix that is removed from the Kerberos username if present\n suffix: Optional[str] = None\n\n @classmethod\n def configure(cls, **cfg):\n\n # Providers have to decide themselves if they spawn or not\n if not cfg:\n return None\n\n # LDAP configuration\n ldap = spawn_ldap_client(**cfg)\n\n # Kerberos configuration\n kerberos = KerberosClient(\n keytab=cfg.get('kerberos_keytab', None),\n hostname=cfg.get('kerberos_hostname', None),\n service=cfg.get('kerberos_service', None))\n\n return cls(\n ldap=ldap,\n kerberos=kerberos,\n attributes=LDAPAttributes.from_cfg(cfg),\n suffix=cfg.get('suffix', None),\n roles=RolesMapping(cfg.get('roles', {\n '__default__': {\n 'admins': 'admins',\n 'editors': 'editors',\n 'members': 'members'\n }\n }))\n )\n\n def button_text(self, request):\n \"\"\" Returns the request tailored to each OS (users won't understand\n LDAP/Kerberos, but for them it's basically their local OS login).\n\n \"\"\"\n user_os = request.agent['os']['family']\n\n if user_os == \"Other\":\n return _(\"Login with operating system\")\n\n return _(\"Login with **${operating_system}**\", mapping={\n 'operating_system': user_os\n })\n\n def authenticate_request(self, request):\n response = self.kerberos.authenticated_username(request)\n\n # handshake\n if isinstance(response, Response):\n return response\n\n # authentication failed\n if response is None or isinstance(response, HTTPClientError):\n return Failure(_(\"Authentication failed\"))\n\n # we got authentication, do we also have authorization?\n name = response\n user = self.request_authorization(request=request, username=name)\n\n if user is None:\n return Failure(_(\"User «${user}» is not authorized\", mapping={\n 'user': name\n }))\n\n return Success(user, _(\"Successfully logged in as «${user}»\", mapping={\n 'user': user.username\n }))\n\n def request_authorization(self, request, username):\n\n if self.suffix:\n username = rchop(username, self.suffix)\n\n entries = self.ldap.search(\n query=f'({self.attributes.name}={username})',\n attributes=[self.attributes.mails, self.attributes.groups])\n\n if not entries:\n log.warning(f\"No LDAP entries for {username}\")\n return None\n\n if len(entries) > 1:\n tip = ', '.join(entries.keys())\n log.warning(f\"Multiple LDAP entries for {username}: {tip}\")\n return None\n\n attributes = next(v for v in entries.values())\n\n mails = attributes[self.attributes.mails]\n if not mails:\n log.warning(f\"No e-mail addresses for {username}\")\n return None\n\n groups = attributes[self.attributes.groups]\n if not groups:\n log.warning(f\"No groups for {username}\")\n return None\n\n # get the common name of the groups\n groups = {g.lower().split(',')[0].split('cn=')[-1] for g in groups}\n\n # get the roles\n roles = self.roles.app_specific(request.app)\n\n if not roles:\n log.warning(f\"No role map for {request.app.application_id}\")\n return None\n\n role = self.roles.match(roles, groups)\n if not role:\n log.warning(f\"No authorized group for {username}\")\n return None\n\n return ensure_user(\n source=self.name,\n source_id=username,\n session=request.session,\n username=mails[0],\n 
            role=role)\n","sub_path":"src/onegov/user/auth/provider.py","file_name":"provider.py","file_ext":"py","file_size_in_byte":18196,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"66671294","text":"\"\"\"\n\nRuntime: 96 ms, faster than 63.38% of Python3 online submissions for Minimum Path Sum.\nMemory Usage: 15.5 MB, less than 27.56% of Python3 online submissions for Minimum Path Sum.\n\n\"\"\"\n\ndef minPathSum(self, grid: List[List[int]]) -> int:\n    if not grid:\n        return 0\n\n    m = len(grid)\n    n = len(grid[0])\n    f = [[0 for j in range(0, n)] for i in range(0, m)]\n\n    f[0][0] = grid[0][0]\n    for i in range(1, m):\n        f[i][0] = f[i-1][0] + grid[i][0]\n    for j in range(1, n):\n        f[0][j] = f[0][j-1] + grid[0][j]\n\n    for i in range(1, m):\n        for j in range(1, n):\n            f[i][j] = min(f[i-1][j], f[i][j-1]) + grid[i][j]\n\n    return f[m-1][n-1]\n","sub_path":"dynamic_programming/0064. Min Path Sum.py","file_name":"0064. Min Path Sum.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"357978280","text":"#-*- coding: UTF-8 -*-\n# author TELILS\n\nimport cv2,os\nfrom PIL import Image \n\n# captures face images of students\n\nname = 'stu'\npic_obtain_num = 10000 # number of photos to capture\nout_path = r'D:\\FaceData\\data\\otherFace'\n\ndef get_face(path=None):\n    cap = cv2.VideoCapture(path)\n    # cap = cv2.VideoCapture(0)\n    classfier = cv2.CascadeClassifier(r'C:\\Users\\ASUS\\Anaconda3\\envs\\tensorflow\\Lib\\site-packages\\cv2\\data\\haarcascade_frontalface_default.xml')\n    cap_check = cap.isOpened()\n    frame_count = 0\n    out_count = 0\n    while cap_check:\n        frame_count += 1\n        if out_count > pic_obtain_num:\n            break\n        cap_check, frame = cap.read() # read one frame\n        \n        \n        params = []\n        params.append(2)\n        # convert the frame to grayscale\n        grey_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) \n        # detect the face positions\n        face_rects = classfier.detectMultiScale(grey_img, scaleFactor = 1.3, minNeighbors = 5)\n        if len(face_rects) > 0:\n            for face in face_rects:\n                x, y, w, h = face\n                image = frame[y - 10: y + h + 10, x - 10: x + w + 10]\n                \n\n                # save the preprocessed image to the target path\n                if not os.path.exists(out_path):\n                    os.makedirs(out_path)\n                cv2.imwrite(out_path+'\\\\'+name+'%d.jpg' % out_count,image)\n                out_count += 1\n                print('Successfully captured face image %d of student %s' % (out_count, name))\n    cap.release()\n    cv2.destroyAllWindows()\n    print('Total frames:', frame_count)\n    print('Extracted faces:', out_count)\n\nif __name__ == \"__main__\":\n    get_face(0) # 0 selects the local webcam\n\n\n","sub_path":"FaceIdentify2.0/stu_picObtain.py","file_name":"stu_picObtain.py","file_ext":"py","file_size_in_byte":1687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"617327673","text":"from database import CursorFromConnectionFromPool\n\nclass Participation:\n    def __init__(self, event_id, joined_date, participant_id, id):\n        self.event_id = event_id\n        self.joined_date = joined_date\n        self.participant_id = participant_id\n        self.id = id\n\n    def __repr__(self):\n        return \"                          +#+  +:+       +#+        #\n#                                                 +#+#+#+#+#+   +#+           #\n#    Created: 2015/07/27 04:41:43 by rbernand          #+#    #+#             #\n#    Updated: 2015/07/27 06:30:20 by rbernand         ###   ########.fr       #\n#                                                                              #\n# **************************************************************************** #\n\nimport tkinter\nfrom tkinter.filedialog import askdirectory\nfrom collections import OrderedDict\nfrom PIL import Image, ImageTk\n\nclass FrameMovieData:\n    def __init__(self, parent):\n        self._frame = tkinter.Frame(parent)\n        self._frame.pack(side=tkinter.LEFT, padx=100, pady=10)\n        self._lbldict = 
OrderedDict()\n self._poster = ImageTk.PhotoImage(Image.open(\"wait.jpg\"))\n self._lbldict[\"Poster\"] = tkinter.Canvas(self._frame, height=500, width=500)\n self._lbldict[\"Poster\"].create_image(0, 0, image=self._poster, anchor=tkinter.NW)\n self._lbldict[\"Name\"] = tkinter.Label(self._frame, text=\"Name:\")\n self._lbldict[\"Director\"] = tkinter.Label(self._frame, text=\"Director:\")\n for lbl in self._lbldict.values():\n lbl.pack()\n\n def edit(self, data):\n img = Image.open(data.getPosterPath())\n self._poster = ImageTk.PhotoImage(img)\n self._lbldict[\"Poster\"].create_image(0, 0, image=self._poster, anchor=tkinter.NW)\n self._lbldict[\"Name\"][\"text\"] = \"Name: %s\" % data[\"Title\"]\n self._lbldict[\"Director\"][\"text\"] = \"Director: %s\" % data[\"Director\"]\n\nclass UI:\n def __init__(self, database):\n self._window = tkinter.Tk()\n self._leftFrame = tkinter.Frame(self._window)\n self._leftFrame.pack(side=tkinter.LEFT, padx=0, pady=0, fill=tkinter.BOTH)\n self._rightFrame = FrameMovieData(self._window)\n self._database = database\n self._add_menu()\n\n def mainloop(self):\n self._window.mainloop()\n\n def save_db(self):\n self._database.save()\n\n def _add_menu(self):\n menubar = tkinter.Menu(self._window)\n menu_file = tkinter.Menu(menubar, tearoff=0)\n menu_file.add_command(label=\"Load\", command=self._database.load)\n menu_file.add_command(label=\"Save\", command=self._database.save)\n menu_file.add_command(label=\"Quit\", command=self._window.quit)\n menubar.add_cascade(label=\"File\", menu=menu_file)\n menu_edit = tkinter.Menu(menubar, tearoff=0)\n menu_edit.add_command(label=\"Add directory\", command=self._ask_dir)\n menu_edit.add_command(label=\"Update Movies\",\\\n command=self._database.movies.update)\n menubar.add_cascade(label=\"Edit\", menu=menu_edit)\n self._window.config(menu=menubar)\n\n def add_list(self, items):\n def _print_data(event):\n w = event.widget\n try:\n index = int(w.curselection()[0])\n value = w.get(index)\n self._rightFrame.edit(self._database.movies.getByName(value))\n except IndexError:\n print(\"No element\")\n listbox = tkinter.Listbox(self._leftFrame)\n for item in items:\n listbox.insert(item.getId(), str(item))\n listbox.pack(anchor=\"ne\", fill=tkinter.BOTH)\n listbox.bind('<>', _print_data)\n\n def _ask_dir(self):\n filepath = askdirectory()\n print(filepath)\n self._database.movies.addDir(filepath)\n self.add_list(self._database.movies)\n","sub_path":"ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":3965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"453490486","text":"import unittest\nimport random\nimport pprint\n\nfrom maxheap import MaxHeap\n\n\nclass CompareTasksError(Exception): pass\n\nclass Task:\n \"\"\"\n Arbitrary class for testing purposes.\n \"\"\"\n def __init__(self, name):\n self.name = name\n\n def __repr__(self):\n return \"Task({0!r})\".format(self.name)\n\n def _exception_operator(self, other):\n raise CompareTasksError(\"Comparing Task objects is not required in this exercise.\")\n __lt__ = _exception_operator\n __le__ = _exception_operator #Wow\n __eq__ = _exception_operator\n __ne__ = _exception_operator\n __ge__ = _exception_operator\n __gt__ = _exception_operator\n\n\nTASK_NAMES = (\n \"Wash dishes\",\n \"Eat food\",\n \"Go to Maari\",\n \"Do math assignments\",\n \"Play Chopin's Etude Op. 25, No. 
9 in G-flat major\",\n \"Watch silly reality show\",\n \"Apply for work\",\n \"Sleep\",\n \"Talk to your plants\",\n \"Go jogging while listening to some brutal black metal\",\n \"Clean room\",\n \"Dance\",\n \"Tell your roommate that life is pretty nice\",\n \"Do something crazy\"\n)\n\ndef generate_tasks_with_priorities(task_names=TASK_NAMES):\n \"\"\"Returns a list with random length containing tuples (p, task).\"\"\"\n task_count = random.randint(3, len(task_names))\n random_tasks = random.sample(task_names, task_count)\n #Take p from curr iteration count provided by enumerate\n return [( task_count - (p+1), Task(task_name)) for p, task_name in enumerate(random_tasks)]\n\n\ndef heap_array_to_string(heap_array, msg=None):\n if msg is None:\n msg = \"Starting at index 0, the heap array in your solution looks like this:\"\n return msg + '\\n' + pprint.pformat(heap_array)\n\n\n#TODO: add test for initializing with sequence, take notes from grader_tests\n\nclass TestMaxHeap(unittest.TestCase):\n\n def setUp(self):\n self.heap = MaxHeap()\n\n def test_clear(self):\n \"\"\"Calling clear for a heap sets its array to an empty list and size to zero. (0p)\"\"\"\n self.heap.array = [(1, 1)]\n self.heap.size = 1\n\n self.heap.clear()\n\n self.assertListEqual(\n [],\n self.heap.array,\n \"heap.clear() should set heap.array to an empty list.\"\n )\n self.assertEqual(\n 0,\n self.heap.size,\n \"heap.clear() should set heap.size to 0.\"\n )\n\n\n def test_empty_heap_size(self):\n \"\"\"The size of an empty heap is zero. (0p)\"\"\"\n self.assertEqual(\n 0,\n self.heap.size,\n \"The size of an empty heap should be zero.\"\n )\n\n def test_empty_heap_is__empty(self):\n \"\"\"is_empty returns True for an empty heap. (0p)\"\"\"\n self.assertTrue(\n self.heap.is_empty(),\n \"Calling is_empty should return True for an empty heap instance.\"\n )\n\n def test_higher_priority_high_low(self):\n \"\"\"_higher_priority returns True when comparing an element with a higher priority to an element with a lower priority. (1p)\"\"\"\n self.heap.array = [(2, 'important'), (1, 'not important')]\n self.assertTrue(\n self.heap._higher_priority(0, 1),\n \"_higher_priority priority should return True when comparing {0} to {1}\"\n .format(self.heap.array[0], self.heap.array[1])\n )\n\n def test_higher_priority_low_high(self):\n \"\"\"_higher_priority returns False when comparing an element with a lower priority to an element with a higher priority. (1p)\"\"\"\n self.heap.array = [(2, 'important'), (1, 'not important')]\n self.assertFalse(\n self.heap._higher_priority(1, 0),\n \"_higher_priority priority should return False when comparing {0} to {1}\"\n .format(self.heap.array[1], self.heap.array[0])\n )\n\n\n def test_size_after_insert(self):\n \"\"\"Inserting values into the heap increments the size counter. (1p)\"\"\"\n inserted_tasks = generate_tasks_with_priorities()\n for pair in inserted_tasks:\n self.heap.insert(pair)\n\n inserted_count = len(inserted_tasks)\n current_size = self.heap.size\n self.assertEqual(\n inserted_count,\n current_size,\n \"After inserting {0} pairs, the size of the heap should be {0}, not {1}\"\n .format(inserted_count, current_size) + '\\n' +\n heap_array_to_string(self.heap.array)\n )\n\n\n def test_empty_heap_top(self):\n \"\"\"Calling top on an empty heap returns None. (1p)\"\"\"\n self.assertIsNone(\n self.heap.top(),\n \"Calling heap.top() should return None for an empty heap instance.\"\n )\n\n def test_empty_heap_pop(self):\n \"\"\"Calling pop on an empty heap raises an exception. 
(1p)\"\"\"\n msg = \"Calling heap.pop() should raise a RuntimeError for an empty heap instance.\"\n with self.assertRaises(RuntimeError, msg=msg):\n self.heap.pop()\n\n\n def test_top_after_insert(self):\n \"\"\"Calling top always returns the object with the greatest priority value. (1p)\"\"\"\n\n inserted_tasks = generate_tasks_with_priorities()\n\n shuffled = list(inserted_tasks)\n random.shuffle(shuffled)\n\n for pair in shuffled:\n self.heap.insert(pair)\n\n expected_value = inserted_tasks[0][1]\n returned_value = self.heap.top()\n self.assertIs(\n returned_value,\n expected_value,\n \"Calling top should have returned {0}, not {1}.\"\n .format(expected_value, returned_value) + '\\n' +\n heap_array_to_string(self.heap.array)\n )\n\n\n def test_pop_after_insert(self):\n \"\"\"Calling pop always returns the object with the greatest priority value and removes it from the heap. (1p)\"\"\"\n\n inserted_tasks = generate_tasks_with_priorities()\n\n shuffled = list(inserted_tasks)\n random.shuffle(shuffled)\n\n for pair in shuffled:\n self.heap.insert(pair)\n\n for i, pair in enumerate(inserted_tasks):\n assertmsg = \"Before calling pop, the heap array in your solution looked like this:\"\n heap_array_before_pop = self.heap.array[:]\n\n popped_value = self.heap.pop()\n expected_value = pair[1]\n self.assertIs(\n popped_value,\n expected_value,\n \"Calling pop should have returned {0}, not {1}.\"\n .format(expected_value, popped_value) + '\\n' +\n heap_array_to_string(heap_array_before_pop, assertmsg)\n )\n\n removed_count = i+1\n self.assertEqual(\n len(self.heap.array),\n len(inserted_tasks) - removed_count,\n \"Calling pop should remove the object with the greatest priority value from the heap array.\" +\n '\\n' +\n heap_array_to_string(heap_array_before_pop, assertmsg)\n )\n\n\nif __name__ == \"__main__\":\n unittest.main(verbosity=2)\n","sub_path":"maxheap/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":6963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"596327044","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Area',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n ('nombre', models.CharField(max_length=50)),\n ],\n options={\n 'verbose_name_plural': 'Áreas',\n 'verbose_name': 'Área',\n },\n ),\n migrations.CreateModel(\n name='LineaTelefonica',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n ('numero', models.PositiveIntegerField()),\n ('descripcion', models.CharField(max_length=50, blank=True)),\n ('area', models.ForeignKey(to='app_facturacion.Area')),\n ],\n options={\n 'verbose_name_plural': 'Líneas telefónicas',\n 'verbose_name': 'Línea telefónica',\n },\n ),\n migrations.CreateModel(\n name='PlanLinea',\n fields=[\n ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n ('cantidad_total', models.PositiveIntegerField()),\n ('cantidad_cubierta', models.PositiveIntegerField()),\n ('linea', models.ForeignKey(to='app_facturacion.LineaTelefonica')),\n ],\n options={\n 'verbose_name_plural': 'Planes de línea',\n 'verbose_name': 'Plan de línea',\n },\n ),\n migrations.CreateModel(\n name='TipoConcepto',\n fields=[\n ('id', 
models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n                ('nombre', models.CharField(max_length=50)),\n                ('tarifa', models.DecimalField(decimal_places=3, max_digits=7)),\n            ],\n            options={\n                'verbose_name_plural': 'Tipos de concepto',\n                'verbose_name': 'Tipo de concepto',\n            },\n        ),\n        migrations.CreateModel(\n            name='TipoConceptoRegex',\n            fields=[\n                ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n                ('regex', models.CharField(max_length=128)),\n                ('tipo_concepto', models.ForeignKey(to='app_facturacion.TipoConcepto')),\n            ],\n            options={\n                'verbose_name_plural': 'Expresiones regulares para tipo de concepto',\n                'verbose_name': 'Expresión regular para tipo de concepto',\n            },\n        ),\n        migrations.CreateModel(\n            name='Usuario',\n            fields=[\n                ('id', models.AutoField(serialize=False, verbose_name='ID', primary_key=True, auto_created=True)),\n                ('nombre', models.CharField(max_length=50)),\n                ('apellido', models.CharField(max_length=50)),\n            ],\n            options={\n                'verbose_name_plural': 'Usuarios',\n                'verbose_name': 'Usuario',\n            },\n        ),\n        migrations.AddField(\n            model_name='planlinea',\n            name='tipo_concepto',\n            field=models.ForeignKey(to='app_facturacion.TipoConcepto'),\n        ),\n        migrations.AddField(\n            model_name='lineatelefonica',\n            name='usuario',\n            field=models.ForeignKey(to='app_facturacion.Usuario'),\n        ),\n    ]\n","sub_path":"app_facturacion/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"150423571","text":"from dtest import Tester, debug\nfrom tools import since\n\nfrom os.path import getsize\nimport time\nimport subprocess\nimport tempfile\n\nclass TestSSTableSplit(Tester):\n\n    def split_test(self):\n        \"\"\"\n        Check that after running compaction, sstablesplit can successfully split\n        the resultant sstable. Check that the split is reversible and that data is readable\n        after carrying out these operations.\n        \"\"\"\n        cluster = self.cluster\n        cluster.populate(1).start(wait_for_binary_proto=True)\n        node = cluster.nodelist()[0]\n        version = cluster.version()\n\n        debug(\"Run stress to insert data\")\n        if version < \"2.1\":\n            node.stress( ['-o', 'insert'] )\n        else:\n            node.stress( ['write', 'n=1000000', '-rate', 'threads=50'] )\n\n        self._do_compaction(node)\n        self._do_split(node, version)\n        self._do_compaction(node)\n        self._do_split(node, version)\n\n        debug(\"Run stress to ensure data is readable\")\n        if version < \"2.1\":\n            node.stress( ['-o', 'read'] )\n        else:\n            node.stress( ['read', 'n=1000000', '-rate', 'threads=25'] )\n\n    def _do_compaction(self, node):\n        debug(\"Compact sstables.\")\n        node.flush()\n        node.compact()\n        node.flush()\n        keyspace = 'keyspace1' if self.cluster.version() >= '2.1' else 'Keyspace1'\n        sstables = node.get_sstables(keyspace, '')\n        debug(\"Number of sstables after compaction: %s\" % len(sstables))\n\n    def _do_split(self, node, version):\n        debug(\"Run sstablesplit\")\n        time.sleep(5.0)\n        node.stop()\n        keyspace = 'keyspace1' if self.cluster.version() >= '2.1' else 'Keyspace1'\n        origsstable = node.get_sstables(keyspace, '')\n        debug(\"Original sstable before split: %s\" % origsstable)\n        node.run_sstablesplit( keyspace=keyspace )\n        sstables = node.get_sstables(keyspace, '')\n        debug(\"Number of sstables after split: %s\" % len(sstables))\n        if version < \"2.1\":\n            assert len(sstables) == 6, \"Incorrect number of sstables after running sstablesplit.\"\n            assert max( [ getsize( sstable ) for sstable in sstables ] ) <= 52428960, \"Max sstables size should be 52428960.\"\n        else:\n            assert len(sstables) == 7, \"Incorrect number of sstables after running sstablesplit.\"\n            sstables.remove(origsstable[0]) # newer sstablesplit does not remove the original sstable after split\n            assert max( [ getsize( sstable ) for sstable in sstables ] ) <= 52428980, \"Max sstables size should be 52428980.\"\n        node.start()\n\n    @since(\"2.1\")\n    def single_file_split_test(self):\n        \"\"\"\n        Covers CASSANDRA-8623\n\n        Check that sstablesplit doesn't crash when splitting a single sstable at a time.\n        \"\"\"\n        cluster = self.cluster\n        cluster.populate(1).start(wait_for_binary_proto=True)\n        node = cluster.nodelist()[0]\n        version = cluster.version()\n\n        debug(\"Run stress to insert data\")\n        node.stress(['write', 'n=2000000', '-rate', 'threads=50',\n                     '-schema', 'compaction(strategy=LeveledCompactionStrategy, sstable_size_in_mb=10)'])\n        self._do_compaction(node)\n        node.stop()\n        with tempfile.TemporaryFile(mode='w+') as tmpfile:\n            node.run_sstablesplit(keyspace='keyspace1', size=2, no_snapshot=True,\n                                  stdout=tmpfile, stderr=subprocess.STDOUT)\n            tmpfile.seek(0)\n            output = tmpfile.read()\n\n        debug(output)\n        failure = output.find(\"java.lang.AssertionError: Data component is missing\")\n        self.assertEqual(failure, -1, \"Error during sstablesplit\")\n","sub_path":"sstablesplit_test.py","file_name":"sstablesplit_test.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"136751815","text":"class Solution(object):\n    def combinationSum3(self, k, n):\n        \"\"\"\n        :type k: int\n        :type n: int\n        :rtype: List[List[int]]\n        \"\"\"\n        n_list = list(range(1, 10))\n        \n        sol = []\n        \n        def _combinationSum3(curr_sol, curr_sum, curr_k, n_list, n, k, sol):\n            if curr_sum == n and curr_k == k:\n                sol.append(curr_sol)\n            elif curr_sum < n and curr_k < k:\n                for i 
in range(len(n_list)):\n                    _combinationSum3(curr_sol+[n_list[i]], curr_sum+n_list[i], curr_k+1, n_list[i+1:], n, k, sol)\n        \n        _combinationSum3([], 0, 0, n_list, n, k, sol)\n        return sol\n        \n        ","sub_path":"216-Combination-Sum-III/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"256735240","text":"from PyQt5.QtWidgets import QGraphicsItem\n\nfrom game_logic.snake import Snake\nfrom .player import Player\nfrom PyQt5.QtGui import QPainter, QColor, QBrush\nfrom PyQt5.QtCore import Qt\nfrom multiprocessing import Process, Pipe\nfrom game_logic.GameConfig import *\nimport random\nimport time\n\n\nclass GameApplication(Process):\n    def __init__(self, pipe):\n        super().__init__(target=self.event_communication, args=[pipe])\n        self.pipe = pipe\n        config = GameConfig()\n        self.current_player = 0\n        self.end_game = False\n        self.players = []\n        self.food_position = {}\n        self.special_food_position = {'x': 0, 'y': 0}\n        self.steps = []\n        self.steps_counter = 0\n        self.special_food = False\n        self.number_of_players = 0\n        self.number_of_snakes_per_player = 0\n        self.timer = 0\n\n        self.colors = [QColor(200, 0, 0), QColor(0, 200, 0), QColor(0, 0, 200), QColor(128, 0, 128)]\n\n    # invoked when the process starts; listens for events and handles errors\n    def event_communication(self, pipe: Pipe):\n        movement_keys = [Qt.Key_A, Qt.Key_D, Qt.Key_W, Qt.Key_S]\n\n        while True:\n\n            try:\n                receive = pipe.recv()\n\n                if receive['event_type'] == 'start_game':\n                    config = receive['data']\n                    self.start_game(config)\n                elif receive['event_type'] == 'key_pressed':\n                    if receive['data'] in movement_keys:\n                        self.check_steps(receive['data'])\n                    elif receive['data'] == Qt.Key_Enter:\n                        self.change_player()\n                    elif receive['data'] == Qt.Key_N:\n                        self.change_snake()\n                elif receive['event_type'] == 'next_player':\n                    self.change_player()\n\n                elif receive['event_type'] == 'delete_all':\n                    self.end_game = True\n                elif receive['event_type'] == 'special_food':\n                    self.add_special_food()\n                elif receive['event_type'] == 'close_app':\n                    break\n\n                pipe.send({'event_type': 'rectangles', 'data': self.get_rectangles_to_draw()})\n\n            except BrokenPipeError as e:\n                print(e)\n                print('Broken pipe error')\n                break\n\n            except EOFError as e:\n                print(e)\n                print('EOFError - game_APP')\n                break\n\n    # prevents the player from moving beyond the allowed number of steps\n    def check_steps(self, movement):\n        if self.steps[self.current_player] > self.steps_counter:\n            self.steps_counter += 1\n            self.handle_movement(movement)\n        if self.steps_counter == self.steps[self.current_player]:\n            self.change_player()\n\n    # moves the snakes and calls the collision-resolving functions\n    def handle_movement(self, key):\n        new_position = self.players[self.current_player].handle_movement(key, self.food_position, self.special_food_position)\n        self.check_game_over(new_position)\n        move = random.randint(1, 2)\n        if new_position['food_eaten']:\n            self.add_food()\n            self.steps[self.current_player] += 1\n            self.pipe.send({'event_type': 'score', 'data': self.current_player})\n\n        if new_position['special_food_eaten']:\n            self.special_food_position = {'x': 0, 'y': 0}\n            if move == 1:\n                self.steps[self.current_player] += 3\n                self.pipe.send({'event_type': 'special_score', 'data': self.current_player, 'score_type': 3})\n            else:\n                self.steps[self.current_player] -= 1\n                self.pipe.send({'event_type': 'special_score', 'data': self.current_player, 'score_type': -1})\n\n    # checks whether the snake is surrounded by another snake\n    def is_surrounded(self, position):\n        counter = 0\n\n        if not self.is_position_free({'x': position['x'] - 20, 'y': position['y']}):\n            counter += 1\n\n        if not self.is_position_free({'x': position['x'] + 20, 'y': position['y']}):\n            counter += 1\n\n        if not self.is_position_free({'x': position['x'], 'y': position['y'] - 20}):\n            counter += 1\n\n        if not self.is_position_free({'x': position['x'], 'y': position['y'] + 20}):\n            counter += 1\n\n        if counter == 4:\n            return True\n\n    # switches to the next player\n    def change_player(self):\n        self.move_food()\n        self.steps_counter = 0\n        self.current_player += 1\n        if self.current_player == self.number_of_players:\n            self.current_player = 0\n\n        if self.players[self.current_player].is_disabled() is False:\n            self.pipe.send({'event_type': 'current_player', 'data': self.current_player})\n            time.sleep(0.1)\n\n            for i in range(len(self.players[self.current_player].snakes)):\n                position = self.players[self.current_player].snake_position(0)\n                if self.is_surrounded(position):\n                    if len(self.players[self.current_player].snakes) == 1:\n                        self.players[self.current_player].remove_rectangles(self.players[self.current_player].current_snake)\n                        print(\"Game Over\")\n                        self.pipe.send({'event_type': 'end_game', 'data': self.current_player})\n                        time.sleep(0.1)\n                        self.change_player()\n                        # self.game_over()\n                    else:\n                        self.players[self.current_player].remove_rectangles(self.players[self.current_player].current_snake)\n                        self.change_snake()\n                        # self.remove_snake()\n        else:\n            self.change_player()\n\n    # checks whether the game is over and, if so, sends the necessary information through the pipe\n    def check_game_over(self, new_position):\n        if self.is_border_collision(new_position):\n            if len(self.players[self.current_player].snakes) == 1:\n                print(\"Game Over\")\n                self.pipe.send({'event_type': 'end_game', 'data': self.current_player})\n                time.sleep(0.1)\n                self.players[self.current_player].remove_rectangles(self.players[self.current_player].current_snake)\n                self.change_player()\n                # self.game_over()\n            else:\n                self.players[self.current_player].remove_rectangles(self.players[self.current_player].current_snake)\n                self.change_snake()\n                # self.remove_snake()\n            return True\n\n        elif self.is_collision_on_position(new_position):\n            if len(self.players[self.current_player].snakes) == 1:\n                self.pipe.send({'event_type': 'end_game', 'data': self.current_player})\n                time.sleep(0.1)\n                self.players[self.current_player].remove_rectangles(self.players[self.current_player].current_snake)\n                self.change_player()\n                # self.game_over()\n            else:\n                self.players[self.current_player].remove_rectangles(self.players[self.current_player].current_snake)\n                self.change_snake()\n                # self.remove_snake()\n            return True\n\n        return False\n\n    # takes the required parameters at the start of the game and adds the players and the food\n    def start_game(self, config: GameConfig):\n        self.number_of_players = config.playerNumber\n        self.number_of_snakes_per_player = config.snakeNumber\n        self.timer = config.turnPlanTime\n        for i in range(self.number_of_players):\n            self.players.append(Player(self.number_of_snakes_per_player, self.colors[i]))\n            self.steps.append(5)\n\n        self.add_food()\n\n    # moves the food around the board\n    def move_food(self):\n        move = random.randint(1, 3)\n        position = random.randint(1, 2) # x or y\n        direction = random.randint(1, 2) # left or right\n        if direction == 1: # right\n            if position == 1: # x\n                new_position = move * 20\n                if self.is_collision({'x': new_position + self.food_position['x'],\n                                      'y': self.food_position['y']}):\n                    self.add_food()\n                else:\n                    self.food_position['x'] += new_position\n\n            elif position == 2: # y\n                new_position = move * 20\n                if self.is_collision({'x': self.food_position['x'],\n                                      'y': new_position + self.food_position['y']}):\n                    self.add_food()\n                else:\n                    self.food_position['y'] += new_position\n\n        elif direction == 2: # left\n            if position == 1: # x\n                new_position = move * 20\n                if self.is_collision({'x': self.food_position['x'] - new_position,\n                                      'y': self.food_position['y']}):\n                    self.add_food()\n                else:\n                    self.food_position['x'] -= new_position\n\n            elif position == 2: # y\n                new_position = move * 20\n                if self.is_collision({'x': self.food_position['x'],\n                                      'y': self.food_position['y'] - new_position}):\n                    self.add_food()\n                else:\n                    self.food_position['y'] -= new_position\n\n    # checks whether the food collided with a wall or with a snake\n    def is_collision(self, f_position):\n        if f_position['x'] < 30:\n            print('left collision')\n            return True\n        elif f_position['x'] > 510:\n            print('right collision')\n            return True\n        elif f_position['y'] < 50:\n            print('up collision')\n            return True\n        elif f_position['y'] > 550:\n            print('down collision')\n            return True\n        elif self.number_of_elements_on_position(f_position) != 0:\n            print('food - snake collision')\n            return True\n\n        return False\n\n    # switches the current player's snake\n    def change_snake(self):\n        self.players[self.current_player].change_snake()\n\n    # sends all elements that need to be drawn through the pipe\n    def get_rectangles_to_draw(self):\n        if self.end_game is False:\n            rectangles = self.get_snake_rectangles()\n            food_rectangle = {'x': self.food_position['x'], 'y': self.food_position['y'], 'width': 20, 'height': 20,\n                              'color': QColor(128, 128, 128)}\n            rectangles.append(food_rectangle)\n            if self.special_food is True:\n                special_food_rectangle = {'x': self.special_food_position['x'], 'y': self.special_food_position['y'],\n                                          'width': 20, 'height': 20, 'color': QColor(0, 0, 0)}\n                rectangles.append(special_food_rectangle)\n\n        else:\n            rectangles = []\n\n        return rectangles\n\n    # collects the rectangles of all snakes\n    def get_snake_rectangles(self):\n        rectangles = []\n        for player in self.players:\n            rectangles += player.get_rectangles()\n        return rectangles\n\n    # adds food to the board\n    def add_food(self):\n        self.food_position['x'] = random.randrange(50, 400, 20)\n        self.food_position['y'] = random.randrange(50, 400, 20)\n        if not self.is_position_free({'x': self.food_position['x'],\n                                      'y': self.food_position['y']}):\n            self.add_food()\n\n    # adds special food to the board\n    def add_special_food(self):\n        self.special_food = True\n        self.special_food_position['x'] = 470\n        self.special_food_position['y'] = 510\n        if not self.is_position_free({'x': self.special_food_position['x'],\n                                      'y': self.special_food_position['y']}):\n            self.add_special_food()\n\n    # checks whether the desired position is free\n    def is_position_free(self, position):\n        return self.number_of_elements_on_position(position) == 0\n\n    # checks whether a collision occurred at the given position\n    def is_collision_on_position(self, position):\n        return self.number_of_elements_on_position(position) >= 2\n\n    # checks whether the snake went out of the board's bounds\n    def is_border_collision(self, position):\n        if self.players[self.current_player].is_border_collison(position):\n            return True\n        return False\n\n    # returns the number of elements at a given position\n    def number_of_elements_on_position(self, position):\n        number_of_occupied = 0\n        rectangles = self.get_snake_rectangles()\n        for rectangle in rectangles:\n            if rectangle['x'] == position['x'] and rectangle['y'] == position['y']:\n                
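                # every snake rectangle occupying this cell counts once; a total of two or more means the cell is contested, which is the threshold is_collision_on_position checks for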
number_of_occupied += 1\n return number_of_occupied\n","sub_path":"game_logic/game_application.py","file_name":"game_application.py","file_ext":"py","file_size_in_byte":12929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"182109713","text":"#!/usr/bin/python3\n\n\ndef matrix_divided(matrix, div):\n \"\"\" This function divides each member of matrix by div \"\"\"\n newlist = []\n newmat = []\n msg1 = \"Each row of the matrix must have the same size\"\n msg2 = \"matrix must be a matrix (list of lists) of integers/floats\"\n if type(div) is not int and type(div) is not float:\n raise TypeError(\"div must be a number\")\n if div == 0:\n raise ZeroDivisionError(\"division by zero\")\n for list in matrix:\n if len(matrix[0]) != len(list):\n raise TypeError(msg1)\n for i in list:\n if type(i) is not int and type(i) is not float:\n raise TypeError(msg2)\n x = round((i / div), 2)\n newlist.append(x)\n newmat.append(newlist)\n newlist = []\n return newmat\n","sub_path":"0x07-python-test_driven_development/2-matrix_divided.py","file_name":"2-matrix_divided.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"363671073","text":"import setuptools\n\n\nconfiguration = {\n \"name\": \"tensors\",\n \"version\": \"0.1\",\n \"description\": \"Interface for working with low-rank tensor approximations\",\n \"classifiers\": [\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: MIT License\",\n \"Programming Language :: Python :: 3.4\",\n \"Topic :: Scientific/Engineering :: Mathematics\",\n ],\n \"keywords\": \"tensor low-rank TT vector matrix\",\n \"url\": \"https://bitbucket.org/thoughteer/tensors\",\n \"author\": \"Iskander Sitdikov\",\n \"author_email\": \"thoughteer@gmail.com\",\n \"license\": \"MIT\",\n \"packages\": setuptools.find_packages(exclude=[\"tests\"]),\n \"install_requires\": [\"beswitch>=0.1\", \"tt>=0.1\"],\n \"zip_safe\": False\n}\nsetuptools.setup(**configuration)\n","sub_path":"pypi_install_script/tensors-0.1.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"16728762","text":"# Copyright 2019 Atalaya Tech, Inc.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom collections import OrderedDict\n\nfrom bentoml import config\n\n\ndef get_open_api_spec_json(bento_service):\n \"\"\"\n The docs for all endpoints in Open API format.\n \"\"\"\n docs = OrderedDict(\n openapi=\"3.0.0\",\n info=OrderedDict(\n version=bento_service.version,\n title=bento_service.name,\n description=\"To get a client SDK, copy all content from docs and paste into \"\n \"editor.swagger.io then click \"\n \"the tab Generate Client and choose the language.\",\n ),\n tags=[{\"name\": \"infra\"}, {\"name\": \"app\"}],\n )\n\n paths = OrderedDict()\n default_response = {\"200\": {\"description\": \"success\"}}\n\n 
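    # Rough shape of the serialized result, for orientation (a sketch added here,
    # values abbreviated): {"openapi": "3.0.0", "info": {"version": ..., "title": ...},
    # "tags": [{"name": "infra"}, {"name": "app"}], "paths": {"/healthz": {...}, ...}}
    # -- the blocks below fill in one "paths" entry per endpoint.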
paths[\"/healthz\"] = OrderedDict(\n get=OrderedDict(\n tags=[\"infra\"],\n description=\"Health check endpoint. Expecting an empty response with status\"\n \" code 200 when the service is in health state\",\n responses=default_response,\n )\n )\n\n paths[\"/metadata\"] = OrderedDict(\n get=OrderedDict(\n tags=[\"infra\"],\n description=\"BentoService metadata endpoint. Returns the service's\"\n \"`bentoml.yml` in JSON format.\",\n responses=default_response,\n )\n )\n\n if config(\"apiserver\").getboolean(\"enable_metrics\"):\n paths[\"/metrics\"] = OrderedDict(\n get=OrderedDict(\n tags=[\"infra\"],\n description=\"Prometheus metrics endpoint\",\n responses=default_response,\n )\n )\n if config(\"apiserver\").getboolean(\"enable_feedback\"):\n paths[\"/feedback\"] = OrderedDict(\n post=OrderedDict(\n tags=[\"infra\"],\n description=\"Provide feedback to prediction results from BentoService. \"\n \"Expecting feedback request payload in JSON format \"\n \"and requires `request_id` field, which can be obtained \"\n \"from any BentoService prediction response's header. \"\n \"Only last key will be considered if keys are repeated.\",\n requestBody=OrderedDict(\n required=True,\n content={\n \"application/json\": {\n \"schema\": {\n \"type\": \"object\",\n \"required\": [\"request_id\"],\n \"properties\": {\"request_id\": {\"type\": \"uuid\"}},\n },\n \"example\": {\n \"request_id\": \"cf420b0f-15fa-013d-a37b-12345678c321\",\n \"example_feedback\": \"key-value pair can be anything\",\n },\n }\n },\n ),\n responses=default_response,\n ),\n )\n\n for api in bento_service.inference_apis:\n path = \"/{}\".format(api.name)\n paths[path] = OrderedDict(\n post=OrderedDict(\n tags=[\"app\"],\n description=api.doc,\n requestBody=OrderedDict(required=True, content=api.request_schema),\n responses=default_response,\n )\n )\n\n docs[\"paths\"] = paths\n return docs\n","sub_path":"bentoml/server/open_api.py","file_name":"open_api.py","file_ext":"py","file_size_in_byte":3962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"342468259","text":"import os\nimport sys\nimport logging\n\nimport re\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nfont = {'family' : 'Times New Roman', 'color' : 'black',\\\n 'weight' : 'normal', 'size' : 18}\n\nif __name__ == '__main__':\n program = os.path.basename(sys.argv[0])\n logger = logging.getLogger(program)\n \n logging.basicConfig(format='%(asctime)s: %(levelname)s: %(message)s')\n logging.root.setLevel(level=logging.INFO)\n logger.info(r\"running %s\" % ''.join(sys.argv))\n\n log_file = os.path.join('log', '1-layer lstm_0.5.txt')\n data = []\n with open(log_file, 'r') as my_file:\n for line in my_file.readlines():\n if not line.startswith('Epoch') and not line.startswith('2017'):\n line_data = re.findall(r'\\d+\\.?\\d*', line)\n data.append(line_data)\n print(line_data)\n\n log_file_8 = os.path.join('log', '8-layer lstm_0.5.bak.txt')\n data_8 = []\n with open(log_file_8, 'r') as my_file_8:\n for line in my_file_8.readlines():\n if not line.startswith('Epoch') and not line.startswith('2017'):\n line_data = re.findall(r'\\d+\\.?\\d*', line)\n data_8.append(line_data)\n # print(line_data)\n\n x = np.linspace(1, 50, 50)\n # print(x)\n\n train_mae = []\n train_pearson = []\n\n for i in range(len(x)):\n train_mae.append(float(data[i][5]))\n train_pearson.append(float(data[i][6]))\n\n train_mae_8 = []\n train_pearson_8 = []\n\n for i in range(len(x)):\n train_mae_8.append(float(data_8[i][5]))\n 
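        # same log-column convention as the 1-layer block above: regex field 5 of each line is the epoch's MAE, field 6 its Pearson r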
train_pearson_8.append(float(data_8[i][6]))\n\n\n\n fig = plt.figure()\n\n ax1 = fig.add_subplot(111)\n line1, = ax1.plot(x, train_mae, linewidth=2.2, color='blue', label='1-layer LSTM ($MAE$)')\n line3, = ax1.plot(x, train_mae_8, linewidth=2.2, color='green', label='8-layer LSTM ($MAE$)')\n\n ax1.set_ylabel('Mean Absolute Error ($MAE$)', fontdict=font)\n ax1.set_title('Performance on Testing Set', fontdict=font)\n ax1.set_xlabel('Epochs', fontdict=font)\n\n ax1.set_ylim(0.11, 0.21)\n\n ax2 = ax1.twinx()\n line2, = ax2.plot(x, train_pearson, linewidth=2.2, color='red', label='1-layer LSTM ($r$)')\n line4, = ax2.plot(x, train_pearson_8, linewidth=2.2, color='cyan', label='8-layer LSTM ($r$)')\n\n ax2.set_ylabel('Pearson correlation coefficient ($r$)', fontdict=font)\n ax2.set_ylim(0.3, 1.0)\n \n\n ax1.grid(True)\n\n\n plt.legend(handles=[line1, line2, line3, line4], prop={'size': 13, 'family' : 'Times New Roman'}, loc='upper right')\n\n plt.tight_layout()\n plt.show()\n\n\n","sub_path":"plot_stacked_test.py","file_name":"plot_stacked_test.py","file_ext":"py","file_size_in_byte":2591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"505794220","text":"import cv2\nimport numpy as np\nimport time\nimport re\nfrom tflite_runtime.interpreter import Interpreter\n\n\n#wget https://storage.googleapis.com/download.tensorflow.org/models/tflite/coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip\n# unzip coco_ssd_mobilenet_v1_1.0_quant_2018_06_29.zip -d Sample_TFLite_model\n\nmin_confidence = 0.5\nmargin = 30\nfile_name = \"image/parking_02.jpg\"\nlabel_name = \"coco_labels.txt\"\nmodel_name = \"detect.tflite\"\nnumber_car = 0\n\ndef load_labels(path):\n \"\"\"Loads the labels file. Supports files with or without index numbers.\"\"\"\n with open(path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n labels = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n if len(pair) == 2 and pair[0].strip().isdigit():\n labels[int(pair[0])] = pair[1].strip()\n else:\n labels[row_number] = pair[0].strip()\n print(labels)\n return labels\n\ndef set_input_tensor(interpreter, image):\n \"\"\"Sets the input tensor.\"\"\"\n tensor_index = interpreter.get_input_details()[0]['index']\n input_tensor = interpreter.tensor(tensor_index)()[0]\n input_tensor[:, :] = image\n\ndef get_output_tensor(interpreter, index):\n \"\"\"Returns the output tensor at the given index.\"\"\"\n output_details = interpreter.get_output_details()[index]\n tensor = np.squeeze(interpreter.get_tensor(output_details['index']))\n return tensor\n\ndef detect_objects(interpreter, image, threshold):\n \"\"\"Returns a list of detection results, each a dictionary of object info.\"\"\"\n set_input_tensor(interpreter, image)\n interpreter.invoke()\n\n # Get all output details\n boxes = get_output_tensor(interpreter, 0)\n classes = get_output_tensor(interpreter, 1)\n scores = get_output_tensor(interpreter, 2)\n count = int(get_output_tensor(interpreter, 3))\n\n results = []\n for i in range(count):\n if scores[i] >= threshold:\n result = {\n 'bounding_box': boxes[i],\n 'class_id': classes[i],\n 'score': scores[i]\n }\n results.append(result)\n return results\n\n\n# Load tflite\nlabels = load_labels(label_name)\ninterpreter = Interpreter(model_name)\ninterpreter.allocate_tensors()\n_, input_height, input_width, _ = interpreter.get_input_details()[0]['shape']\n\n\n# Loading image\nstart_time = time.time()\nimg = cv2.imread(file_name)\nheight, 
width, channels = img.shape\nimage = cv2.resize(img, (300, 300)) \n\n# Detecting objects\nouts = detect_objects(interpreter, image, min_confidence)\n\nfont = cv2.FONT_HERSHEY_PLAIN\ncolor = (0, 255, 0)\nfor out in outs:\n if out['class_id'] == 2 and out['score'] > min_confidence:\n number_car += 1\n ymin, xmin, ymax, xmax = out['bounding_box']\n xmin = int(xmin * width)\n xmax = int(xmax * width)\n ymin = int(ymin * height)\n ymax = int(ymax * height)\n \n label = '{:,.2%}'.format(out['score'])\n print(number_car, label)\n cv2.rectangle(img, (xmin, ymin), (xmax, ymax), color, 2)\n cv2.putText(img, label, (xmin, ymin - 10), font, 1, color, 2)\n \ntext = \"Number of Car is : {} \".format(number_car)\ncv2.putText(img, text, (margin, margin), font, 2, color, 2)\n\ncv2.imshow(\"Number of Car - \"+file_name, img)\n\nend_time = time.time()\nprocess_time = end_time - start_time\nprint(\"=== A frame took {:.3f} seconds\".format(process_time))\n\n\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"carDetect/TFL_car.py","file_name":"TFL_car.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"467157219","text":"# -*- coding: utf-8 -*-\n\nfrom sklearn.tree import tree\nfrom sklearn.datasets import load_iris\nfrom sklearn_porter import Porter\n\n\niris_data = load_iris()\nX = iris_data.data\ny = iris_data.target\n\nclf = tree.DecisionTreeClassifier()\nclf.fit(X, y)\n\nporter = Porter(clf, language='go')\noutput = porter.export()\nprint(output)\n\n\"\"\"\npackage main\n\nimport (\n\t\"os\"\n\t\"fmt\"\n\t\"strconv\"\n)\n\ntype DecisionTreeClassifier struct {\n\tlChilds []int\n\trChilds []int\n\tthresholds []float64\n\tindices []int\n\tclasses [][]int\n}\n\nfunc (dtc DecisionTreeClassifier) predict_(features []float64, node int) int {\n if dtc.thresholds[node] != -2 {\n if features[dtc.indices[node]] <= dtc.thresholds[node] {\n return dtc.predict_(features, dtc.lChilds[node])\n } else {\n return dtc.predict_(features, dtc.rChilds[node])\n }\n }\n var index int = 0\n\tfor i := 0; i < len(dtc.classes[node]); i++ {\n\t if dtc.classes[node][i] > dtc.classes[node][index] {\n\t index = i\n\t }\n\t}\n\treturn index\n}\n\nfunc (dtc DecisionTreeClassifier) predict(features []float64) int {\n return dtc.predict_(features, 0)\n}\n\nfunc main() {\n\n\t// Features:\n\tvar features []float64\n\tfor _, arg := range os.Args[1:] {\n\t\tif n, err := strconv.ParseFloat(arg, 64); err == nil {\n\t\t\tfeatures = append(features, n)\n\t\t}\n\t}\n\n // Parameters:\n lChilds := []int {1, -1, 3, 4, 5, -1, -1, 8, -1, 10, -1, -1, 13, 14, -1, -1, -1}\n rChilds := []int {2, -1, 12, 7, 6, -1, -1, 9, -1, 11, -1, -1, 16, 15, -1, -1, -1}\n thresholds := []float64 {2.45000004768, -2.0, 1.75, 4.94999980927, 1.65000009537, -2.0, -2.0, 1.54999995232, -2.0, 5.44999980927, -2.0, -2.0, 4.85000038147, 5.94999980927, -2.0, -2.0, -2.0}\n indices := []int {2, 2, 3, 2, 3, 2, 2, 3, 2, 2, 2, 2, 2, 0, 2, 2, 2}\n classes := [][]int {{50, 50, 50}, {50, 0, 0}, {0, 50, 50}, {0, 49, 5}, {0, 47, 1}, {0, 47, 0}, {0, 0, 1}, {0, 2, 4}, {0, 0, 3}, {0, 2, 1}, {0, 2, 0}, {0, 0, 1}, {0, 1, 45}, {0, 1, 2}, {0, 1, 0}, {0, 0, 2}, {0, 0, 43}}\n\n\t// Prediction:\n\tclf := DecisionTreeClassifier{lChilds, rChilds, thresholds, indices, classes}\n\testimation := clf.predict(features)\n\tfmt.Printf(\"%d\\n\", 
estimation)\n\n}\n\"\"\"\n","sub_path":"examples/estimator/classifier/DecisionTreeClassifier/go/basics.py","file_name":"basics.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"327294721","text":"# Copyright (C) 2015 Okami, okami@fuzetsu.info\n\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 3\n# of the License, or (at your option) any later version.\n\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n\nimport os, sys, glob, fnmatch\nfrom setuptools import setup\n\n\n# Utility function to read the README file.\n# Used for the long_description. It's nice, because now 1) we have a top level\n# README file and 2) it's easier to type in the README file than to put a raw\n# string in below ...\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\n# Code borrowed from wxPython's setup and config files\n# Thanks to Robin Dunn for the suggestion.\n# I am not 100% sure what's going on, but it works!\ndef opj(*args):\n path = os.path.join(*args)\n return os.path.normpath(path)\n\n\n# https://wiki.python.org/moin/Distutils/Tutorial\ndef find_data_files(srcdir, *wildcards, **kw):\n # get a list of all files under the srcdir matching wildcards,\n # returned in a format to be used for install_data\n def walk_helper(arg, dirname, files):\n if '.svn' in dirname:\n return\n names = []\n lst, wildcards = arg\n for wc in wildcards:\n wc_name = opj(dirname, wc)\n for f in files:\n filename = opj(dirname, f)\n\n if fnmatch.fnmatch(filename, wc_name) and not os.path.isdir(filename):\n names.append(filename)\n if names:\n lst.append((dirname, names))\n\n file_list = []\n recursive = kw.get('recursive', True)\n if recursive:\n os.walk(srcdir, walk_helper, (file_list, wildcards))\n else:\n walk_helper((file_list, wildcards),\n srcdir,\n [os.path.basename(f) for f in glob.glob(opj(srcdir, '*'))])\n return file_list\n\n\nsetup(**{\n 'name': 'aoisora',\n 'version': '0.0.2',\n 'author': 'Okami',\n 'author_email': 'okami@fuzetsu.info',\n 'description': 'Lightweight Bluetooth manager for Bluez5 with PyQt4 UI',\n 'license': 'GPLv3',\n 'keywords': 'bluetooth bluez pyqt',\n 'url': 'https://pypi.python.org/pypi/aoisora',\n 'packages': [\n 'aoisora',\n 'aoisora.device',\n ],\n 'scripts': ['AoiSora'],\n 'long_description': '',\n 'classifiers': [\n 'Development Status :: 3 - Alpha',\n 'Programming Language :: Python :: 3',\n 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',\n ],\n 'install_requires': [\n ],\n 'data_files': find_data_files('share/aoisora', '*.svg', recursive=False),\n 'include_package_data': True,\n})\n","sub_path":"pypi_install_script/aoisora-0.0.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"580397587","text":"'''\nCourtID: ill\nCourt Short Name: Ill.\nAuthor: 
Rebecca Fordon\nReviewer: \nHistory:\n* 2016-06-22: Created by Rebecca Fordon\n'''\n\nfrom datetime import datetime\nimport re\nfrom juriscraper.OralArgumentSite import OralArgumentSite\n\n\nclass Site(OralArgumentSite):\n def __init__(self, *args, **kwargs):\n super(Site, self).__init__(*args, **kwargs)\n self.court_id = self.__module__\n self.url = 'http://www.illinoiscourts.gov/Media/On_Demand.asp' \n\n def _get_download_urls(self):\n path = \"//table[4]//table//tr[position()>1]/td[6]//a/@href\"\n return list(self.html.xpath(path))\n\n def _get_case_dates(self):\n dates = []\n path = \"//table[4]//table//tr[position()>1]/td[1]//div/text()\"\n for s in self.html.xpath(path):\n date_format = '%m/%d/%y'\n try: \n d = datetime.strptime(s, date_format).date()\n dates.append(d)\n except ValueError: \n print(ValueError)\n print(s)\n continue\n return dates\n\n def _get_case_names(self):\n path = '//table[4]//table//tr[position()>1]/td[3]//div/text()'\n cases = []\n for case in self.html.xpath(path):\n if case.strip():\n cases.append(case)\n return cases\n\n def _get_docket_numbers(self):\n path = \"//table[4]//table//tr[position()>1]/td[2]//div\" # right now this is giving a problem because one of the fields has a new line in the middle of it\n dockets = []\n for docket in self.html.xpath(path): \n docket = docket.text_content() \n dockets.append(docket)\n return dockets\n","sub_path":"juriscraper/oral_args/united_states/state/ill.py","file_name":"ill.py","file_ext":"py","file_size_in_byte":1674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"608334192","text":"from django.apps import apps\nfrom django.core.exceptions import FieldDoesNotExist\n\n\ndef spammables():\n # Lists all models that are marked flaggable\n flaggables = []\n for model in apps.get_models():\n try:\n model._meta.get_field_by_name('spam_flag')\n except FieldDoesNotExist:\n continue\n flaggables.append(model)\n return flaggables\n\ndef is_spammable(app, model):\n model_class = apps.get_model(\"{}.{}\".format(app, model))\n return model_class in spammables()\n\ndef get_app_name(model_class_or_instance):\n return model_class_or_instance._meta.app_config.name.split('.')[-1]\n","sub_path":"spam/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"80047458","text":"import pandas as pd\nimport os\n\n\ndef abreFile():\n \n oldAddres = 'C:/Users/beuo/Documents/Demandas/AtualizaMiddleIntegrationVtex/files/'\n newFile = 'C:/Users/beuo/Documents/Demandas/AtualizaMiddleIntegrationVtex/files/extract.xlsx'\n \n \n \n\n def encontraArquivosEmPastaRecursivamente(pasta, extensao):\n arquivosTxt = []\n caminhoAbsoluto = os.path.abspath(pasta)\n for pastaAtual, subPastas, arquivos in os.walk(caminhoAbsoluto):\n arquivosTxt.extend([os.path.join(pastaAtual,arquivo) for arquivo in arquivos if arquivo.endswith('.xls')])\n return arquivosTxt\n\n old_Addrres = encontraArquivosEmPastaRecursivamente(oldAddres, '.xls')\n nome = old_Addrres[0]\n print(nome)\n os.rename(old_Addrres[0],newFile)\n \n wb = pd.ExcelFile('./file/extract.xlsx')\n df = pd.read_excel(wb)\n print(df.head())\n\nabreFile() ","sub_path":".history/toolbox/abreFile_20191127163055.py","file_name":"abreFile_20191127163055.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"368555472","text":"import time\nfrom 
riotwatcher import ApiError\n\nclass RateLimitRule:\n\n    # Requests is the number of requests allowed within the rule's window\n    # Seconds is the number of seconds the rule lasts\n    # Slack is how many seconds to add to each rule to account for lag\n    def __init__(self, requests, seconds, slack=2):\n        self.requests = requests\n        self.seconds = seconds\n        self.slack = slack\n        self.window = []\n\n    def enforce(self):\n        now = time.time()\n        # First remove entries outside of given time window\n        filter_function = lambda t: (now - t) < (self.seconds + self.slack)\n        self.window = list(filter(filter_function, self.window))\n        # Next wait as long as necessary to send another request, i.e. until\n        # the oldest entry falls out of the window\n        if len(self.window) == self.requests:\n            time_to_wait = (self.seconds + self.slack) - (now - self.window[0])\n            time.sleep(time_to_wait)\n        # Finally add current time to time window\n        self.window.append(time.time())\n    \n\n\nclass RateLimiter:\n    # Rules is [RateLimitRule]\n    def __init__(self, rules=[]):\n        self.rules = rules\n    \n    # Call a function with arguments while enforcing rate limit\n    def call(self, func, *args):\n        # enforce rate limit rules\n        for rule in self.rules:\n            rule.enforce()\n        try:\n            return func(*args)\n        except ApiError as err:\n            if err.response.status_code == 400:\n                print(\"Error 400 - Bad request\")\n                exit(1)\n            elif err.response.status_code == 401:\n                print(\"Error 401 - Unauthorized\")\n                exit(1)\n            elif err.response.status_code == 403:\n                print(\"Error 403 - Forbidden\")\n                exit(1)\n            elif err.response.status_code == 404:\n                print(\"Error 404 - Data not found\")\n                exit(1)\n            elif err.response.status_code == 405:\n                print(\"Error 405 - Method not allowed\")\n                exit(1)\n            elif err.response.status_code == 415:\n                print(\"Error 415 - Unsupported media type\")\n                exit(1)\n            elif err.response.status_code == 429:\n                print(\"Error 429 - Rate limit exceeded\")\n                return self.call(func, *args)\n            elif err.response.status_code == 500:\n                print(\"Error 500 - Internal server error\")\n                return self.call(func, *args)\n            elif err.response.status_code == 502:\n                print(\"Error 502 - Bad gateway\")\n                return self.call(func, *args)\n            elif err.response.status_code == 503:\n                print(\"Error 503 - Service unavailable\")\n                return self.call(func, *args)\n            elif err.response.status_code == 504:\n                print(\"Error 504 - Gateway timeout\")\n                return self.call(func, *args)\n            else:\n                raise","sub_path":"python/rate_limit.py","file_name":"rate_limit.py","file_ext":"py","file_size_in_byte":2895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"8660218","text":"import math\n\n\ndef get_count_digits(number: int):\n    \"\"\"Return number of digits in a number.\"\"\"\n\n    if number == 0:\n        return 1\n\n    number = abs(number)\n\n    if number <= 999999999999997:\n        return math.floor(math.log10(number)) + 1\n\n    count = 0\n    while number:\n        count += 1\n        number //= 10\n    return count\n\ndef mensajeRango(pages,currentpage,rows,elementos,search=None):\n    if pages == 0:\n        return ''\n    if not search:\n        if pages == 1:\n            return \"{} filas en total\".format(elementos)\n        elif currentpage == pages:\n            return \"{}-{} de {} elementos\".format((currentpage-1)*rows + 1,elementos,elementos)\n        return \"{}-{} de {} elementos\".format((currentpage - 1) * rows + 1, currentpage * rows, elementos)\n    if pages == 1:\n        return \"{} resultados para {}\".format(elementos, search)\n    elif currentpage == pages:\n        return \"{}-{} de {} para {}\".format((currentpage-1)*rows + 1,elementos,elementos,search)\n    return \"{}-{} de {} para {}\".format((currentpage-1)*rows + 1, currentpage * rows, elementos, search)\n","sub_path":"src/inventario/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"587570437","text":"import os\nimport string\n\nimport gdb\nimport pwndbg.auxv\nimport pwndbg.commands\nimport pwndbg.memoize\nimport pwndbg.net\nimport pwndbg.file\nimport pwndbg.proc\n\ntry:\n    import psutil\nexcept:\n    psutil = None\n\n\"\"\"\nPEDA prints it out like this:\n\nexe = /bin/bash\nfd[0] -> /dev/pts/96\nfd[1] -> /dev/pts/96\nfd[2] -> /dev/pts/96\npid = 31102\nppid = 31096\nuid = [287138, 287138, 287138, 287138]\ngid = [5000, 5000, 5000, 5000]\n\"\"\"\n\nclass Process():\n    def __init__(self, pid=None):\n        if pid is None:\n            pid = pwndbg.proc.pid\n        self.pid = pid\n        self.status\n\n    @property\n    @pwndbg.memoize.reset_on_stop\n    def status(self):\n        raw = pwndbg.file.get('/proc/%i/status' % self.pid)\n\n        status = {}\n        for line in raw.splitlines():\n            if not line:\n                continue\n\n            k_v = line.split(None, 1)\n\n            if len(k_v) == 1:\n                k_v.append(b'')\n\n            k,v = k_v\n\n            # Python3 ftw!\n            k = k.decode('latin-1')\n            v = v.decode('latin-1')\n\n            k = k.lower().rstrip(':')\n\n            # bit fields\n            if set(v) < set(string.hexdigits) and len(v) == 16:\n                try:\n                    v = int(v, 16)\n                except AttributeError:\n                    pass\n\n            # vm stats\n            elif v.endswith(' kB'):\n                v = int(v.split()[0]) * (1<<10)\n            elif v.endswith(' mB'):\n                v = int(v.split()[0]) * (1<<20)\n\n            # misc integers like pid and ppid\n            elif v.isdigit():\n                v = int(v)\n\n            # uid and gid and groups\n            elif all(map(str.isdigit, v.split())):\n                v = list(map(int, v.split()))\n\n            status[k] = v\n            setattr(self, k, v)\n        return status\n\n    @property\n    @pwndbg.memoize.reset_on_stop\n    def open_files(self):\n        fds = {}\n\n        for i in range(self.fdsize):\n            link = pwndbg.file.readlink('/proc/%i/fd/%i' % (pwndbg.proc.pid, i))\n\n            if link:\n                fds[i] = link\n\n        return fds\n\n    @property\n    @pwndbg.memoize.reset_on_stop\n    def connections(self):\n        # Connections look something like this:\n        # socket:[102422]\n        fds = self.open_files\n        socket = 'socket:['\n        result = []\n\n        functions = [pwndbg.net.tcp,\n                     pwndbg.net.unix,\n                     pwndbg.net.netlink]\n\n        for fd, path in fds.items():\n            if socket not in path:\n                continue\n\n            inode = path[len(socket):-1]\n            inode = int(inode)\n\n            for func in functions:\n                for x in func():\n                    if x.inode == inode:\n                        x.fd = fd\n                        result.append(x)\n\n        return tuple(result)\n\n@pwndbg.commands.Command\ndef pid():\n    print(pwndbg.proc.pid)\n\n@pwndbg.commands.Command\ndef procinfo():\n    \"\"\"\n    Display information about the running process.\n    \"\"\"\n    if not psutil:\n        print(\"psutil required but not installed\")\n        return\n    \n    exe = str(pwndbg.auxv.get()['AT_EXECFN'])\n    print(\"%-10s %r\" % (\"exe\", exe))\n\n    proc = Process()\n\n    # qemu-usermode fail!\n    if not proc.status:\n        return\n\n    pid = proc.pid\n    ppid = proc.ppid\n    uids = proc.uid\n    gids = proc.gid\n\n    files = dict(proc.open_files)\n\n    for c in proc.connections:\n        files[c.fd] = str(c)\n\n    print(\"%-10s %s\" % (\"pid\", pid))\n    print(\"%-10s %s\" % (\"ppid\", ppid))\n    print(\"%-10s %s\" % (\"uid\", uids))\n    print(\"%-10s %s\" % (\"gid\", gids))\n    for fd, path in files.items():\n        if not set(path) < set(string.printable):\n            path = repr(path)\n\n        print(\"%-10s %s\" % (\"fd[%i]\" % fd, path))\n\n    return\n","sub_path":"pwndbg/commands/procinfo.py","file_name":"procinfo.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
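A compact, standalone sketch of the /proc/<pid>/status parsing heuristic used by the procinfo record above; the value rules (16-character hex bitfields, ' kB' sizes, plain integers, integer lists) mirror that code, while sample_status and its field values are invented for illustration:

import string

sample_status = "Name:\tbash\nPid:\t31102\nVmRSS:\t    5124 kB\nUid:\t1000\t1000\t1000\t1000"

def parse_status(raw):
    status = {}
    for line in raw.splitlines():
        if not line:
            continue
        key, _, value = line.partition(":")
        value = value.strip()
        if set(value) < set(string.hexdigits) and len(value) == 16:
            value = int(value, 16)                         # bit field
        elif value.endswith(" kB"):
            value = int(value.split()[0]) * (1 << 10)      # memory stat, in bytes
        elif value.isdigit():
            value = int(value)                             # pid, ppid, ...
        elif value and all(part.isdigit() for part in value.split()):
            value = [int(part) for part in value.split()]  # uid/gid quadruples
        status[key.lower()] = value
    return status

print(parse_status(sample_status))
# {'name': 'bash', 'pid': 31102, 'vmrss': 5246976, 'uid': [1000, 1000, 1000, 1000]}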
+{"seq_id":"157998586","text":"import numpy as num\nimport unittest\nfrom pytch.util import consecutive, f2pitch, pitch2f\nimport time\n\n\nclass UtilTestCase(unittest.TestCase):\n\n def test_consecutive(self):\n arr = num.array([2,3,4,7,9,10])\n i = consecutive(arr)\n compare = [num.array([2,3,4]), num.array([7]), num.array([9, 10])]\n for ielement, element in enumerate(i):\n self.assertTrue(all(element == compare[ielement]))\n\n def test_p2f2p(self):\n fs = num.random.random(1000)*1000.\n ps = f2pitch(fs)\n num.testing.assert_almost_equal(fs, pitch2f(ps))\n \n\nif __name__=='__main__':\n unittest.main()\n","sub_path":"test/test_util.py","file_name":"test_util.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"251511187","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\nimport matplotlib.pyplot as plt\n\nimport pandas as pd\nimport numpy as np\nimport statistics\nimport math\nfrom sklearn.model_selection import train_test_split\nimport random\nimport sklearn\nfrom sklearn import ensemble\nfrom itertools import chain\nfrom typing import TextIO\nimport re\n\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import GroupKFold\nfrom sklearn.model_selection import PredefinedSplit\nfrom sklearn.model_selection import cross_validate\n\nfrom sklearn import linear_model\nfrom sklearn.linear_model import Ridge\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn import svm\nfrom sklearn.svm import SVC\nfrom sklearn.svm import LinearSVC\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.ensemble import GradientBoostingClassifier\n#from sklearn.ensemble import VotingRegressor\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import roc_auc_score, average_precision_score, roc_curve, precision_recall_curve, confusion_matrix\n\n\n# In[2]:\n\n\n# data\ngenes = ['RPS14', 'CDC5L', 'POLR2I', 'RPS7', 'XAB2', 'RPS19BP1', 'RPL23A', 'SUPT6H', 'PRPF31', 'U2AF1', 'PSMD7',\n 'Hsp10', 'RPS13', 'PHB', 'RPS9', 'EIF5B', 'RPS6', 'RPS11', 'SUPT5H', 'SNRPD2', 'RPL37', 'RPSA', 'COPS6',\n 'DDX51', 'EIF4A3', 'KARS', 'RPL5', 'RPL32', 'SF3A1', 'RPS3A', 'SF3B3', 'POLR2D', 'RPS15A', 'RPL31', 'PRPF19',\n 'SF3B2', 'RPS4X', 'CSE1L', 'RPL6', 'COPZ1', 'PSMB2', 'RPL7', 'PHB2', 'ARCN1', 'RPA2', 'NUP98', 'RPS3', 'EEF2',\n 'USP39', 'PSMD1', 'NUP93', 'AQR', 'RPL34', 'PSMA1', 'RPS27A']\n\n\ngenes_filter_1 = ['RPS6', 'PRPF19', 'RPL34', 'Hsp10', 'POLR2I', 'EIF5B', 'RPL31',\n 'RPS3A', 'CSE1L', 'XAB2', 'PSMD7', 'SUPT6H', 'EEF2', 'RPS11',\n 'SNRPD2', 'RPL37', 'SF3B3', 'DDX51', 'RPL7', 'RPS9', 'KARS',\n 'SF3A1', 'RPL32', 'PSMB2', 'RPS7', 'EIF4A3', 'U2AF1', 'PSMA1',\n 'PHB', 'POLR2D', 'RPSA', 'RPL23A', 'NUP93', 'AQR', 'RPA2',\n 'SUPT5H', 'RPL6', 'RPS13', 'SF3B2', 'RPS27A', 'PRPF31', 'COPZ1',\n 'RPS4X', 'PSMD1', 'RPS14', 'NUP98', 'USP39', 'CDC5L', 'RPL5',\n 'PHB2', 'RPS15A', 'RPS3', 'ARCN1', 'COPS6']\n\ngene_split_index = {}\nfor i in range(len(genes_filter_1)):\n gene = genes_filter_1[i]\n gene_split_index[gene]= math.floor(i/6)\n\n\nbase_positions = {\n 'A': 0,\n 'T': 1,\n 'C': 2,\n 'G': 3,\n 0: 'A',\n 1: 'T',\n 2: 'C',\n 3: 'G',\n}\n\n\n\n#distinguishing nmer data\n#nmer csv\n# enrichf = '../nmer_results/detailed_nmer_enriched_allfold_stats.csv'\n# depletef = 
'../nmer_results/detailed_nmer_depleted_allfold_stats.csv'\n# df_enriched = pd.read_csv(enrichf)\n# df_depleted = pd.read_csv(depletef)\n# enrichnmer = list(df_enriched['nmer'].values)\n# depltednmer = list(df_depleted['nmer'].values)\n\n# # 4mer\n# dslist=[]\n# for i in enrichnmer:\n# if len(i)==4:\n# dslist.append(i)\n# for j in depltednmer:\n# if len(j)==4:\n# dslist.append(j)\n#print(dslist)\n\n\n\ndef create_gene_splits_kfold(gene_strings, values_to_split: list, kfold, split):\n # use number [0, 1, 2, 3, 4] as index\n assert split >= 0 and split < kfold\n if kfold == 5:\n non_train_genes = genes[split * 11: (split + 1) * 11]\n val_genes = non_train_genes[:5]\n test_genes = non_train_genes[5:]\n elif kfold == 11:\n num_genes = len(genes)\n val_genes = genes[split * 5: (split + 1) * 5]\n if split != 10:\n test_genes = genes[((split + 1) * 5): (split + 2) * 5]\n else:\n test_genes = genes[0:5]\n print('val:', val_genes)\n print('test:', test_genes)\n\n val_ids = list(chain(*[np.where(gene_strings == g)[0] for g in val_genes]))\n test_ids = list(chain(*[np.where(gene_strings == g)[0] for g in test_genes]))\n train_ids = list((set(range(len(gene_strings))) - set(val_ids) - set(test_ids)))\n\n train = [[arr[i] for i in train_ids] for arr in values_to_split]\n val = [[arr[i] for i in val_ids] for arr in values_to_split]\n test = [[arr[i] for i in test_ids] for arr in values_to_split]\n\n return train, val, test\n\ndef create_gene_splits_filter1_kfold(gene_strings, values_to_split: list, kfold, split):\n # use number [0, 1, 2, 3, 4,...] as index\n genes_filter_1 = ['RPS6', 'PRPF19', 'RPL34', 'Hsp10', 'POLR2I', 'EIF5B', 'RPL31',\n 'RPS3A', 'CSE1L', 'XAB2', 'PSMD7', 'SUPT6H', 'EEF2', 'RPS11',\n 'SNRPD2', 'RPL37', 'SF3B3', 'DDX51', 'RPL7', 'RPS9', 'KARS',\n 'SF3A1', 'RPL32', 'PSMB2', 'RPS7', 'EIF4A3', 'U2AF1', 'PSMA1',\n 'PHB', 'POLR2D', 'RPSA', 'RPL23A', 'NUP93', 'AQR', 'RPA2',\n 'SUPT5H', 'RPL6', 'RPS13', 'SF3B2', 'RPS27A', 'PRPF31', 'COPZ1',\n 'RPS4X', 'PSMD1', 'RPS14', 'NUP98', 'USP39', 'CDC5L', 'RPL5',\n 'PHB2', 'RPS15A', 'RPS3', 'ARCN1', 'COPS6']\n assert split >= 0 and split < kfold\n if kfold == 9:\n val_genes = genes_filter_1[split * 6: (split + 1) * 6]\n if split != 8:\n test_genes = genes_filter_1[((split + 1) * 6): (split + 2) * 6]\n else:\n test_genes = genes_filter_1[0:6]\n print('val:', val_genes)\n print('test:', test_genes)\n\n val_ids = list(chain(*[np.where(gene_strings == g)[0] for g in val_genes]))\n test_ids = list(chain(*[np.where(gene_strings == g)[0] for g in test_genes]))\n train_ids = list((set(range(len(gene_strings))) - set(val_ids) - set(test_ids)))\n\n train = [[arr[i] for i in train_ids] for arr in values_to_split]\n val = [[arr[i] for i in val_ids] for arr in values_to_split]\n test = [[arr[i] for i in test_ids] for arr in values_to_split]\n\n return train, val, test\n\n\ndef normalize(a: np.ndarray):\n \"\"\"\n :param a: numpy array of size N x D, where N is number of examples, D is number of features\n :return: a, normalized so that all feature columns are now between 0 and 1\n \"\"\"\n a_normed, norms = sklearn.preprocessing.normalize(a, norm='max', axis=0, return_norm=True)\n print(\"Norms:\", norms)\n return a_normed\n\ndef one_hot_encode_sequence(seq, pad_to_len=-1):\n output_len = len(seq)\n if pad_to_len > 0:\n assert pad_to_len >= output_len\n output_len = pad_to_len\n\n encoded_seq = np.zeros((output_len, 4), dtype=np.float32)\n for i, base in enumerate(seq):\n encoded_seq[i][base_positions[base]] = 1\n return encoded_seq\n\ndef 
parse_guide_linearfold_fasta_into_dict_contrafold():\n fasta_file = open(linearfold_fasta_path_c)\n seq_dict = {}\n score_dict = {}\n\n def parse_one_example(fasta: TextIO):\n descr_line = fasta.readline()\n if not descr_line:\n return None, None, None\n guide_seq = fasta.readline().strip()[36:]\n linearfold_and_result = fasta.readline()\n match = re.match('([\\\\.|\\\\(|\\\\)]+) \\((\\-?[0-9]*\\.[0-9]+)\\)', linearfold_and_result)\n linseq, score = match.groups()\n score = float(score)\n\n assert '>' in descr_line\n\n return guide_seq, linseq, score\n\n while True:\n key, seq, score = parse_one_example(fasta_file)\n if key is None:\n break\n seq_dict[key] = seq\n score_dict[key] = score\n\n fasta_file.close()\n\n return seq_dict, score_dict\n\n\ndef parse_target_flanks_linearfold_fasta_into_dict_contrafold(flank_len = 15):\n flank_num = flank_len\n fname = '../dataset/linearfold_output/linfold_guides_nearby'+str(flank_num)+'_output.txt'\n fasta_file = open(fname)\n seq_dict = {}\n score_dict = {}\n\n def parse_one_example(fasta: TextIO):\n descr_line = fasta.readline()\n if not descr_line:\n return None, None, None\n target_seq = fasta.readline().strip() #target with flanks\n linearfold_and_result = fasta.readline()\n match = re.match('([\\\\.|\\\\(|\\\\)]+) \\((\\-?[0-9]*\\.[0-9]+)\\)', linearfold_and_result)\n linseq, score = match.groups()\n score = float(score)\n\n assert '>' in descr_line\n\n return target_seq, linseq, score #return target seq with flanks\n\n while True:\n key, seq, score = parse_one_example(fasta_file)\n if key is None:\n break\n seq_dict[key] = seq\n score_dict[key] = score\n\n fasta_file.close()\n\n return seq_dict, score_dict\n\n\ndef parse_target_flanks_constraints_linearfold_fasta_into_dict_contrafold(flank_len = 15):\n flank_num = flank_len\n fname = '../dataset/linearfold_output/linfold_guides_constrains_nearby'+str(flank_num)+'_output.txt'\n fasta_file = open(fname)\n seq_dict = {}\n score_dict = {}\n\n def parse_one_example(fasta: TextIO):\n descr_line = fasta.readline()\n if not descr_line:\n return None, None, None\n target_seq = fasta.readline().strip() #target with flanks\n constraints = fasta.readline()\n linearfold_and_result = fasta.readline()\n match = re.match('([\\\\.|\\\\(|\\\\)]+) \\((\\-?[0-9]*\\.[0-9]+)\\)', linearfold_and_result)\n linseq, score = match.groups()\n score = float(score)\n\n assert '>' in descr_line\n\n return target_seq, linseq, score #return target seq with flanks\n\n while True:\n key, seq, score = parse_one_example(fasta_file) #key is target seq with flanks\n if key is None:\n break\n seq_dict[key] = seq #linfold_seq\n score_dict[key] = score #linfold_score\n\n fasta_file.close()\n\n return seq_dict, score_dict\n\n\ndataset_filtered_csv_path = '../dataset/integrated_guide_feature_filtered_f24_mismatch3_all_features.csv'\nlinearfold_fasta_path_c = '../dataset/guides_linearfold_c.txt'\n\n#dataset\ndataframe = pd.read_csv(dataset_filtered_csv_path)\ndataframe = dataframe[dataframe['gene'].isin(genes_filter_1)] #filter out 1 gene\n\nnum_examples = len(dataframe['gene'].values)\n\n#lin_seq_dict, lin_result_dict = parse_guide_linearfold_fasta_into_dict()\nlin_seq_dict, lin_result_dict = parse_guide_linearfold_fasta_into_dict_contrafold()\n\nencoded_guides = [one_hot_encode_sequence(guide).flatten() for guide in dataframe['guide'].values]\n#encoded_linearfold = [one_hot_encode_linearfold(lin_seq_dict[guide], remove_universal_start=True) for guide in\n# dataframe['guide'].values]\n\nlinearfold_dr = [lin_seq_dict[guide][0:36] for 
guide in dataframe['guide'].values]\nref_dr = '.....(((((((.(((....))).))))))).....'\ndr_disr_num =0\nfor jj in range(num_examples):\n if linearfold_dr[jj] == ref_dr:\n linearfold_dr[jj] = 0\n else:\n linearfold_dr[jj] = 1\n dr_disr_num += 1\nprint('dr_disr_num:'+str(dr_disr_num)) \n\n\nlinearfold_vals = [lin_result_dict[guide] for guide in dataframe['guide'].values]\nfor ii in range(num_examples):\n linearfold_vals[ii] = abs(linearfold_vals[ii]-6.48)\n\n#target with nearby seq, dg of native and unfolded\nflank_l = 15\nlin_seq_flanks_dict, lin_result_flanks_dict = parse_target_flanks_linearfold_fasta_into_dict_contrafold(flank_len = flank_l)\nlinearfold_vals_target = [lin_result_flanks_dict[target_flanks] for target_flanks in dataframe['nearby_seq_all_'+str(flank_l)].values] #native energy\nunfold_lin_seq_flanks_dict, unfold_lin_result_flanks_dict = parse_target_flanks_constraints_linearfold_fasta_into_dict_contrafold(flank_len = flank_l)\nunfold_linearfold_vals_target = [unfold_lin_result_flanks_dict[target_flanks] for target_flanks in dataframe['nearby_seq_all_'+str(flank_l)].values] #unfolded target energy\nddg = [] #energy required to unfold the guide binding region\nfor jj in range(num_examples):\n ddg.append((linearfold_vals_target[jj]-unfold_linearfold_vals_target[jj]))\n\n\n#all features\nother_single_value_inputs = np.empty((11, num_examples))\nother_single_value_inputs[0, :] = dataframe['linearfold_vals'].values\nother_single_value_inputs[1, :] = dataframe['is_5UTR'].values\nother_single_value_inputs[2, :] = dataframe['is_CDS'].values\nother_single_value_inputs[3, :] = dataframe['is_3UTR'].values\nother_single_value_inputs[4, :] = dataframe['refseq_target_transcript_percent'].values\nother_single_value_inputs[5, :] = dataframe['target unfold energy']\nother_single_value_inputs[6, :] = dataframe['UTR5_position'].values\nother_single_value_inputs[7, :] = dataframe['CDS_position'].values\nother_single_value_inputs[8, :] = dataframe['UTR3_position'].values\nother_single_value_inputs[9, :] = dataframe['linearfold_dr_flag'].values\nother_single_value_inputs[10, :] = dataframe['GC_content'].values\n\n\n#classification\n#classes = dataframe['binary_relative_ratio'].values\nclasses = dataframe['binary_relative_ratio_075f'].values\n#classes = dataframe['top 20 pct per gene'].values\noutputs = classes.astype(np.float32)\n \n#guides_with_linfold = [np.concatenate((guide, linfold), axis=1) for guide, linfold in zip(encoded_guides, encoded_linearfold)]\n\nall_cols = [np.concatenate((encoded_guides, normalize(other_single_value_inputs.T)),axis=1), outputs]\n \n# group label to split\ngroups = dataframe['gene'].values \n# predefined split index\nfor g in gene_split_index.keys():\n dataframe.loc[dataframe['gene']== g,'predefined split index']= gene_split_index[g]\nps = PredefinedSplit(dataframe['predefined split index'].values)\nprint(ps.get_n_splits())\n\n\n#feature labels\nnuc_labels = []\nfor p in range(30):\n for bi in range(4):\n nuc_label = 'pos'+str(p)+'_'+ base_positions[bi]\n nuc_labels.append(nuc_label)\n \n#nuc_labels = [\"guide_pos\" + str(i) for i in range(120)]\n#feature_list=['linearfold_vals','is_5UTR','is_CDS','is_3UTR','target_transcript_percent','target unfold energy',\n#'UTR5_position','CDS_position','UTR3_position']\nfeature_list_all=['guide free energy','is_5UTR','is_CDS','is_3UTR','target_transcript_percent','target unfolding energy',\n'UTR5_position','CDS_position','UTR3_position','direct repeat disruption','GC content']\n\nfeature_names = 
nuc_labels+feature_list_all\n\ndf_select = pd.DataFrame(data=all_cols[0],\n columns=feature_names)\ndf_select['output label']=all_cols[1]\n\n\n#GradientBoostingClassifier\n# define GradientBoostingClassifier\nclf = ensemble.GradientBoostingClassifier(random_state=0,max_depth=4,\n max_features='sqrt', n_estimators=2000)\n\n#guide seq only model scores\ny = df_select['output label'].values\nx_seq = df_select[nuc_labels].values\ncv_results_base = cross_validate(clf,x_seq, y, cv=ps.split(), scoring=['roc_auc','average_precision'])\n\ndf_auroc = pd.DataFrame()\ndf_auroc['seq only']=cv_results_base['test_roc_auc']\ndf_auprc = pd.DataFrame()\ndf_auprc['seq only']=cv_results_base['test_average_precision']\n\n\nfor f in feature_list_all: #secondary feature loo\n\tfeature_add = nuc_labels+[f]\n\tX = df_select[feature_add].values\n\t#y = df_select['output label'].values\n\tcv_results = cross_validate(clf, X, y, cv=ps.split(), scoring=['roc_auc','average_precision'])\n\tdf_auroc[f]=cv_results['test_roc_auc']\n\tdf_auprc[f]=cv_results['test_average_precision']\n\t\nfeature_groups = [['is_5UTR','is_CDS','is_3UTR'],['UTR5_position','CDS_position','UTR3_position']]\nfor fg in feature_groups:\n feature_add = nuc_labels+fg\n X = df_select[feature_add].values\n #y = df_select['output label'].values\n cv_results = cross_validate(clf, X, y, cv=ps.split(), scoring=['roc_auc','average_precision'])\n df_auroc[(fg[0]+'_three')]=cv_results['test_roc_auc']\n df_auprc[(fg[0]+'_three')]=cv_results['test_average_precision']\n\n\n\ndf_auroc.to_csv('linearmodel_classi_gb_addf_no_nmer_auroc.csv')\ndf_auprc.to_csv('linearmodel_classi_gb_addf_no_nmer_auprc.csv')\n","sub_path":"models/Linear_ensemble/feature importance/linearmodel_classi_gb_addf.py","file_name":"linearmodel_classi_gb_addf.py","file_ext":"py","file_size_in_byte":15478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"654267561","text":"#!/usr/bin/env python\n# fwd-euler/fwd-euler.py\n\nfrom matplotlib import pyplot as plt\n\n\ndef fwd_euler(f, x, y, h, n):\n xs = [x]\n ys = [y]\n for ii in range(0, n):\n y = y + h*f(x, y)\n x = x + h\n ys.append(y)\n xs.append(x)\n return (xs, ys)\n\ndef imp_fwd_euler(f, x, y, h, n):\n xs = [x]\n ys = [y]\n for ii in range(0, n):\n k = h*f(x, y)\n l = h*f(x+h, y+k)\n y = y + 0.5*(k + l)\n x = x + h\n ys.append(y)\n xs.append(x)\n return (xs, ys)\n\n\ndef main():\n f = lambda x, y: 1.0/(1.0 + x*x) - 2.0*y*y\n h = 0.2\n n = 10\n x0 = 0\n y0 = 0\n plt.plot(*fwd_euler(f, x0, y0, h, n), linestyle='--', color='blue')\n plt.plot(*imp_fwd_euler(f, x0, y0, h, n), linestyle='--', color='red')\n plt.show()\n\n\nif __name__=='__main__':\n main()\n\n\n#EOF\n","sub_path":"fwd-euler/fwd-euler/fwd-euler.py","file_name":"fwd-euler.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"271571486","text":"from rest_framework import serializers\nfrom django_restql.serializers import NestedModelSerializer\nfrom django_restql.fields import NestedField\nfrom django_restql.mixins import DynamicFieldsMixin\nfrom shop.models import Product, Order, OrderItem\n\n# DynamicFieldsMixin,\nclass ProductSerializer(serializers.ModelSerializer):\n\tclass Meta:\n\t\tmodel = Product\n\t\tfields = [\n\t\t\t'id', 'name', 'description', 'price', 'product_image', 'supplier'\n\t\t]\n\n# DynamicFieldsMixin,\nclass OrderItemSerializer(NestedModelSerializer):\n\tproduct = NestedField(ProductSerializer, 
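# Illustrative sketch, not one of the dataset records: the guide-feature
# script above builds a PredefinedSplit from per-gene fold indices so that
# guides from one gene never leak between train and test folds. A toy
# version of that pattern (the data here is made up, not the real guide
# feature matrix):
import numpy as np
from sklearn.model_selection import PredefinedSplit, cross_validate
from sklearn.linear_model import LogisticRegression

genes = np.array(['g1', 'g1', 'g2', 'g2', 'g3', 'g3'])
fold_of = {'g1': 0, 'g2': 1, 'g3': 2}           # gene -> fold index
test_fold = np.array([fold_of[g] for g in genes])

X = np.random.rand(6, 4)
y = np.array([0, 1, 0, 1, 0, 1])

ps = PredefinedSplit(test_fold)                  # one fold per gene group
scores = cross_validate(LogisticRegression(), X, y,
                        cv=ps.split(), scoring='accuracy')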
accept_pk=True)\n\tclass Meta:\n\t\tmodel = OrderItem\n\t\tfields = [\n\t\t\t'id','quantity','product'\n\t\t]\n\n\nclass OrderSerializer(DynamicFieldsMixin,NestedModelSerializer):\n\titems = NestedField(OrderItemSerializer, \n\t\tmany=True, \n\t\trequired=True,\n\t\tcreate_ops=[\"add\",\"create\"],\n\t)\n\n\tclass Meta:\n\t\tmodel = Order\n\t\t# fields = ['order_id','items']\n\t\tfields = '__all__'\n\n\n","sub_path":"mysite/shop/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"490923417","text":"from google.cloud import storage\nimport json\nfrom pathlib import Path\nfrom split_output import split_reviews\nfrom multiprocessing import Pool, get_context\nimport argparse\n\n\nclass InvalidArgument(Exception):\n pass\n\n\ndef blob_2_dict(blob):\n blob_dict = None\n bytes_data = blob.download_as_string()\n if bytes_data:\n json_data = json.loads(bytes_data)\n if json_data:\n blob_dict = json_data[0]\n return blob_dict\n\n\ndef divine_args(args, full_list):\n if args.half:\n halfway = int(round((float(len(full_list)))/2))\n if args.half == '1':\n task_list = full_list[:halfway]\n elif args.half == '2':\n task_list = full_list[halfway:]\n else:\n raise InvalidArgument('-d argument must be either 1 or 2')\n else:\n task_list = full_list\n\n if args.workers:\n try:\n num_workers = int(args.workers)\n if num_workers == 0:\n raise InvalidArgument('-n must be nonzero int')\n except InvalidArgument:\n raise InvalidArgument('-n must be nonzero int')\n else:\n num_workers = 1\n\n if args.worker_id:\n try:\n worker_id = int(args.worker_id)\n except InvalidArgument:\n raise InvalidArgument('-i must be int')\n else:\n worker_id = 0\n\n return task_list, num_workers, worker_id\n\n\ndef split_google_blobs(task_chunk, worker_id, bucket_name, bucket_sub_dir):\n print('Worker Number: {}'.format(worker_id))\n for blob in task_chunk:\n blob_dict = blob_2_dict(blob)\n if blob_dict:\n split_reviews(bucket_name, blob_dict, bucket_sub_dir)\n\n\ndef main():\n client = storage.Client()\n bucket = client.bucket('nlp_resources')\n print(\"Fetching Google Bucket list, takes a minute...\")\n full_list = list(bucket.list_blobs(prefix='ta-crawler/raw-output-3'))\n print(\"Number of files in bucket: {}\".format(len(full_list)))\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--workers\", \"-n\", help=\"Number of total workers in this bucket half\")\n parser.add_argument(\"--half\", \"-d\", help=\"Which half of the full list the workers will work on, 1 or 2\")\n parser.add_argument(\"--worker_id\", \"-i\", help=\"worker id\")\n args = parser.parse_args()\n\n task_list, num_workers, worker_id = divine_args(args, full_list)\n bunch_increment = int(round(float(len(task_list))/num_workers))\n task_chunks = [task_list[x:x+bunch_increment] for x in range(0, len(task_list), bunch_increment)]\n bucket_name = 'nlp_resources'\n bucket_sub_dir = 'ta-crawler/'\n task_chunk = task_chunks[worker_id]\n del task_chunks\n del full_list\n split_google_blobs(task_chunk, worker_id, bucket_name, bucket_sub_dir)\n\nif __name__ == \"__main__\":\n main()\n\n\n\n\n\n\n\n\n","sub_path":"babyscrape/compile.py","file_name":"compile.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"186663445","text":"# -*- coding:utf-8 -*-\nimport pymysql\nimport boto3\nimport 
json\n\npymysql.install_as_MySQLdb()\n\nREGION = 'cn-north-1'\nAWS_ACCESS_KEY_ID = 'AKIAOQ4B3DDQTWOAZJUQ'\nAWS_SECRET_ACCESS_KEY = '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'\n\nsqs = boto3.resource('sqs', region_name=REGION, aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY)\nqueue = sqs.get_queue_by_name(QueueName='ab2')\n\nconfig = {\n \"host\": 'ab-web.cr3o99zu93gv.rds.cn-north-1.amazonaws.com.cn',\n \"port\": 3306,\n \"user\": 'ab_web',\n \"password\": '7!aHb#LGMf*qmI0Z',\n \"db\": 'ab_web',\n \"charset\": \"GBK\",\n \"cursorclass\": pymysql.cursors.DictCursor\n}\n\n\n\nif __name__ == \"__main__\":\n connection = pymysql.connect(**config)\n try:\n with connection.cursor() as cursor:\n cursor.execute(\"select * from ab2 where sync_flag=0\")\n for data in cursor.fetchall():\n total = data.get('total')\n for i in range(0, total):\n result = {}\n result['id'] = data.get('id')\n result['url'] = data.get('url')\n result['asin'] = data.get('asin')\n result['url'] = data.get('url')\n response = queue.send_message(MessageBody=json.dumps(result))\n cursor.execute(\"update ab2 set sync_flag =1 where id=%s\" % data.get('id'))\n connection.commit()\n finally:\n connection.close()","sub_path":"add_task.py","file_name":"add_task.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"439025712","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/sachowdh/code/fedora-infra/badges/tahrir-api/alembic/versions/49165401144f_create_new_tables_for_quest.py\n# Compiled at: 2016-09-02 09:22:20\n\"\"\"Create new tables for Quest\n\nRevision ID: 49165401144f\nRevises: 508367dcbbb5\nCreate Date: 2016-09-02 18:52:20.241884\n\n\"\"\"\nrevision = '49165401144f'\ndown_revision = '508367dcbbb5'\nfrom alembic import op\nimport sqlalchemy as sa\n\ndef upgrade():\n op.create_table('milestone', sa.Column('id', sa.Integer(), nullable=False), sa.Column('position', sa.Integer(), nullable=True), sa.Column('badge_id', sa.Unicode(length=128), nullable=False), sa.Column('series_id', sa.Unicode(length=128), nullable=False), sa.ForeignKeyConstraint(['badge_id'], ['badges.id']), sa.ForeignKeyConstraint(['series_id'], ['series.id']), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('id'), sa.UniqueConstraint('position', 'badge_id', 'series_id'))\n op.drop_table('perk')\n\n\ndef downgrade():\n op.create_table('perk', sa.Column('id', sa.INTEGER(), nullable=False), sa.Column('position', sa.INTEGER(), nullable=True), sa.Column('badge_id', sa.VARCHAR(length=128), nullable=False), sa.Column('series_id', sa.VARCHAR(length=128), nullable=False), sa.ForeignKeyConstraint(['badge_id'], ['badges.id']), sa.ForeignKeyConstraint(['series_id'], ['series.id']), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('id'), sa.UniqueConstraint('position', 'badge_id', 'series_id'))\n op.drop_table('milestone')","sub_path":"pycfiles/tahrir-api-0.8.1.tar/49165401144f_create_new_tables_for_quest.py","file_name":"49165401144f_create_new_tables_for_quest.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"401328554","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy as np\n\n\ndef train_classification(train_data, model, 
criterion, optimizer, batch_size, device, scheduler=None, generate_batch=None):\n \n # Set model to training mode\n model.train()\n train_loss = 0\n train_acc = 0\n \n # Create data loader\n data = DataLoader(train_data, batch_size=batch_size, shuffle=True, collate_fn=generate_batch)\n \n # Iterate through data by batch of observations\n for feature, target_class in data:\n\n # Reset gradients\n optimizer.zero_grad()\n \n # Load data to specified device\n feature, target_class = feature.to(device), target_class.to(device)\n \n # Make predictions\n output = model(feature)\n \n # Calculate loss for given batch\n loss = criterion(output, target_class.long())\n\n # Calculate global loss\n train_loss += loss.item()\n \n # Calculate gradients\n loss.backward()\n\n # Update Weights\n optimizer.step()\n \n # Calculate global accuracy\n train_acc += (output.argmax(1) == target_class).sum().item()\n\n # Adjust the learning rate\n if scheduler:\n scheduler.step()\n\n return train_loss / len(train_data), train_acc / len(train_data)\n","sub_path":"python/.ipynb_checkpoints/train_classification_model-checkpoint.py","file_name":"train_classification_model-checkpoint.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"483192463","text":"from scipy import signal\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndef triangular():\n t = np.linspace(0, 1, 500)\n plt.plot(t, signal.sawtooth(2 * np.pi * 5 * t))\n\ndef gausspulse():\n\tt = np.linspace(-1, 1, 2 * 100, endpoint=False)\n\ti, q, e = signal.gausspulse(t, fc=5, retquad=True, retenv=True)\n\tplt.plot(t, i, t, q, t, e, '--')\n\ndef square ():\n\tt = np.linspace(0, 1, 500, endpoint=False)\n\tplt.plot(t, signal.square(2 * np.pi * 5 * t))\n\tplt.ylim(-2, 2)\n\ndef ricker():\n\tpoints = 100\n\ta = 4.0\n\tvec2 = signal.ricker(points, a)\n\tprint(len(vec2))\n\tplt.plot(vec2)\n\tplt.show()\n","sub_path":"load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"122799575","text":"target = \"xilinx\"\naction = \"synthesis\"\n\nfetchto = \"../../../ip_cores\"\n\nsyn_device = \"xc6slx150t\"\nsyn_grade = \"-3\"\nsyn_package = \"fgg900\"\nsyn_top = \"svec_top\"\nsyn_project = \"svec_fine_delay.xise\"\n\nfiles = [ \"wrc-release.ram\" ]\nmodules = { \"local\" : [ \"../../../top/svec/wr\", \"../../../platform\" ] }\n","sub_path":"hdl/syn/svec/wr/Manifest.py","file_name":"Manifest.py","file_ext":"py","file_size_in_byte":298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"395806228","text":"from __future__ import print_function, division\n\nimport turtle\nimport math\n\ndef polyline(t, n, length, angle):\n for i in range(n):\n t.fd(length)\n t.lt(angle)\n\ndef polygon(t, n, length):\n angle = 360.0/n\n polyline( t, n, length, angle)\n\ndef arc(t, r, angle):\n arc_length = 2 * math.pi * r * abs(angle)/360\n n = int(arc_length / 4) + 3\n step_length = arc_length / n\n step_angle = float(angle) / n\n t.lt(step_angle / 2)\n polyline(t, n, step_length, step_angle)\n t.rt(step_angle/2)\n\ndef circle(t, r):\n arc(t, r, 360)\n\nif __name__ == '__main__':\n bob = turtle.Turtle()\n circle(bob, 150)\n turtle.mainloop()","sub_path":"Think 
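# Illustrative sketch, not one of the dataset records: the
# train_classification loop above has a natural evaluation twin -- same
# batching and accounting, but with the model in eval mode and gradient
# tracking disabled. A hedged sketch (`evaluate` is a hypothetical helper
# mirroring the training function's signature):
import torch
from torch.utils.data import DataLoader

def evaluate(data, model, criterion, batch_size, device, generate_batch=None):
    model.eval()                        # freeze dropout / batch-norm stats
    total_loss, total_acc = 0.0, 0.0
    loader = DataLoader(data, batch_size=batch_size, collate_fn=generate_batch)
    with torch.no_grad():               # no gradients needed at eval time
        for feature, target in loader:
            feature, target = feature.to(device), target.to(device)
            output = model(feature)
            total_loss += criterion(output, target.long()).item()
            total_acc += (output.argmax(1) == target).sum().item()
    return total_loss / len(data), total_acc / len(data)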
Python/Polygon.py","file_name":"Polygon.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"42307205","text":"'''Catenary Question\r\nLength of Chain: 80 m\r\nHeight of end poles: 50 m\r\nheight of lowest point on chain: 20m from ground.\r\nHow far apart are the poles?\r\nJuly 20, 2018'''\r\n\r\nfrom slider import Slider\r\nfrom math import cosh\r\n\r\n\r\nscl = 10 #every x- and y-value will be multiplied by scl\r\npole_height = 50\r\nvertex_height = 20\r\ndiff_height = pole_height - vertex_height\r\n\r\n'''Catenary equation is \r\ny = a * cosh(x/a)\r\n'''\r\n\r\n#initialize slider for a\r\nslider1 = Slider(0,30,1) \r\npoints = []\r\ntarg,targy = 0,0 #beginning values\r\n\r\ndef setup():\r\n size(600,600)\r\n slider1.position(20,20)\r\n \r\ndef draw():\r\n global points,targ,targy\r\n background(255)\r\n a = slider1.value()\r\n #translate to the base of the catenary\r\n translate(width/2,height-50)\r\n #draw \"ground\"\r\n line(-300,0,300,0)\r\n #map a to the mouse\r\n #a = map(mouseX,0,600,0.1,20)\r\n \r\n slider1.label = 'a'\r\n #start at left side of screen\r\n x = -30\r\n points = [] #empty the points list\r\n while x < 30: #go up to right side of screen\r\n y = f(x,a) #calculate y\r\n points.append([x,y]) #add point to list\r\n #if the point is at the desired height:\r\n if abs(diff_height - y) < 0.5:\r\n targ = x #save that x-value\r\n targy = y\r\n x += 0.1\r\n #graph the catenary\r\n graphPoints(points)\r\n \r\n #draw the poles\r\n line(targ*scl,0,targ*scl,-targy*scl)\r\n ellipse(targ*scl,-targy*scl,10,10) #intersection point\r\n line(-targ*scl,0,-targ*scl,-targy*scl)\r\n ellipse(-targ*scl,-targy*scl,10,10) #intersection point\r\n println(\"distance: \"+ str(2*targ))\r\n println(\"height: \"+str(diff_height+f(0,a)))\r\n fill(255,0,0)\r\n textSize(18)\r\n text(str(2*integral(f,a,targ,100)),-100,-300)\r\n \r\ndef f(x,a):\r\n try:\r\n return a * cosh(x/float(a))\r\n except ZeroDivisionError:\r\n return a * cosh(x/float(a+0.1))\r\n \r\ndef graphPoints(pointList):\r\n '''Graphs points in pointList using segments'''\r\n for i,pt in enumerate(pointList):\r\n if i Tuple[np.ndarray, np.ndarray]:\n \"\"\"Generates batch samples of representations and factors.\n\n Args:\n dataset (BaseDataset): Dataset class.\n repr_fn (callable): Function that takes observation as input and\n outputs a representation.\n batch_size (int, optional): Batch size to sample points.\n num_points (int, optional): Number of samples.\n\n Returns:\n reprs (np.array): Represented latents `(num_points, num_latents)`\n factors (np.array): True factors `(num_points, num_factors)`\n \"\"\"\n\n reprs = []\n factors = []\n for i in range(num_points // batch_size + 1):\n # Calculate batch size\n batch_iter = min(num_points - batch_size * i, batch_size)\n\n # Sample fixed factor and observations\n factor_index = dataset.sample_factor_index()\n data, targets = dataset.sample_fixed_batch(batch_iter, factor_index)\n\n # Representation\n rep = repr_fn(data)\n\n # Add repr and target to list\n reprs.append(rep)\n factors.append(targets)\n\n return np.vstack(reprs), np.vstack(factors)\n\n\ndef discretize_target(target: Union[np.ndarray, Tensor],\n num_bins: int) -> np.ndarray:\n \"\"\"Discretizes targets.\n\n Args:\n target (np.ndarray or torch.Tensor): Targets of shape\n `(num_points, num_latents)`.\n num_bins (int): Number of bins.\n\n Returns:\n discretized (np.array): Discretized targets of shape\n `(num_points, num_latents)`.\n 
\"\"\"\n\n discretized = np.zeros_like(target)\n for i in range(target.shape[0]):\n discretized[i] = np.digitize(\n target[i], np.histogram(target[i], num_bins)[1][:-1])\n\n return discretized\n\n\ndef discrete_mutual_info(mus: Union[np.ndarray, Tensor],\n ys: Union[np.ndarray, Tensor]) -> np.ndarray:\n \"\"\"Discrete Mutual Information for all code-factor pairs.\n\n Args:\n mus (np.ndarray or torch.Tensor): Mean representation vector of shape\n `(num_samples, num_codes)`.\n ys (np.ndarray or torch.Tensor): True factor vector of shape\n `(num_samples, num_factors)`.\n\n Returns:\n mi (np.ndarray): MI matrix of shape `(num_codes, num_factors)`.\n \"\"\"\n\n num_codes = mus.shape[1]\n num_factors = ys.shape[1]\n\n mi = np.zeros((num_codes, num_factors))\n for i in range(num_codes):\n for j in range(num_factors):\n mi[i, j] = sklearn.metrics.mutual_info_score(ys[:, j], mus[:, i])\n return mi\n\n\ndef discrete_entropy(ys: Union[np.ndarray, Tensor]) -> np.ndarray:\n \"\"\"Discrete Mutual Information for all code-factor pairs.\n\n Args:\n ys (np.ndarray or torch.Tensor): Vector of shape\n `(num_samples, num_factors)`.\n\n Returns:\n h (np.ndarray): Entropy vector of shape `(num_factors)`.\n \"\"\"\n\n num_factors = ys.shape[1]\n h = np.zeros(num_factors)\n for i in range(num_factors):\n h[i] = sklearn.metrics.mutual_info_score(ys[:, i], ys[:, i])\n return h\n","sub_path":"dgmvae/metrics/util_funcs.py","file_name":"util_funcs.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"535128697","text":"\"\"\"Queue configuration\n======================\n\n.. highlight:: cfg\n\nSparkplug can automatically declare and configure queues on startup::\n\n [queue:events]\n # Will the queue be declared as durable, and survive broker restarts?\n durable = True\n # Will the queue be declared as auto-deleted, and be removed if all\n # consumers exit?\n auto_delete = False\n # Is the queue exclusive to this program?\n exclusive = False\n\nQueues that are used by a consumer__ must be part of sparkplug's configuration.\nHowever, if the queue is expected to already exist, it's sufficient to mark\nthe queue as passive::\n\n [queue:expected]\n passive = True\n\n__ `Consumer configuration`_\n\"\"\"\n\nfrom sparkplug.config import DependencyConfigurer\nfrom sparkplug.config.types import convert, parse_bool, parse_dict\nfrom sparkplug.logutils import LazyLogger\n\n_log = LazyLogger(__name__)\n\n\nclass QueueConfigurer(DependencyConfigurer):\n def __init__(self, name, **kwargs):\n DependencyConfigurer.__init__(self)\n\n self.queue = name\n\n create_args = dict(kwargs)\n convert(create_args, 'durable', parse_bool)\n convert(create_args, 'auto_delete', parse_bool)\n convert(create_args, 'exclusive', parse_bool)\n convert(create_args, 'passive', parse_bool)\n convert(create_args, 'arguments', parse_dict)\n self.create_args = create_args\n\n dlx = create_args \\\n .get('arguments', {}) \\\n .get('x-dead-letter-exchange', None)\n if dlx:\n self.depends_on(dlx)\n\n def start(self, channel):\n _log.debug(\"Declaring queue %s (%r)\", self.queue, self.create_args)\n\n channel.queue_declare(queue=self.queue, **self.create_args)\n\n def __repr__(self):\n return \"Queue(queue={0.queue})\".format(self)\n","sub_path":"sparkplug/config/queue.py","file_name":"queue.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"281048729","text":"import gym 
\nimport numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom paiutils.reinforcement import (\n DQNAgent, StochasticPolicy, GreedyPolicy,\n ExponentialDecay, RingMemory, Memory, GymWrapper\n)\nfrom paiutils.reinforcement_agents import (\n A2CAgent\n)\nfrom paiutils.neural_network import (\n dense\n)\n\n\ndef create_amodel(state_shape, action_shape):\n inputs = keras.layers.Input(shape=state_shape)\n x = dense(128)(inputs)\n x = dense(128)(x)\n outputs = dense(action_shape[0], activation='softmax',\n batch_norm=False)(x)\n \n amodel = keras.Model(inputs=inputs,\n outputs=outputs)\n amodel.compile(optimizer=keras.optimizers.Adam(.003),\n loss='mse', experimental_run_tf_function=False)\n amodel.summary()\n return amodel\n\n\ndef create_qmodel(state_shape, action_shape):\n inputs = keras.layers.Input(shape=state_shape)\n x = dense(64)(inputs)\n x1 = dense(64)(x)\n x2 = dense(64)(x)\n #outputs = keras.layers.Dense(action_shape[0])(x)\n outputs = DQNAgent.get_dueling_output_layer(action_shape, \n dueling_type='avg')(x1, x2)\n qmodel = keras.Model(inputs=inputs,\n outputs=outputs)\n qmodel.compile(optimizer=keras.optimizers.Adam(.01),\n loss='mae', experimental_run_tf_function=False)\n qmodel.summary()\n return qmodel\n\n\ndef create_cmodel(state_shape):\n inputs = keras.layers.Input(shape=state_shape)\n x = dense(64)(inputs)\n x = dense(64)(x)\n outputs = keras.layers.Dense(1)(x)\n\n cmodel = keras.Model(inputs=inputs,\n outputs=outputs)\n cmodel.compile(optimizer=keras.optimizers.Adam(.01),\n loss='mse', experimental_run_tf_function=False)\n cmodel.summary()\n return cmodel\n\n\nif __name__ == '__main__':\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True \n sess = tf.compat.v1.Session(config=config)\n\n # Solved = Undefined ~= -42.37 avg. 
reward over 100 episodes\n\n solved = -42.37\n save_dir = ''\n env = gym.make('Acrobot-v1')\n max_steps = env._max_episode_steps # (500)\n env = GymWrapper(env, (6,), (3,))\n\n agents_to_use = ['DQN', 'A2C']\n agent_to_use = agents_to_use[0]\n\n if agent_to_use == 'DQN':\n policy = StochasticPolicy(\n GreedyPolicy(), ExponentialDecay(1, .001, .01),\n .01, env.action_shape[0]\n )\n qmodel = create_qmodel(env.state_shape, env.action_shape)\n agent = DQNAgent(policy, qmodel, .99,\n create_memory=lambda: RingMemory(200000),\n enable_target=True, enable_double=False, \n enable_PER=False)\n\n agent.set_playing_data(training=False, memorizing=True)\n env.play_episodes(agent, 200, max_steps, random=True,\n verbose=True, episode_verbose=False,\n render=False)\n\n agent.set_playing_data(training=True, memorizing=True, \n learns_in_episode=False, batch_size=32, \n mini_batch=10000, epochs=1,\n verbose=True, target_update_interval=1, tau=.01)\n for ndx in range(1):\n print(f'Save Loop: {ndx}')\n env.play_episodes(agent, 1, max_steps,\n verbose=True, episode_verbose=False,\n render=True)\n result = env.play_episodes(agent, 19, max_steps,\n verbose=True, episode_verbose=False,\n render=False)\n agent.save(save_dir, note=f'DQN_{ndx}_{result}')\n if result >= solved:\n break\n\n agent.set_playing_data(training=False, memorizing=False)\n avg = env.play_episodes(agent, 100, max_steps,\n verbose=True, episode_verbose=False,\n render=False)\n print(len(agent.states))\n print(avg)\n elif agent_to_use == 'A2C':\n amodel = create_amodel(env.state_shape, env.action_shape)\n cmodel = create_cmodel(env.state_shape)\n agent = A2CAgent(amodel, cmodel, .99, lambda_rate=.95,\n create_memory=lambda: RingMemory(200000))\n\n agent.set_playing_data(training=False, memorizing=True)\n env.play_episodes(agent, 200, max_steps, random=True,\n verbose=True, episode_verbose=False,\n render=False)\n\n agent.set_playing_data(training=True, memorizing=True,\n batch_size=64, mini_batch=10000, epochs=1,\n entropy_coef=0.0,\n verbose=True)\n for ndx in range(18):\n print(f'Save Loop: {ndx}')\n env.play_episodes(agent, 1, max_steps,\n verbose=True, episode_verbose=False,\n render=True)\n result = env.play_episodes(agent, 19, max_steps,\n verbose=True, episode_verbose=False,\n render=False)\n agent.save(save_dir, note=f'A2C_{ndx}_{result}')\n if result >= solved:\n break\n\n agent.set_playing_data(training=False, memorizing=False)\n avg = env.play_episodes(agent, 100, max_steps,\n verbose=True, episode_verbose=False,\n render=False)\n print(len(agent.states))\n print(avg)","sub_path":"video14/acrobot_example.py","file_name":"acrobot_example.py","file_ext":"py","file_size_in_byte":5664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"261667499","text":"# -*- coding: utf-8 -*- \n\"\"\"\n Created with IntelliJ IDEA.\n Description:\n User: jinhuichen\n Date: 2/13/2019 2:44 PM \n Description: \n\"\"\"\nimport numpy as np\nimport simpleflow as sf\nimport matplotlib.pyplot as plt\n\n# # Create a graph\n# with sf.Graph().as_default():\n# a = sf.constant(1.0, name='a')\n# b = sf.constant(2.0, name='b')\n# result = sf.add(a, b, name='a+b')\n# c = sf.constant(3.0, name=\"c\")\n# result = sf.multiply(result, c, name=\"c * (a + b)\")\n# # Create a session to run the graph\n# with sf.Session() as sess:\n# print(sess.run(result))\n\n\ninput_x = np.linspace(-1, 1, 100, dtype=np.float32)[:, np.newaxis]\nnoise = np.random.normal(0, 0.25, input_x.shape).astype(np.float32)\ninput_y = input_x * 3 + 0.5 
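# Illustrative sketch, not one of the dataset records: the DQN setup above
# wraps a GreedyPolicy in a StochasticPolicy with ExponentialDecay(1, .001,
# .01). The underlying schedule is easy to state on its own -- exploration
# probability starts high and decays toward a floor (the exact paiutils
# parameterization may differ; constants here are assumptions):
import math

def epsilon(step, start=1.0, rate=0.001, floor=0.01):
    # Exponentially decayed exploration probability with a lower bound.
    return max(floor, start * math.exp(-rate * step))

# e.g. epsilon(0) == 1.0, epsilon(4605) ~= 0.01, clipped at the floor after.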
+ noise\n# Placeholders for training data\nx = sf.Placeholder(name=\"X值\")\ny_ = sf.Placeholder(name=\"Y值\")\n\n# Weigths\nw = sf.Variable([[1.0]], name='weight')\n\n# Threshold\nb = sf.Variable(0.1, name='threshold')\n\n# Predicted class by model\n# y = x*w + b\ny = sf.matmul(x, w, name=\"x.dot(w)\") + b\n\nloss = sf.MSELoss(y_, y)()\n\ntrain_op = sf.GradientDescentOptimizer(learning_rate=0.005).minimize(loss)\n\n# plot the real data\n# fig = plt.figure()\n# ax = fig.add_subplot(1, 1, 1)\n# ax.scatter(np.reshape(input_x, (-1, 1)), np.reshape(input_y, (-1, 1)))\n# plt.ion()\n# plt.show()\n\nfeed_dict = {x: input_x, y_: input_y}\nwith sf.Session() as sess:\n for step in range(200):\n # 迭代训练\n sess.run(train_op, feed_dict)\n\n # 画图\n # if step % 10 == 0:\n # try:\n # ax.lines.remove(lines[0])\n # except Exception:\n # pass\n # prediction_value = sess.run(y, feed_dict=feed_dict)\n # # plot the prediction\n # lines = ax.plot(input_x, prediction_value, 'r-', lw=5)\n # plt.pause(1)\n w_value = sess.run(w, feed_dict=feed_dict)\n b_value = sess.run(b, feed_dict=feed_dict)\n print('w: {}, b: {}'.format(w_value, b_value))","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"525435457","text":"\"\"\"This module implements class that represents the notification entity.\"\"\"\n\nfrom datetime import date\n\nfrom django.db import models, IntegrityError\nfrom django.db.utils import OperationalError\n\nfrom utils.abstract_models import AbstractModel\nfrom utils.loggerhelper import LOGGER\nfrom way.models import Way\n\n\nclass Notification(AbstractModel):\n \"\"\"Model for Notification entity.\"\"\"\n way = models.ForeignKey(Way, on_delete=models.CASCADE, related_name='notifications')\n start_time = models.DateField()\n end_time = models.DateField()\n week_day = models.PositiveSmallIntegerField()\n time = models.TimeField()\n\n def __str__(self):\n \"\"\"Method that returns route instance as string.\"\"\"\n return f'notification at: {self.week_day} {str(self.time)}'\n\n def to_dict(self):\n \"\"\"Method that returns dict with object's attributes.\"\"\"\n return {\n 'id': self.id,\n 'start_time': self.start_time,\n 'end_time': self.end_time,\n 'week_day': self.week_day,\n 'time': self.time,\n 'way': self.way_id\n }\n\n def is_for_today(self):\n \"\"\"Return `True` if the notification was scheduled for today.\"\"\"\n today = date.today()\n\n if not today.weekday() == self.week_day:\n return False\n\n if not self.start_time <= today <= self.end_time:\n return False\n\n return True\n\n @classmethod\n def create(cls, way, start_time, end_time, week_day, time): # pylint: disable=arguments-differ\n \"\"\"Method for object creation.\"\"\"\n notification = cls()\n notification.start_time = start_time\n notification.end_time = end_time\n notification.week_day = week_day\n notification.time = time\n\n try:\n notification.way = way\n notification.save()\n return notification\n except (ValueError, IntegrityError, OperationalError) as err:\n LOGGER.error(f'Unsuccessful notification creating. 
{err}')\n\n @classmethod\n def get_expired(cls):\n \"\"\"Retrieve all notifications with expired datetime.\"\"\"\n today = date.today()\n expired_notifications = cls.objects.filter(end_time__lt=today)\n\n return expired_notifications\n\n @classmethod\n def get_today_scheduled(cls):\n \"\"\"Retrieve notifications that are scheduled for today.\"\"\"\n today = date.today()\n notifications = cls.objects.filter(\n week_day=today.weekday(),\n start_time__lte=today,\n end_time__gte=today,\n )\n\n return notifications\n","sub_path":"way_to_home/notification/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"27208329","text":"from django.conf.urls import url, include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n\nfrom . import views\n\nurlpatterns = [\n url(r'^$', views.index, name='index'),\n\n url(r'^news/$', views.news, name='news'),\n url(r'^news/(?P[\\w-]+)/$', views.news, name='news'),\n\n url(r'^(?P[\\w-]+)/stock/$', views.stock, name='stock'),\n url(r'^(?P[\\w-]+)/stock/(?P[\\w-]+)/$', views.stock, name='stock'),\n\n url(r'^(?P[\\w-]+)/program/$', views.program, name='program'),\n url(r'^(?P[\\w-]+)/program/(?P[\\w-]+)/$', views.program, name='program'),\n\n # url(r'^(?P[\\w-]+)/topfitness/$', views.fitness, name='fitness'),\n url(r'^(?P[\\w-]+)/schedule/(?P[\\w-]+)$', views.schedule, name=\"schedule\"),\n # url(r'^(?P[\\w-]+)/trainers/$', views.trainers, name=\"trainers\"),\n url(r'^(?P[\\w-]+)/comments/$', views.comments, name=\"comments\"),\n # url(r'^(?P[\\w-]+)/about/$', views.about, name=\"about\"),\n url(r'^(?P[\\w-]+)/contacts/$', views.contacts, name=\"contacts\"),\n url(r'^(?P[\\w-]+)/call/$', views.call, name='call'),\n url(r'^call/$', views.call, name='call'),\n url(r'^(?P[\\w-]+)/entry/$', views.entry, name='entry'),\n url(r'^(?P[\\w-]+)/abonement/$', views.abonement, name='abonement'),\n\n url(r'^pages/(?P[\\w-]+)/$',views.page,name='page'),\n\n url(r'^(?P[\\w-]+)/$', views.index, name='index'),\n\n] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n","sub_path":"src/mainapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"48785087","text":"import MySQLdb, urllib, json, time\nfrom collections import OrderedDict\n\n'''\nThese global variables are need to configure your current environment to run\nthe WebTracker\n'''\n\n\nhost = '127.0.0.1'\nuser = 'wordpressuser739'\npassword = 't2[%Ch8lFw5T'\ndatabasename = 'wordpress739'\nmap_id = 1797\npost_title = 'Race Tracker'\nimage_root = 'http://www.blueskysolar.utoronto.ca/wordpress/wp-content/uploads/2015/10/'\nurl = 'http://www.worldsolarchallenge.org/api/positions'\nname = 'Blue Sky Solar Racing'\n\n'''\n#Sean's Computer\nhost = 'localhost'\nuser = 'root'\npassword = ''\ndatabasename = 'wordpress4'\nmap_id = 6\nimage_root = 'http://localhost/wordpressblue2/wp-content/uploads/2015/10/'\npost_title = 'Test'\n'''\n'''\n#nick's Computer\n\nhost = '127.0.0.1'\nuser = 'root'\npassword = ''\ndatabasename = 'wordpress'\nmap_id = 4\nurl = 'http://www.worldsolarchallenge.org/api/positions'\nname = 'Blue Sky Solar Racing'\npost_title = 'i fkin love darwin'\n'''\n\ncontrol_stop = [{'name': 'Control Stop 1', 'description': 'Katherine', 'lat': -14.4666667, 'lng': 132.2666667},\n 
{'name': 'Control Stop 2', 'description': 'Dunmarra', 'lat': -16.67983333, 'lng': 133.41188889},\n {'name': 'Control Stop 3', 'description': 'Tennant Creek', 'lat': -19.65775, 'lng': 134.1885},\n {'name': 'Control Stop 4', 'description': 'Barow Creek', 'lat': -21.5319444, 'lng': 133.8888889},\n {'name': 'Control Stop 5', 'description': 'Alice Springs', 'lat': -23.70861111, 'lng': 133.87555556},\n {'name': 'Control Stop 6', 'description': 'Kulgera', 'lat': -25.83911111, 'lng': 133.31572222},\n {'name': 'Control Stop 7', 'description': 'Coober Pedy', 'lat': -29.01105556, 'lng': 134.75466667},\n {'name': 'Control Stop 8', 'description': 'Glendambo', 'lat': -30.96986111, 'lng': 135.749},\n {'name': 'Control Stop 9', 'description': 'Port Augusta', 'lat': -32.50919444, 'lng': 137.79672222}\n ]\n\n\nstart_end = [{'name': 'Start: Darwin', 'description': 'State Square, Darwin', 'lat': -12.46305556, 'lng': 130.83780556},\n {'name': 'Finish: Adelaide', 'description': 'Victoria Square, Adelaide ', 'lat': -34.9306000, 'lng': 138.6042000}\n ]\n\nurl = 'http://www.worldsolarchallenge.org/api/positions'\nname = 'Blue Sky Solar Racing'\n\ndef connect_database(host, user, password, databasename):\n '''\n Takes database info and connects to the MySQL database\n '''\n \n db = MySQLdb.connect(host = host,\n user = user,\n passwd = password,\n db = databasename)\n \n return db\n\n\ndef parseCars(url):\n '''\n Takes url and parses the cars using .json\n '''\n \n roster = urllib.urlopen(url) #fetch from url\n roster = json.load(roster) #parse into JSON\n \n roster = [i for i in roster if i[\"class_id\"] == 5] #only want challengers\n \n \n \n return roster\n\ndef blue_last(roster):\n '''\n Modifies roster and returns it with blue sky solar at the end\n '''\n \n for i in range(len(roster)):\n if roster[i]['name'] == name:\n roster[-1], roster[i] = roster[i], roster[-1]\n \n return roster \n\ndef find_blue_remaining(roster):\n for i in range(len(roster)):\n if roster[i]['name'] == name:\n return roster[i]['dist_adelaide']\n \n\ndef edit_map(value, column):\n '''\n Function that will change a 'column' of wp_postmeta to 'value'\n '''\n \n cur.execute('''\n UPDATE wp_postmeta\n SET meta_value = %s\n WHERE meta_key = %s\n ''', (value, column)) \n\ndef edit_scrollbar(value, column):\n cur.execute('''\n UPDATE wp_posts\n SET post_content = %s\n WHERE post_title = %s\n ''', (value, column)) \n\ndef create_scrollbar(roster, map_id):\n string = '''\n
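# Illustrative sketch, not one of the dataset records: given the
# control_stop list defined above, a natural helper is "which control stop
# is the car nearest to?". A rough flat-degree version (adequate for
# *ranking* stops at race scale; a haversine distance would be more
# accurate over these longitudes):
def nearest_stop(lat, lng, stops):
    # Squared-degree distance is enough to pick the minimum.
    return min(stops, key=lambda s: (s['lat'] - lat) ** 2 + (s['lng'] - lng) ** 2)

# nearest_stop(-19.6, 134.2, control_stop)['name']  ->  'Control Stop 3'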
\n [google_maps id=\"%s\"] \n
\n\n
''' % (map_id)\n \n flagDict = {\n 'ca':('Canada','canadian-flag.jpg'),\n \t'us':('United States of America','american-flag.jpg'),\n \t'ph':('Phillipines','filipino-flag.jpg'),\n \t'nl':('Netherlands','dutch-flag.jpg'),\n \t'cn':('China','chinese-flag.jpg'),\n \t'jp':('Japan','japanese-flag.jpg'),\n \t'hk':('Hong Kong','chinese-flag.jpg'),\n \t'de':('Germany','german-flag.jpg'),\n \t'nz':('New Zealand','new-zealander-flag.jpg'),\n \t'sg':('Singapore','singaporean-flag.jpg'),\n \t'au':('Australia','australian-flag.jpg'),\n \t'id':('Indonesia','indonesian-flag.jpg'),\n \t'tr':('Turkey','turkish-flag.jpg'),\n \t'my':('Malaysia','malaysian-flag.jpg'),\n \t'it':('Italy','italian-flag.jpg'),\n \t'be':('Belgium','belgian-flag.jpg'),\n \t'co':('Colombia','colombian-flag.jpg'),\n \t'tw':('Taiwan','taiwan-flag.jpg'),\n \t'se':('Sweden','swedish-flag.jpg'),\n 'ch':('Switzerland','swiss-flag.jpg'),\n 'cl':('Chile','chilean-flag.jpg'),\n 'kr':('South Korea','south-korean-flag.jpg'),\n 'gb':('Great Britain','great-britan-flag.jpg')\n }\n\n \n #create box for bssr at the top of the leaderboard. it will be distinct as the first child of the leaderboard\n \n #first we need to gather information about blue sky solar racing, find the index of our team\n for j in range(len(roster)):\n if (roster[j]['name'] == \"Blue Sky Solar Racing\"):\n bssrIndex = j\n \n #now that we have the index we can extract information about it. \n string += '''\n
\n

%s - Blue Sky Solar Racing Team

\n

To Adelaide: %skm

\n
\"%s\"
\n
''' % \\\n (bssrIndex + 1,\n distance_shorten(roster[bssrIndex]['dist_adelaide']),\n image_root + \"canadian-flag.jpg\",\n \"Canada\")\n \n #create boxes for other teams in the leaderboard, including bssr \n for i in range(len(roster)):\n curCar = roster[i]\n curCarCountry = roster[i]['country']\n if curCarCountry in flagDict:\n string += create_team_scrollbar(curCar, i, find_blue_remaining(roster), flagDict[curCarCountry][0], (image_root + flagDict[curCarCountry][1]))\n else:\n string += create_team_scrollbar(curCar, i, find_blue_remaining(roster), 'not found', 'not found')\n \n string += '
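# Illustrative sketch, not one of the dataset records: create_scrollbar
# above grows the leaderboard HTML with repeated `+=` on a string. When many
# fragments are concatenated, the usual idiom is to collect parts in a list
# and join once (build_rows is a hypothetical, simplified stand-in):
def build_rows(teams):
    parts = []
    for i, team in enumerate(teams):
        parts.append('{} - {} ({} km to go)'.format(i + 1, team['name'],
                                                    team['dist_adelaide']))
    return '\n'.join(parts)

# build_rows([{'name': 'Blue Sky Solar Racing', 'dist_adelaide': 1204.5}])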
'\n \n return string\n\n\ndef create_markers(roster, blue_remaining):\n '''\n Creates all the markers that will show on the map usign syntax that the\n GMB plugin understands\n '''\n \n \n string = \"\"\"a:%s:{\"\"\" % \\\n (str(len(roster)+len(control_stop)+len(start_end)))\n \n for i in range(len(roster) - 1): #for loop to add all teams\n team_string = \"\"\"i:%s;a:8:%s\"\"\" % \\\n (str(i),\n create_team(roster[i], blue_remaining))\n \n string += team_string\n \n string += \"\"\"i:%s;a:8:\"\"\" % \\\n (str(len(roster)-1)) + \\\n create_blue(roster[-1]) #special function to add BSS\n \n for i in range(len(control_stop)):\n control_string = \"\"\"i:%s;a:8:%s\"\"\" % \\\n ((len(roster) + i),\n create_control(control_stop[i]))\n \n string += control_string\n \n for i in range(len(start_end)):\n start_end_string = \"\"\"i:%s;a:8:%s\"\"\" % \\\n ((len(roster) + len(control_stop) + i),\n create_start_end(start_end[i]))\n \n string += start_end_string\n \n string += '}'\n return string\n\ndef create_blue(data):\n '''\n Function to create the BSS marker\n '''\n \n description = generate_description(data, 0, True)\n \n string = \"\"\"{s:5:\"title\";s:%s:\"%s\";s:11:\"description\";s:%s:\"%s\";s:9:\"reference\";s:0:\"\";s:12:\"hide_details\";b:0;s:3:\"lat\";s:%s:\"%s\";s:3:\"lng\";s:%s:\"%s\";s:6:\"marker\";s:108:\"{ path : MAP_PIN, fillColor : \"#428BCA\", fillOpacity : 1, strokeColor : \"\", strokeWeight: 0, scale : 1 / 3 }\";s:5:\"label\";s:0:\"\";}\"\"\" % \\\n (str(len(data['name'])), \n data['name'], \n \n len(description),\n description,\n \n str(len(str(data['lat']))), \n str(data['lat']), \n \n str(len(str(data['lng']))), \n str(data['lng']))\n \n return string \n\ndef create_team(data, blue_remaining):\n '''\n Function to create a generaic team\n '''\n \n description = generate_description(data, blue_remaining, False)\n\n string = \"\"\"{s:5:\"title\";s:%s:\"%s\";s:11:\"description\";s:%s:\"%s\";s:9:\"reference\";s:0:\"\";s:12:\"hide_details\";b:0;s:3:\"lat\";s:%s:\"%s\";s:3:\"lng\";s:%s:\"%s\";s:6:\"marker\";s:0:\"\";s:5:\"label\";s:0:\"\";}\"\"\" % \\\n (str(len(data['name'])), \n data['name'], \n \n len(description),\n description,\n \n str(len(str(data['lat']))), \n str(data['lat']), \n \n str(len(str(data['lng']))), \n str(data['lng']))\n \n return string\n\ndef create_control(data):\n '''\n Function to create a generaic control stop\n '''\n\n string = \"\"\"{s:5:\"title\";s:%s:\"%s\";s:11:\"description\";s:%s:\"%s\";s:9:\"reference\";s:0:\"\";s:12:\"hide_details\";b:0;s:3:\"lat\";s:%s:\"%s\";s:3:\"lng\";s:%s:\"%s\";s:6:\"marker\";s:111:\"{ path : SQUARE_PIN, fillColor : \"#428BCA\", fillOpacity : 1, strokeColor : \"\", strokeWeight: 0, scale : 1 / 3 }\";s:5:\"label\";s:100:\"\";}\"\"\" % \\\n (str(len(data['name'])), \n data['name'], \n \n str(len(str(data['description']))), \n str(data['description']),\n \n str(len(str(data['lat']))), \n str(data['lat']), \n \n str(len(str(data['lng']))), \n str(data['lng']))\n \n return string\n\ndef create_start_end(data):\n '''\n Function to create a generaic control stop\n '''\n \n string = \"\"\"{s:5:\"title\";s:%s:\"%s\";s:11:\"description\";s:%s:\"%s\";s:9:\"reference\";s:0:\"\";s:12:\"hide_details\";b:0;s:3:\"lat\";s:%s:\"%s\";s:3:\"lng\";s:%s:\"%s\";s:6:\"marker\";s:111:\"{ path : SQUARE_PIN, fillColor : \"#428BCA\", fillOpacity : 1, strokeColor : \"\", strokeWeight: 0, scale : 1 / 3 }\";s:5:\"label\";s:100:\"\";}\"\"\" % \\\n (str(len(data['name'])), \n data['name'], \n \n str(len(str(data['description']))), \n 
str(data['description']),\n \n str(len(str(data['lat']))), \n str(data['lat']), \n \n str(len(str(data['lng']))), \n str(data['lng']))\n \n return string \n\ndef create_team_scrollbar(data, place, blue_remaining, carCountryFull, carCountryImg):\n if carCountryFull != 'not found':\n string = '''\n
\n

%s - %s

\n

To Adelaide: %skm

\n

From Blue Sky %skm

\n

Country: %s

\n
\"%s\"
\n
''' % \\\n (str(place + 1),\n data['name'],\n distance_shorten(data['dist_adelaide']),\n distance_shorten(data['dist_adelaide'] - blue_remaining),\n carCountryFull,\n carCountryImg,\n carCountryFull)\n \n else:\n string = '''\n
\n

%s - %s

\n

To Adelaide: %skm

\n

From Blue Sky %skm

\n \n
''' % \\n (str(place + 1),\n data['name'],\n distance_shorten(data['dist_adelaide']),\n distance_shorten(data['dist_adelaide'] - blue_remaining)\n )\n \n return string\n \n \ndef place_string(number):\n number = str(number + 1)\n \n if number[-1] == '1':\n return number + 'st'\n if number[-1] == '2':\n return number + 'nd'\n if number[-1] == '3':\n return number + 'rd'\n return number + 'th'\n \ndef distance_shorten(distance):\n distance = str(distance)\n if '.' in distance:\n distance = distance[:distance.find('.')]\n return distance\n\ndef generate_description(data, blue_remaining, blue):\n '''\n Function to generate box description for each team\n '''\n \n if not blue:\n desc = '''Distance from Darwin: %skm
Distance to Adelaide: %skm
Distance from Blue Sky Solar Racing: %skm''' % \\\n (data['dist_darwin'],\n data['dist_adelaide'],\n data['dist_adelaide'] - blue_remaining)\n return desc\n \n desc = '''Distance from Darwin: %skm
Distance to Adelaide: %skm''' % \\\n (data['dist_darwin'],\n data['dist_adelaide']) \n return desc\n\n\ndef set_center(data):\n '''\n Function to set the pan to where BSS is\n '''\n \n string = '''a:2:{s:8:\"latitude\";s:%s:\"%s\";s:9:\"longitude\";s:%s:\"%s\";}''' % \\\n (str(len(str(data['lat']))),\n str(data['lat']),\n \n str(len(str(data['lng']))),\n str(data['lng']))\n \n edit_map(string, 'gmb_lat_lng')\n\n\ndef getFinishedTeamsRoster(roster):\n '''\n looks to see if any teams are finished and adds them to the beginning of the roster\n '''\n\n finishedFile = open(\"finishedTeams.txt\",\"r\") #load the file that shows the teams that finished the race\n \n finishedTeams = json.load(finishedFile) #..and parse them into an array of dictionaries\n \n roster = finishedTeams + roster #combine the finished teams and the roster\n roster = remove_repeats(roster) #removing any repeated terms\n finishedFile.close()\n \n \n finishedTeams = [] #reseting finished teams\n for i in range(len(roster)): #interating through the roster\n if roster[i]['dist_adelaide'] < 3: #searching for finished teams\n finishedTeams.append(roster[i])\n \n \n \n \n finishedFile = open(\"finishedTeams.txt\", \"w\") #rewriting the finishedTeams file\n finishedFile.write(json.dumps(finishedTeams)) #you do not need to truncate the file before re writing the list of dictionaries because opening in w+ mode does so.\n \n finishedFile.close()\n \n return roster\n\n\ndef remove_repeats(seq):\n for i in seq:\n occured = False\n for j in seq:\n if i == j:\n if occured:\n seq.remove(j)\n continue\n else:\n occured = True\n \n \n return seq\n \n\nif __name__ == \"__main__\":\n \n db = connect_database(host, user, password, databasename)\n cur = db.cursor()\n \n #roster = parseCars(url)\n #bypass mode\n roster = open(\"positions.txt\", \"r\") #fetch from url\n roster = json.load(roster) #parse into JSON\n \n roster = [i for i in roster if i[\"class_id\"] == 5] #only want challengers\n\n \n roster = getFinishedTeamsRoster(roster)\n scrollbar = create_scrollbar(roster, map_id)\n\n \n edit_scrollbar(scrollbar, post_title)\n\n roster = blue_last(roster)\n\n set_center(roster[-1])\n\n markers = create_markers(roster, roster[-1]['dist_adelaide'])\n\n \n \n edit_map(markers, 'gmb_markers_group')\n \n\n \n \n \n \n db.commit()\n \n cur.close()\n \n \n","sub_path":"Map.py","file_name":"Map.py","file_ext":"py","file_size_in_byte":15514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"581162151","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jul 28 20:43:55 2016\n\n@author: Stranger\n\"\"\"\n\nclass Solution(object):\n def combinationSum(self, candidates, target):\n result = []\n candidates = sorted(candidates)\n def dfs(remain, stack):\n if remain == 0:\n result.append(stack)\n return \n\n for item in candidates:\n if item > remain: break\n if stack and item < stack[-1]: continue\n else:\n dfs(remain - item, stack + [item])\n \n dfs(target, [])\n return result\nif __name__ == '__main__':\n a = Solution()\n nums = [5,5,10,2,3]\n b = a.combinationSum(nums,15)\n","sub_path":"Strangerbai_algorithm/data structure/combinationSum.py","file_name":"combinationSum.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"150890364","text":"from flask import Flask, render_template, request, redirect, flash\napp = Flask(__name__)\napp.secret_key = \"key\"\n\nvalid = True\n\n@app.route('/')\ndef 
index():\n    return render_template('index.html')\n\n@app.route('/results', methods=['POST'])\ndef results():\n    content = {\n        \"name\" : request.form['name'],\n        \"location\" : request.form['location'],\n        \"language\" : request.form['language'],\n        \"comment\" : request.form['comment']\n    }\n\n    if len(content['name']) < 1:\n        flash(\"Please enter a name!\")\n        return redirect('/')\n    if len(content['comment']) < 1:\n        flash(\"Please add a comment!\")\n        return redirect('/')\n    elif len(content['comment']) > 120:\n        flash(\"Your comment is too long!\")\n        return redirect('/')\n    if valid:\n        return render_template(\"results.html\", content = content)\n\n\napp.run(debug=True)\n","sub_path":"dylan_eckert/flask_fund/dojo_survey/dojosurvey.py","file_name":"dojosurvey.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} {"seq_id":"14981027","text":"from odoo import models, fields, api, _\nfrom odoo.tools.misc import formatLang\n\n\nclass AccountFinancialReportLine(models.Model):\n    _inherit = \"account.financial.html.report.line\"\n\n    calculation_mab = {\n        'net_sales': 0.0,\n        # 'total_expenses': 0.0,\n        # 'total_interest': 0.0,\n        # 'total_general_tax': 0.0,\n        # 'total_other_income': 0.0,\n    }\n    @api.multi\n    def _get_lines(self, financial_report, currency_table, options, linesDicts):\n        res = super(AccountFinancialReportLine, self)._get_lines(financial_report, currency_table, options, linesDicts)\n        if financial_report.name in ['Income Statement', 'الأرباح و الخسائر']:\n            for line in res:\n                for i, col in enumerate(line['columns']):\n                    if i > 0:\n                        break\n                    field_name = ''\n                    if 'no_format_name' in col:\n                        field_name = 'no_format_name'\n                    elif 'name' in col and col['name'] and (type(col['name']) == float):\n                        field_name = 'name'\n                    if field_name:\n                        if line['name'] == 'Sales':\n                            self.calculation_mab['net_sales'] = float(col[field_name])\n                        # elif line['name'] in ['الإجمالي Expenses', 'Total Expenses']:\n                        #     self.calculation_mab['total_expenses'] = float(col[field_name])\n                        # elif line['name'] in ['الإجمالي Interest', 'Total Interest']:\n                        #     self.calculation_mab['total_interest'] = float(col[field_name])\n                        # elif line['name'] in ['الإجمالي General Tax', 'Total General Tax']:\n                        #     self.calculation_mab['total_general_tax'] = float(col[field_name])\n                        # elif line['name'] in ['الإجمالي Other Income', 'Total Other Income']:\n                        #     self.calculation_mab['total_other_income'] = float(col[field_name])\n\n            for line in res:\n                for i, col in enumerate(line['columns']):\n                    line_name = ''\n                    if line['name'] in [\n                        'الدخل التشغيلي', 'Net Sales', 'تكاليف الدخل', 'Cost of Goods Sold (COGS)',\n                        'الإجمالي إجمالي الربح', 'Total Gross Profit', 'الإجمالي الدخل', 'Total Income',\n                        'الإجمالي المصروفات', 'Total Expenses and Depreciation', 'صافي الربح',\n                        'Net Profit Before Interest', 'الإجمالي Interest', 'Total Interest',\n                        'Net Profit Before Tax', 'الإجمالي General Tax', 'Total General Tax',\n                        'الإجمالي Other Income', 'Total Other Income', 'Net Profit after Tax'\n                    ] or line['name'] in [\n                        'Salaries Expense', 'Admin Salaries Expenses', 'Admin Expenses',\n                        'Distribution Expenses', 'Hr Expenses', 'Regestration Expenses', 'Marketing Expenses',\n                        'Other Marketing Expenses', 'Factory expenses', 'الإجمالي Expenses', 'Total Expenses',\n                        'إستهلاك', 'Depreciation'\n                    ] or line['name'] in [\n                        'Bank Interest', 'Capital Interest'\n                    ] or line['name'] in [\n                        'TAX'\n                    ] or line['name'] in [\n                        'Other Income'\n                    ] or line['name'] in [\n                        'Sales', 'Discount', 'Gain & Loss from other investment', 
'Total Net Sales'\n ]:\n line_name = 'net_sales'\n # elif line['name'] in [\n # 'Salaries Expense', 'Admin Salaries Expenses', 'Admin Expenses',\n # 'Distribution Expenses', 'Hr Expenses', 'Regestration Expenses', 'Marketing Expenses',\n # 'Other Marketing Expenses', 'Factory expenses', 'الإجمالي Expenses', 'Total Expenses',\n # 'إستهلاك', 'Depreciation'\n # ]:\n # line_name = 'total_expenses'\n # elif line['name'] in ['Bank Interest', 'Capital Interest']:\n # line_name = 'total_interest'\n # elif line['name'] in ['TAX']:\n # line_name = 'total_general_tax'\n # elif line['name'] in ['Other Income']:\n # line_name = 'total_other_income'\n\n if line_name:\n if 'no_format_name' in col:\n if self.calculation_mab[line_name] == 0.0:\n col['percent'] = ' 0.00 %'\n else:\n col['percent'] = ' ' + '{0:,.2f}'.format(100 * (float(col['no_format_name']) / self.calculation_mab[line_name])) + ' % '\n elif self._context.get('print_mode') and self._context.get('no_format') and not self._context.get('prefetch_fields') and (col['name'] or col['name'] == 0.0):\n if type(col['name']) is float:\n if self.calculation_mab[line_name] == 0.0:\n col['col_name'] = '0.00%'\n else:\n col['col_name'] = '{0:,.2f}'.format(100 * (float(col['name']) / self.calculation_mab[line_name])) + '%'\n return res\n","sub_path":"bi_pl_report_customization/models/account_financial_report.py","file_name":"account_financial_report.py","file_ext":"py","file_size_in_byte":5659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"105318875","text":"import matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pylab \n\n\ndef factorial(N):\n if n == 0:\n return 1\n else:\n return n * factorial(n - 1)\n\n\n# Data for plotting\nn = np.arange(0.0, 5.0, 0.1)\n\n# y1 = 1\ny1 = np.log(n)\ny2 = n**0.5\ny3 = n\ny4 = n * np.log(n)\ny5 = n**2\ny6 = n**3\n#y7 = factorial(n)\n\n\n# Note that using plt.subplots below is equivalent to using\n# fig = plt.figure() and then ax = fig.add_subplot(111)\nfig, ax = plt.subplots()\n# ax.plot(n, y1)\nax.plot(n, y6, label=\"n^3\")\nax.plot(n, y5, label=\"n^2\")\nax.plot(n, y4, label=\"n * log(n)\")\nax.plot(n, y3, label=\"n\")\nax.plot(n, y2, label=\"n^0.5\")\nax.plot(n, y1, label=\"log(n)\")\n\npylab.legend(loc='upper left')\n\nax.set(xlabel='n', ylabel='y',\n title='growth of different functions')\nax.grid()\n\nfig.savefig(\"growth.png\")\nplt.show()\n\n\n","sub_path":"2018summer/cs325_algorithms/week5/midterm/growth.py","file_name":"growth.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"554760977","text":"from __future__ import division\nimport sys\nimport datetime\nimport itertools\nimport ntpath\nimport multiprocessing as mp\ntry:\n import simplejson as json\nexcept ImportError:\n import json\nimport numpy as np\nfrom pymongo.errors import PyMongoError\nfrom pymongo import MongoClient, GEOSPHERE\nfrom netCDF4 import Dataset\nfrom constants import MONGO\nfrom atlas.utils.round_to_n import round_to_n\n\n\n__author__ = \"rblourenco@uchicago.edu\"\n# 2015-08-19 - Initial commit\n\n\nuri = \"mongodb://{}:{}@{}/{}?authMechanism=SCRAM-SHA-1\".format(\n MONGO['user'], MONGO['password'], MONGO['domain'], MONGO['database']\n)\nclient = MongoClient(uri) if not MONGO['local'] \\\n else MongoClient('localhost', MONGO['port'])\ndb = client['atlas']\n\n\nclass NetCDFToMongo(object):\n def __init__(self, nc_file, sigfigs=3):\n \"\"\"Class for writing 
geospatial information to Mongo from netCDF files\n\n :param nc_file: Path to netCDF input file\n :type nc_file: str\n :return: None\n :rtype: None\n \"\"\"\n self.nc_file = nc_file\n self.nc_dataset = Dataset(self.nc_file, 'r')\n self.sigfigs = sigfigs\n self.name = None\n self.human_name = None\n self._lon_var = None\n self._lat_var = None\n self._variables = None\n self._dimensions = None\n self._parameters = None\n try:\n self._lats = self.nc_dataset.variables[self.lat_var][:]\n except KeyError:\n raise Exception('Dataset must have a latitude dimension.')\n try:\n self._lons = self.nc_dataset.variables[self.lon_var][:]\n except KeyError:\n raise Exception('Dataset must have a longitude dimension.')\n\n @property\n def parameters(self):\n return self._parameters\n\n @parameters.setter\n def parameters(self, value):\n self._parameters = value\n\n @property\n def dimensions(self):\n \"\"\"List of dimensions other than longitude and latitude.\n\n :return: List of dimensions in NetCDF file (excluding lonlat)\n :rtype: list\n \"\"\"\n if self._dimensions is None:\n self._dimensions = [d for d in self.nc_dataset.dimensions.keys()\n if d not in [self.lon_var, self.lat_var]]\n return self._dimensions\n\n @property\n def variables(self):\n \"\"\"List of variables in NetCDF, other than dimensions in NetCDF.\n\n :return: List of variables in NetCDF file (excluding dimensions)\n :rtype: list\n \"\"\"\n if self._variables is None:\n self._variables = [v for v in self.nc_dataset.variables.keys()\n if v not in self.nc_dataset.dimensions.keys()]\n return self._variables\n\n @property\n def lat_var(self):\n if self._lat_var is None:\n self._lat_var = 'lat'\n return self._lat_var\n\n @property\n def lon_var(self):\n if self._lon_var is None:\n self._lon_var = 'lon'\n return self._lon_var\n\n @property\n def lats(self):\n return self._lats\n\n @property\n def lons(self):\n return self._lons\n\n @property\n def pixel_side_length(self):\n \"\"\"\n\n Using degree decimals - if zero, states a point\n\n :return:\n :rtype: tuple\n \"\"\"\n return abs(np.diff(self.lons[:2])[0]), abs(np.diff(self.lats[:2])[0])\n\n def num_or_null(self, arr):\n \"\"\"Represent null values from netCDF as '--' and numeric values\n as floats.\n \"\"\"\n print(arr)\n if np.ma.getmask(arr):\n if arr.count() == 0:\n return None\n arr = np.ma.filled(arr, None)\n try:\n return round_to_n(arr, self.sigfigs)\n except ValueError:\n print('\\n*** Encountered uncoercible non-numeric ***\\n{}\\n\\n'.format(\n arr\n ))\n pass\n\n @property\n def metadata(self):\n return {}\n\n def parallel_ingest(self):\n self.ingest_metadata()\n for variable in self.variables:\n values = self.nc_dataset[variable][:]\n jobs = []\n n = mp.cpu_count()\n for i in range(n):\n p = mp.Process(target=self.ingest_data, args=(values, variable, n, i))\n jobs.append(p)\n p.start()\n for j in jobs:\n j.join()\n\n def ingest_metadata(self):\n db['raster_meta'].insert_one(self.metadata)\n\n def ingest_data(self, values, variable, sectors=1, sector=0):\n\n start_time = datetime.datetime.now()\n print('*** Start Run ***\\n{}\\n\\n'.format(start_time))\n\n lons_lats = itertools.product(\n enumerate(self.lats), enumerate(self.lons))\n lons_lats = np.array_split(\n np.array([x for x in lons_lats]), sectors)[sector]\n\n try:\n\n points = db['{}_{}'.format(self.name, variable)]\n\n values = np.swapaxes(\n values, self.nc_dataset.variables[variable].dimensions.index(\n self.lat_var), 0)\n\n values = np.swapaxes(\n values, self.nc_dataset.variables[variable].dimensions.index(\n 
self.lon_var), 1)\n\n for (lat_idx, lat), (lon_idx, lon) in lons_lats:\n\n try:\n values = self.num_or_null(\n values[lat_idx, lon_idx])\n if values is None:\n continue\n\n tile = GenerateDocument(\n lon, lat, values,\n self.pixel_side_length[0],\n self.pixel_side_length[1],\n self.dimensions,\n ).as_dict\n result = points.insert_one(tile)\n\n except:\n print('Unexpected error:', sys.exc_info()[0])\n raise\n # print '*** Inserted {} Points ***'.format(len(new_points))\n # print result.inserted_ids\n # print '*** End Points ***'\n tile = {}\n values[:] = []\n\n except PyMongoError:\n print('Error while committing on MongoDB')\n raise\n except:\n print('Unexpected error:', sys.exc_info()[0])\n raise\n\n # start_index = datetime.datetime.now()\n # print('\\n*** Start Indexing ***\\n{}\\n'.format(start_index))\n # points.create_index([('geometry', GEOSPHERE)])\n # end_index = datetime.datetime.now()\n # print('\\n*** Elapsed ***\\n{}\\n'.format(end_index - start_index))\n\n end_time = datetime.datetime.now()\n print('\\n*** End Run ***\\n{}\\n'.format(end_time))\n elapsed_time = end_time - start_time\n print('\\n*** Elapsed ***\\n{}\\n'.format(elapsed_time))\n\n\n# Define GeoJSON standard for ATLAS\nclass GenerateDocument(object):\n def __init__(self, x, y, value, side_x, side_y, dimensions):\n self.x = x\n self.y = y\n self.value = value\n self.side_x = side_x\n self.side_y = side_y\n self.dimensions = dimensions\n\n @property\n def __geo_interface__(self):\n \"\"\"Define polygon based on centroid (x, y) and side\n\n ATTENTION: When referring to MongoDB user reference,\n GeoJSON standard 'geometry' should be used instead of 'loc',\n for geoindexing.\n\n :return: GeoJSON object representing data point\n :rtype: dict\n \"\"\"\n\n x2 = self.side_x / 2\n y2 = self.side_y / 2\n\n document = {\n 'type': 'Feature',\n 'geometry': {'type': 'Polygon', 'coordinates': [[\n [self.x - x2, self.y + y2],\n [self.x + x2, self.y + y2],\n [self.x + x2, self.y - y2],\n [self.x - x2, self.y - y2],\n [self.x - x2, self.y + y2]]]},\n 'properties': {\n 'centroid': {'geometry': {\n 'type': 'Point', 'coordinates': [self.x, self.y]}},\n 'values': self.value,\n 'dimensions': self.dimensions,\n }}\n\n return document\n\n @property\n def as_dict(self):\n return self.__geo_interface__\n\n\nif __name__ == '__main__':\n from constants import NC_FILE\n try:\n mi = NetCDFToMongo(NC_FILE)\n mi.parallel_ingest()\n except:\n raise\n","sub_path":"atlas/nc4_to_mongodb.py","file_name":"nc4_to_mongodb.py","file_ext":"py","file_size_in_byte":8364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"119365679","text":"##KNN Algorithm\n\n\n\n\n\nimport pandas as pd\nimport numpy as np\n##import the csv file from EpiCollect 5\ndata = pd.read_csv(\"gg.csv\")\n\n\n##make into Dataframe\ndata = pd.DataFrame(data)\n\n\n#Make the qunatitative measures the explanatory variables (x variables)\n#Make the response variable the plant family classification\n#No qualitative measures? Still useful! Why is that? 
Could we include it?\nX = data.iloc[:, [7,8,9]].values\ny = data.iloc[:, 3].values\n\n\n#Preprocessing\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20)\n\n#We replace the \"test\" data with the data of the unknown specimen \nX_test = np.array([[16,3,1],[40,2,3],[28,5,2],[38,3,1],[42,4,8]])\n\n#Scale the data for better estimation of overall prediction\nfrom sklearn.preprocessing import StandardScaler\nscaler = StandardScaler()\nscaler.fit(X_train)\n\nX_train = scaler.transform(X_train)\nX_test = scaler.transform(X_test)\n\n\n\n#Apply the classification algorithm\nfrom sklearn.neighbors import KNeighborsClassifier\nclassifier = KNeighborsClassifier(n_neighbors=1)\nclassifier.fit(X_train, y_train)\n\ny_pred = classifier.predict(X_test)\n\n#Let's see!\ny_pred","sub_path":"Fall 2019/DSU_KNN.py","file_name":"DSU_KNN.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} {"seq_id":"272205185","text":"#!/usr/bin/python3\n\"\"\"\n    Send search request to Star Wars API with given string\n\"\"\"\nimport requests\nfrom sys import argv\nif __name__ == \"__main__\":\n    params = {\"search\": argv[1] if len(argv) > 1 else \"\"}\n    try:\n        r = requests.get('https://swapi.co/api/people/', params=params).json()\n        count = r.get(\"count\")\n        print(\"Number of results: {}\".format(count))\n        if count > 0:\n            for res in r.get(\"results\"):\n                print(res[\"name\"])\n    except ValueError:\n        print(\"Not a valid JSON\")\n","sub_path":"0x11-python-network_1/9-starwars.py","file_name":"9-starwars.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} {"seq_id":"138385960","text":"from __future__ import print_function\n\nfrom datetime import datetime\n\nfrom airflow import DAG\nfrom airflow.operators.bash_operator import BashOperator\n\ndefault_args = {\n    'owner': 'esoboliev',\n    'start_date': datetime.utcnow(),\n    'schedule_interval': '@once'\n}\n\ndag = DAG(dag_id=\"TestDag\",\n          default_args=default_args)\n\ntask0 = BashOperator(task_id='set_user',\n                     bash_command=\"./get_user.sh\",\n                     xcom_push=True,\n                     dag=dag)\n\ntask1 = BashOperator(task_id='set_variable',\n                     bash_command='echo {{ ti.xcom_pull(\"set_user\") }}',\n                     dag=dag)\n\ntask2 = BashOperator(task_id='show_variable_inside_bash_script',\n                     bash_command='./show_user.sh',\n                     context=True,\n                     dag=dag)\n\ntask0 >> task1 >> task2\n","sub_path":"dags/test_dag.py","file_name":"test_dag.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} {"seq_id":"38945013","text":"from tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D, Dropout, Flatten, Dense\nimport numpy as np\n\nx_train = np.array(np.random.random((128, 64, 64, 1)), dtype=np.float32)\nx_test = np.array(np.random.random((64, 64, 64, 1)), dtype=np.float32)\ny_train = np.random.randint(0, 24, 128, dtype=np.int32)\ny_test = np.random.randint(0, 24, 64, dtype=np.int32)\n\nmodel = Sequential()\n\nmodel.add(Conv2D(64, kernel_size=4, strides=1, activation='relu', input_shape=(64, 64, 1), padding='same'))\nmodel.add(Conv2D(64, kernel_size=4, strides=2, activation='relu', padding='same'))\nmodel.add(Dropout(0.2))\n\nmodel.add(Conv2D(128, kernel_size=4, strides=1, activation='relu', padding='same'))\nmodel.add(Conv2D(128, kernel_size=4, strides=2, activation='relu', 
padding='same'))\nmodel.add(Dropout(0.2))\n\nmodel.add(Conv2D(256, kernel_size=4, strides=1, activation='relu', padding='same'))\nmodel.add(Conv2D(256, kernel_size=4, strides=2, activation='relu', padding='same'))\n\nmodel.add(Flatten())\nmodel.add(Dropout(0.3))\nmodel.add(Dense(512, activation='relu'))\nmodel.add(Dense(24, activation='softmax'))\n\nmodel.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n\nprint(model.summary())\n\nmodel.fit(x_train, y_train, validation_data=(x_test, y_test), batch_size=64, epochs=1)","sub_path":"SFData/StackOverflow/s56479008_context.py","file_name":"s56479008_context.py","file_ext":"py","file_size_in_byte":1318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"183922979","text":"import re\n\nblock_start = re.compile(r\"\\s*(\\w+)\\s*\\{\")\nblock_end = re.compile(r\"\\s*\\}\")\nofficial_repo = re.compile(r\"\\s*(\\w+)\\(\\)\")\ncustom_repo = re.compile(r\"\\s*maven\\s*\\{\")\ndep_format1 = re.compile(\n r\"\\s*(classpath|test\\w*|compile\\w*)\\s+group\\s*:\\s*'([\\w\\.]+)'\\s*,\\s*name\\s*:\\s*'([\\w\\.\\-]+)'\\s*,\\s*version:\\s*('?[\\w\\.0-9]+'?)\")\ndep_format2 = re.compile(r\"\\s*(classpath|test\\w*|compile\\w*)\\s+'([\\w\\.]+):([\\w\\.\\-]+):([\\w\\.0-9]+)\")\n\n\ndef parse(path, properties):\n root = {}\n is_processing = True\n with open(path) as f:\n data = None\n is_processing = True\n is_eof = False\n while is_processing:\n if is_eof:\n is_processing = False\n else:\n line = f.readline()\n if not line:\n is_processing = False\n is_eof = True\n else:\n match_start = block_start.match(line)\n if match_start:\n child_block_name = match_start.group(1)\n data = {}\n root[child_block_name], is_eof = parse_block(f, properties, child_block_name, data)\n return root\n\n\ndef parse_block(file_handler, properties, block_name, block):\n is_processing = True\n is_eof = False\n while is_processing:\n if is_eof:\n is_processing = False\n else:\n line = file_handler.readline()\n if not line:\n is_processing = False\n is_eof = True\n else:\n match_end = block_end.match(line)\n if match_end:\n is_processing = False\n else:\n match_start = block_start.match(line)\n if match_start and match_start.group(1) != \"maven\":\n child_block_name = match_start.group(1)\n data = {}\n block[child_block_name], is_eof = parse_block(file_handler, properties, child_block_name, data)\n else:\n if block_name == \"repositories\":\n match_official_repo = official_repo.match(line)\n if match_official_repo:\n repo_name = match_official_repo.group(1)\n block[repo_name] = \"\"\n del repo_name\n else:\n match_custom_repo = custom_repo.match(line)\n if match_custom_repo:\n # FIXME\n tokens = file_handler.readline().strip().split()\n _ = file_handler.readline()\n start_is_found, end_is_found, start, end = extract(tokens[1], \"'\", \"'\")\n repo_name = tokens[1][start:end]\n block[repo_name] = \"\"\n del repo_name\n else:\n raise Exception(\"Malformed line '{0}'\".format(line))\n elif block_name == \"dependencies\":\n matcher = None\n match_dep_format1 = dep_format1.match(line)\n if match_dep_format1:\n matcher = match_dep_format1\n if matcher.group(4).startswith(\"'\"):\n if matcher.group(4).endswith(\"'\"):\n version = matcher.group(4)[1:-1]\n else:\n raise Exception(\"Malformed dependencies line: '{0}'\".format(line))\n else:\n version_key = matcher.group(4)\n version = properties.get(version_key, \"\")\n group = matcher.group(2)\n name = matcher.group(3)\n block[group + \":\" + name + \":\" + version] 
= \"\"\n else:\n match_dep_format2 = dep_format2.match(line)\n if match_dep_format2:\n matcher = match_dep_format2\n version = matcher.group(4)\n group = matcher.group(2)\n name = matcher.group(3)\n block[group + \":\" + name + \":\" + version] = \"\"\n # else:\n # raise Exception(\"Malformed line: '{0}'\".format(line))\n return block, is_eof\n\n\ndef extract(statement, start_delimiter, end_delimiter):\n start_delimiter_pos = statement.find(start_delimiter)\n start = start_delimiter_pos + len(start_delimiter)\n end_delimiter_pos = statement[start:].find(end_delimiter)\n end = end_delimiter_pos + len(end_delimiter) + start - 1\n start_is_found = True if start_delimiter_pos >= 0 else False\n end_is_found = True if end_delimiter_pos >= 0 else False\n return start_is_found, end_is_found, start, end\n\n\ndef extractor(statement, start_delimiter, end_delimiter):\n is_processing = True\n items = []\n while is_processing:\n start_is_found, end_is_found, start, end = extract(statement, start_delimiter, end_delimiter)\n if not start_is_found:\n is_processing = False\n elif not end_is_found:\n raise Exception(\"End delimiter '{0}' was not found\".format(end_delimiter))\n else:\n items.append(statement[start:end])\n statement = statement[end + 1:]\n return items\n\n\ndef get_projects_names(path, properties=None):\n include_token = 'include'\n include_token_counter = 0\n root_project_token = 'rootProject.name'\n root_project_token_counter = 0\n\n items = []\n with open(path, 'r') as f:\n for line in f:\n include_pos = line.find(include_token)\n if include_pos >= 0:\n start = include_pos + len(include_token)\n line = line[start:]\n sub_projects = extractor(line, \"':\", \"'\")\n items += map(lambda x: {\"name\": x, \"is_subprojects\": True}, sub_projects)\n include_token_counter += 1\n else:\n root_project_pos = line.find(root_project_token)\n if root_project_pos >= 0:\n equal_pos = line.find(\"=\")\n if equal_pos < 0:\n raise Exception(\"An equal token is expected on line: {0}\".format(line))\n else:\n item = line[equal_pos + 1:].strip()\n if item.startswith(\"'\"):\n if item.endswith(\"'\"):\n item = item[1:-1]\n else:\n raise Exception(\"Malformed statement ' is missing: {0}\".format(line))\n else:\n property_token = item\n if properties is None:\n raise Exception(\n \"Could not evaluate the property {0} without properties dictionary\".format(\n property_token))\n item = properties.get(property_token, None)\n if item is None:\n raise Exception(\"Property not found '{0}'\".format(property_token))\n items.append({\"name\": item, \"is_subprojects\": False})\n root_project_token_counter += 1\n if include_token_counter == 0 and root_project_token_counter == 0:\n raise Exception(\"Missing include or rootProject.name token\")\n elif root_project_token_counter > 1:\n raise Exception(\"Error multiple rootProject.name token used\")\n return items\n","sub_path":"syndle/gradle.py","file_name":"gradle.py","file_ext":"py","file_size_in_byte":8152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"572473063","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nnx = 11\nnt = 24\ndt = 0.1\ndx = 1.\n\numax = 5.\n\nu = np.zeros(nx)\nu[:5] = umax\n\nprint(\"Stability: %.3f\"%(umax*dt/dx))\n\n# plt.figure()\n\nfor ti in range(nt):\n ## predictor\n us = u.copy()\n un = u.copy()\n for i in range(1,nx-1):\n us[i] = u[i] - (dt/dx)*( (u[i])**2. - u[i-1]**2.)/2.\n print(i,u[i]) \n\n \n # for i in range(1,nx-1):\n u[i] = (u[i] + us[i])/2. - (dt/dx)*( (us[i])**2. 
- us[i-1]**2.)/4.\n\n # u = un.copy()\n\n plt.clf(); plt.subplot(111)\n plt.plot(us,'k--')\n plt.plot(un,'--')\n plt.draw()\n plt.pause(0.1)\n # if(ti%6 == 0):\n # plt.plot(u,'-',label='t=%.3f'%(ti*dt))\n# plt.legend()\n# plt.show()\n\n ","sub_path":"mccormacks.py","file_name":"mccormacks.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"510924553","text":"from flask import request\nfrom flask_restful import Resource\n\nfrom pokedex.managers.analytics import add_pokemon_search_history\nfrom pokedex.managers.pokemons import search_pokemons, get_pokemon_by_name, create_pokemon, delete_pokemon, \\\n get_stat_average, edit_pokemon\n\n\n# from pokedex.managers.types import get_list_types, get_types\n\nclass Pokemons(Resource):\n def get(self):\n # query = request.args['query']\n query = request.args.get('query', \"\")\n type_query = request.args.get('type', None)\n ability_query = request.args.get('filter_ability', None)\n ask_effect = request.args.get('effect', 'false') == 'true'\n ask_shape = request.args.get('shape', 'false') == 'true'\n show_abilities = request.args.get('show_abilities', 'false') == 'true'\n pokemons_matching = search_pokemons(query, ability_query, type_query)\n pokemons = [pokemon.get_small_data(ask_effect, ask_shape, show_abilities) for pokemon in pokemons_matching]\n\n add_pokemon_search_history(request.remote_addr, query)\n\n return pokemons\n\n def post(self):\n data = request.json\n pokemon = create_pokemon(data['name'], data['hp'], 10, 0, 0, 0, 0)\n return pokemon.get_small_data()\n\n\nclass Pokemon(Resource):\n def get(self, pokemon_name):\n ask_shape = request.args.get('shape', 'false') == 'true'\n pokemon = get_pokemon_by_name(pokemon_name)\n # if pokemon is None:\n # raise PokemonNotFoundError(pokemon_name)\n return pokemon.get_small_data(ask_shape)\n\n def patch(self, pokemon_name):\n pokemon = get_pokemon_by_name(pokemon_name)\n if pokemon is None:\n raise PokemonNotFoundError(pokemon_name)\n # if pokemon is None:\n # return {'msg': 'Not found'}, 404\n data = request.json\n edit_pokemon(pokemon, data)\n pokemon = get_pokemon_by_name(pokemon_name)\n return pokemon.get_small_data()\n # return 'panic', 500\n\n def delete(self, pokemon_name):\n result = delete_pokemon(pokemon_name)\n return result\n\n\nclass Stats(Resource):\n def get(self):\n return get_stat_average()\n\n#\n# class Types(Resource):\n# def get(self):\n# data=[]\n# ask_pokemons = bool(request.args['pokemons'])\n# types=get_types()\n# if ask_pokemons is True:\n# for type in types:\n# pokemon_matching=search_pokemons('all', type.name)\n# pokemon_names = [p.name for p in pokemon_matching]\n# data.append({'type': type.name, 'pokemons' : pokemon_names})\n# else:\n# data=[type.name for type in types]\n#\n# return data\n","sub_path":"back/pokedex/api/pokemons.py","file_name":"pokemons.py","file_ext":"py","file_size_in_byte":2696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"242852497","text":"import math\n\n\n# --- Function and variable definitions and tests ---#\n\n\n# Constants\n# Allowed max and min inputs.\nMAX_NUMBER = 1000\nMIN_NUMBER = -1000\n\n# Define the maximum number of inputs permitted.\nMAX_LENGTH = 20\n\n# Error messages\nPOSITIVE_AND_WHOLE_ERROR_MESSAGE = 'Number has to be a positive and whole number.'\nWHOLE_NUMBER_ONLY_ERROR_MESSAGE = 'Enter whole numbers only.'\nRANGE_ERROR_MESSAGE = f'Number has to be between {MIN_NUMBER} and 
{MAX_NUMBER}.'\nNOT_AN_ARRAY_ERROR_MESSAGE = 'The given input is not an array, or a value in the array is not an integer.'\nEMPTY_ARRAY = 'The given array does not include any elements.'\n\n# Alert messages\nRESTART_PROMPT = 'Do you want to restart? (y/n) '\nRETRY_MESSAGE = 'Please type \\'y\\' to try again, or \\'n\\' to quit. ' \nGOOD_BYE_MESSAGE = 'Thank you and goodbye!'\n\n\ndef is_prime(num):\n \"\"\"\n Checks if a number is a prime number, or not. Only accepts positive whole numbers as input.\n\n :param num: The number to check for prime.\n :type num: int\n :rtype bool\n :return:\n Returns True if the parameter is a prime number. Returns False if the parameter is not a prime number.\n\n >>> is_prime(3)\n True\n\n >>> is_prime(8)\n False\n\n >>> is_prime(0)\n False\n\n >>> is_prime(-7)\n Traceback (most recent call last):\n ...\n AssertionError: Number has to be a positive and whole number.\n\n >>> is_prime('foo')\n Traceback (most recent call last):\n ...\n AssertionError: Number has to be a positive and whole number.\n \"\"\"\n \n assert type(num) == int and num >= 0, POSITIVE_AND_WHOLE_ERROR_MESSAGE\n # Returns True if the number is greater than 0 and smaller or equal to 3, as 1, 2, and 3 are all prime numbers.\n if 0 < num <= 3:\n return True\n\n # If a number is even, it is not a prime number, and we will thus return False.\n if num % 2 == 0:\n return False\n\n # Now we will check whether a number is divisible by an odd integer starting at 3, incrementing it by 2,\n # since only even numbers would be divisible by other even numbers.\n # We will stop the loop after we hit the square root of a given number. \n # This is for optimization purposes, to avoid running unnecessary iterations.\n # Explanation: One of the factors to make up a number has to be smaller than the square root, since otherwise, \n # the product would be greater than the original number. Therefore, we can stop checking after hitting\n # the square root, since that would mean both factors are bigger than the square root, which is impossible.\n for i in range(3, math.ceil(math.sqrt(num)), 2):\n if num % i == 0:\n return False\n return True\n\n\nnumber_array = []\n\n\ndef handle_input(raw_input):\n \"\"\"\n :param raw_input: The input to check for errors.\n :type raw_input: str\n\n >>> handle_input('foo')\n Enter whole numbers only.\n\n >>> handle_input('')\n Enter whole numbers only.\n \n >>> handle_input(1.3)\n Traceback (most recent call last):\n ... \n AssertionError: Enter whole numbers only.\n\n >>> handle_input(-101)\n Traceback (most recent call last):\n ... \n AssertionError: Number has to be between -100 and 100.\n\n >>> handle_input(101)\n Traceback (most recent call last):\n ... 
\n AssertionError: Number has to be between -100 and 100.\n\n >>> handle_input(100)\n You have entered ...\n\n >>> handle_input(0)\n You have entered ...\n\n >>> handle_input(-100)\n You have entered ...\n \"\"\"\n\n try:\n number_input = int(raw_input)\n\n assert number_input == float(raw_input), WHOLE_NUMBER_ONLY_ERROR_MESSAGE\n assert MIN_NUMBER <= number_input <= MAX_NUMBER, RANGE_ERROR_MESSAGE\n\n number_array.append(number_input)\n\n if len(number_array) < MAX_LENGTH:\n print(f'You have entered {len(number_array)} valid numbers, you can add {MAX_LENGTH - len(number_array)} more numbers.')\n print(f'So far, your numbers are: {number_array}')\n\n except ValueError:\n print(WHOLE_NUMBER_ONLY_ERROR_MESSAGE)\n\n\ndef compute_numbers(numbers):\n \"\"\"\n Takes in an array an finds the prime numbers up to the max number of the array. Calculates the sum of numbers that a given prime number is \n a factor of, and stores the values in a map.\n\n :param numbers: The array to check for prime numbers and the sum of prime factors.\n :type numbers: list\n :rtype dict\n :return:\n Returns a dictionary with the prime numbers as keys, and the sum of the numbers a prime is a factor of.\n\n >>> compute_numbers([4, 5, -2])\n {2: 2}\n\n >>> compute_numbers(3)\n The given input is not an array, or a value in the array is not an integer.\n \n >>> compute_numbers('foo')\n The given input is not an array, or a value in the array is not an integer.\n\n >>> compute_numbers((2,3))\n The given input is not an array, or a value in the array is not an integer.\n\n >>> compute_numbers([2, -1, 'foo'])\n The given input is not an array, or a value in the array is not an integer.\n \n >>> compute_numbers([2, -1, [1, 2]])\n The given input is not an array, or a value in the array is not an integer.\n \n >>> compute_numbers([])\n The given array does not include any elements.\n \n\n \"\"\"\n # Create an empty map for the prime numbers The keys will be the primes, and the values will be the sum of numbers\n # that the specific prime is a factor of.\n primes_and_sums = {}\n\n try:\n # Check that the input is an array, and the array is not empty.\n if len(numbers) == 0:\n raise ValueError\n if type(numbers) != list:\n raise TypeError\n # abs() will raise a TypeError if an element of the array is not an integer. 
The error will be caught, and we will output an error-message.\n abs_numbers = list(map(abs, numbers))\n\n for number in range(2, max(abs_numbers)+1):\n # Check each number between 2 and the max of the absolute numbers (+1 to include the biggest number too) if it's a prime number.\n # A non-valid number or input will never reach the is_prime() function, because handle_input() checks all the user inputs for validity\n if is_prime(number):\n # If a given number is a prime, we will check whether it is a factor of one the original items in the numbers.\n for num in numbers:\n if num % number == 0:\n # If a number is a factor of one of the original numbers, we will check if the number is already registered in the map of the the primes and factor sums.\n if number not in primes_and_sums:\n # If the number is not registered, we will make a new entry with the prime number that we found and add the first value (num) to it.\n primes_and_sums[number] = num\n else:\n # If the number is already registered, we do not want to create a new entry, but we want to add the number to the sum.\n primes_and_sums[number] += num\n return primes_and_sums\n except TypeError:\n print(NOT_AN_ARRAY_ERROR_MESSAGE)\n except ValueError:\n print(EMPTY_ARRAY)\n\n\ndef restart_program_process(): \n \"\"\"\n Re-run program if desired. 'y' will re-run, 'n' will quit the program.\n Any other input will ask the question again.\n \"\"\" \n while True:\n try_again = input('You have not entered any valid numbers. ' + RETRY_MESSAGE)\n if try_again in ('y', 'n'):\n break\n else:\n print(RETRY_MESSAGE)\n\n if try_again == 'y':\n main_program()\n else:\n print(GOOD_BYE_MESSAGE)\n exit()\n\n\n# Defining main_program as a function to establish re-running possibilities.\ndef main_program():\n\n # Making the variable global, because testing already mutates the original array. \n global number_array \n number_array = []\n\n # Let the user put in numbers until the max is reached or the user decides to move on. 
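    # The numbers gathered below are later handed to compute_numbers(); as a
    # hypothetical illustration (not from an actual run), an input of 7 and 12
    # would yield {2: 12, 3: 12, 7: 7}: 2 and 3 each divide 12, and 7 divides itself.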
\n while len(number_array) < MAX_LENGTH:\n raw_input = input(f'Please enter a whole number between {MIN_NUMBER} and {MAX_NUMBER}, or leave empty and press enter to continue: ')\n # Break out of the while loop when the user just hits enter (submits an empty string).\n if raw_input == '':\n break\n try:\n handle_input(raw_input)\n except AssertionError:\n continue\n \n if len(number_array) == 0:\n # Initialize restarting process.\n restart_program_process()\n\n # The array of numbers input by the user is forwarded to compute_numbers() and the return value is stored in a variable called 'primes'.\n primes = compute_numbers(number_array)\n \n # Print out the results.\n print(f'\\nYour prime numbers and the sum of numbers of which the primes are factors of are: {primes}\\n')\n\n\n # Initialize restarting process\n restart_program_process()\n\n\nif __name__ == '__main__':\n import doctest\n doctest.testmod(verbose=True, optionflags=doctest.ELLIPSIS)\n\n\n# Run program.\nmain_program()\n","sub_path":"program.py","file_name":"program.py","file_ext":"py","file_size_in_byte":9186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"341169773","text":"\r\n# 基于函数操作数据库增删改查\r\n\r\n# 函数:\r\n# 1.代码重用\r\n# 2.可扩展性\r\n# 3.易维护\r\n\r\n# 作业实现思路:\r\n# sql=input('sql> ').strip()\r\n# sql=\"select id,name from db1.emp where id>10 limit 3\"\r\n\r\n# 第一部分:sql_dic=sql解析(sql)\r\nsql_dic={\r\n 'select':[\"id,name\"],\r\n 'from':[\"db1.emp\"],\r\n 'where':[\"id>10\"],\r\n 'limit':[\"3\"],\r\n}\r\n\r\n#第二部分:res=sql执行(sql_dic)\r\n\r\n# 主函数\r\n# 1. sql=input('sql> ').strip()\r\n# 2. sql_dic=sql解析(sql)\r\n# 3. res=sql执行(sql_dic)\r\n# 4. 格式化输出res\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"python3基础/3.Python修炼第三层/day3_预习/基于函数操作数据库增删改查.py","file_name":"基于函数操作数据库增删改查.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"134152425","text":"# %%\nimport torch\nimport torch.nn as nn\nfrom utils import utils\nfrom pytorch_lightning.callbacks.early_stopping import EarlyStopping\nfrom pytorch_lightning.callbacks.model_checkpoint import ModelCheckpoint\nimport pytorch_lightning as pl\n\n\n# %%\n\n\nclass AutoEncoder(pl.LightningModule):\n def __init__(self, input_size, output_size, params=None,\n model_path='models/ae.pth', fold=None):\n super(AutoEncoder, self).__init__()\n self.dim_1 = params['dim_1']\n self.dim_2 = params['dim_2']\n self.dim_3 = params['dim_3']\n self.dim_4 = params['dim_4']\n self.hidden = params['hidden']\n self.dropout_prob = params['dropout']\n self.lr = params['lr']\n self.activation = params['activation']\n self.input_size = input_size\n self.output_size = output_size\n self.aeloss = nn.MSELoss()\n self.loss = nn.BCEWithLogitsLoss()\n self.weight_decay = params['weight_decay']\n self.label_smoothing = params['label_smoothing']\n self.amsgrad = params['amsgrad']\n self.encoder = nn.Sequential(\n nn.BatchNorm1d(input_size),\n nn.Linear(input_size, self.dim_1, bias=False),\n nn.BatchNorm1d(self.dim_1),\n self.activation(),\n nn.Dropout(p=self.dropout_prob),\n nn.Linear(self.dim_1, self.dim_2, bias=False),\n nn.BatchNorm1d(self.dim_2),\n self.activation(),\n nn.Dropout(p=self.dropout_prob),\n nn.Linear(self.dim_2, self.dim_3, bias=False),\n nn.BatchNorm1d(self.dim_3),\n self.activation(),\n nn.Dropout(p=self.dropout_prob),\n nn.Linear(self.dim_3, self.dim_4, bias=False),\n nn.BatchNorm1d(self.dim_4),\n self.activation(),\n 
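            # pattern used throughout the encoder: Linear (no bias) -> BatchNorm -> activation -> Dropout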
nn.Dropout(p=self.dropout_prob),\n nn.Linear(self.dim_4, self.hidden, bias=False),\n nn.BatchNorm1d(self.hidden),\n self.activation(),\n nn.Dropout(p=self.dropout_prob)\n )\n self.decoder = nn.Sequential(\n nn.Linear(self.hidden, self.dim_4, bias=False),\n nn.BatchNorm1d(self.dim_4),\n self.activation(),\n nn.Dropout(p=self.dropout_prob),\n nn.Linear(self.dim_4, self.dim_3, bias=False),\n nn.BatchNorm1d(self.dim_3),\n self.activation(),\n nn.Dropout(p=self.dropout_prob),\n nn.Linear(self.dim_3, self.dim_2, bias=False),\n nn.BatchNorm1d(self.dim_2),\n self.activation(),\n nn.Dropout(p=self.dropout_prob),\n nn.Linear(self.dim_2, self.dim_1, bias=False),\n nn.BatchNorm1d(self.dim_1),\n self.activation(),\n nn.Dropout(p=self.dropout_prob),\n nn.Linear(self.dim_1, self.input_size, bias=False),\n nn.BatchNorm1d(self.input_size)\n )\n\n def forward(self, x):\n z = self.encoder(x)\n return z\n\n def training_step(self, batch, batch_idx):\n x, y = batch['data'], batch['target']\n y = y.view(-1)\n x = x.view(x.size(1), -1)\n z = self(x)\n x_hat = self.decoder(z)\n loss = self.aeloss(x_hat, x)\n self.log('t_loss', loss, prog_bar=True, on_epoch=True)\n return loss\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(\n self.parameters(), lr=self.lr, amsgrad=self.amsgrad)\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(\n optimizer, patience=5, factor=0.1, min_lr=1e-7, eps=1e-8)\n return {'optimizer': optimizer, 'lr_scheduler': scheduler, 'monitor': 't_loss'}\n\n\ndef train_autoencoder():\n data = utils.load_data(root_dir='./data/', mode='train')\n data, target, features, date = utils.preprocess_data(data, nn=True)\n dataset = utils.FinData(data=data, target=target, date=date)\n p = {'batch_size': 4597,\n 'dim_1': 231,\n 'dim_2': 851,\n 'dim_3': 777,\n 'dim_4': 192,\n 'hidden': 50,\n 'dropout': 0.017122456592972537,\n 'lr': 0.0013131268366473552,\n 'activation': nn.GELU,\n 'label_smoothing': 0.09401544509474698,\n 'weight_decay': 0.005078413740277699,\n 'amsgrad': True}\n train_idx = [i for i in range(len(data))]\n val_idx = [i for i in range(10000)]\n dataloaders = utils.create_dataloaders(dataset=dataset,\n indexes={\n 'train': train_idx, 'val': val_idx},\n batch_size=p['batch_size'])\n\n checkpoint_callback = ModelCheckpoint(\n dirpath='logs', monitor='t_loss', mode='min', save_top_k=1, period=10)\n input_size = data.shape[-1]\n output_size = 1\n model = AutoEncoder(input_size=input_size,\n output_size=output_size, params=p)\n es = EarlyStopping(monitor='t_loss', patience=10,\n min_delta=0.0005, mode='min')\n trainer = pl.Trainer(max_epochs=500, gpus=1, callbacks=[checkpoint_callback, es],\n precision=16)\n trainer.fit(model, train_dataloader=dataloaders['train'])\n\n\ndef main():\n train_autoencoder()\n\n if __name__ == '__main__':\n main()\n","sub_path":"models/autoencoder.py","file_name":"autoencoder.py","file_ext":"py","file_size_in_byte":5256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"567356825","text":"import torch\nfrom torch.utils import data, model_zoo\nimport numpy as np\nimport torch.optim as optim\nimport torch.backends.cudnn as cudnn\nimport torch.nn as nn\nimport os\nimport os.path as osp\nimport random\nimport torch.nn.functional as F\nimport copy\nfrom ChangeSim_data import ChangeSimDataset\nfrom models.deeplabv2.deeplab_multi import DeeplabMultiFeature, DeeplabMulti\nfrom models.deeplabv3.deeplabv3 import DeepLabV3\nfrom models.deeplab_multi import Deeplab_multi\nfrom models.discriminator import 
FCDiscriminator, DHA\nfrom utils.customized_function import save_model, load_from_checkpoint\nfrom options_train import TrainOptions\n\nimport pdb\nfrom tensorboardX import SummaryWriter\n\n\nnum_workers = 4\n\nnum_classes = 32\ninput_size = (320, 240)\n\n# data_path_train = '/media/smyoo/Backup_Data/dataset/Query_Seq_Train'\n# data_path_test = '/media/smyoo/Backup_Data/dataset/Query_Seq_Test'\n\ndata_path_train = '../dataset/smyoo/Query_Seq_Train'\ndata_path_test = '../dataset/smyoo/Query_Seq_Test'\n\nrestore_from = 'http://vllab.ucmerced.edu/ytsai/CVPR18/DeepLab_resnet_pretrained_init-f81d91e8.pth'\nsource_mode = 'normal'\n# mode = 'dust'\n\n\ndef lr_poly(base_lr, iter, max_iter, power):\n return base_lr * ((1 - float(iter) / max_iter) ** (power))\n\n\ndef adjust_learning_rate_D(optimizer, i_iter, args):\n lr = lr_poly(args.learning_rate_D, i_iter, args.num_steps, args.power)\n optimizer.param_groups[0]['lr'] = lr\n\n\ndef adjust_learning_rate(optimizer, i_iter, args):\n lr = lr_poly(args.learning_rate, i_iter, args.num_steps, args.power)\n optimizer.param_groups[0]['lr'] = lr\n if args.from_scratch:\n optimizer.param_groups[1]['lr'] = lr\n else:\n optimizer.param_groups[1]['lr'] = lr * 10\n if args.tm:\n optimizer.param_groups[2]['lr'] = lr * 10\n\n\ndef distillation_loss(pred_origin, old_outputs):\n pred_origin_logsoftmax = (pred_origin / 2).log_softmax(dim=1)\n old_outputs = (old_outputs / 2).softmax(dim=1)\n loss_distillation = (-(old_outputs * pred_origin_logsoftmax)).sum(dim=1)\n loss_distillation = loss_distillation.sum() / loss_distillation.flatten().shape[0]\n return loss_distillation\n\n\ndef prob_2_entropy(prob):\n n, c, h, w = prob.size()\n return -torch.mul(prob, torch.log2(prob + 1e-30)) / np.log2(c)\n\n\n\ndef main():\n args = TrainOptions().parse()\n\n seed = args.random_seed\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n cudnn.enabled = True\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else 'cpu')\n log_path = osp.join('./logs', args.dir_name)\n writer = SummaryWriter(log_path)\n source_dataset = ChangeSimDataset(data_path_train, crop_size=input_size, num_classes=num_classes, ignore_label=255, max_iters=args.num_steps * args.batch_size, mode=source_mode)\n target_dataset = ChangeSimDataset(data_path_train, crop_size=input_size, num_classes=num_classes, ignore_label=255, max_iters=args.num_steps * args.batch_size, mode=args.target)\n\n val_dataset = ChangeSimDataset(data_path_test, crop_size=input_size, ignore_label=255, max_iters=200, num_classes=num_classes, mode=args.target)\n\n\n # DataLoader\n sourceloader = data.DataLoader(source_dataset,\n batch_size=args.batch_size, shuffle=True, num_workers=num_workers,\n pin_memory=True, drop_last=True)\n targetloader = data.DataLoader(target_dataset,\n batch_size=args.batch_size, shuffle=True, num_workers=num_workers,\n pin_memory=True, drop_last=True)\n val_loader = data.DataLoader(val_dataset,\n batch_size=4, shuffle=False, num_workers=2,\n pin_memory=True)\n\n source_loader_iter = enumerate(sourceloader)\n target_loader_iter = enumerate(targetloader)\n\n model = Deeplab_multi(args=args)\n\n # init D\n model_D1 = FCDiscriminator(num_classes=args.num_classes).to(device)\n model_D2 = FCDiscriminator(num_classes=args.num_classes).to(device)\n\n model_D1.train()\n model_D1.to(device)\n\n model_D2.train()\n model_D2.to(device)\n\n # implement model.optim_parameters(args) to handle different models' lr setting\n optimizer 
= optim.SGD(model.optim_parameters(args),\n lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)\n optimizer_D1 = optim.Adam(model_D1.parameters(), lr=args.learning_rate_D, betas=(0.9, 0.99))\n optimizer_D2 = optim.Adam(model_D2.parameters(), lr=args.learning_rate_D, betas=(0.9, 0.99))\n\n if args.from_scratch: # training model from pre-trained ResNet\n saved_state_dict = torch.load(args.restore_from_resnet, map_location=device)\n new_params = model.state_dict().copy()\n for i in saved_state_dict:\n # Scale.layer5.conv2d_list.3.weight\n i_parts = i.split('.')\n if not i_parts[1] == 'layer5':\n new_params['.'.join(i_parts[1:])] = saved_state_dict[i]\n model.load_state_dict(new_params)\n else: # training model from pre-trained DeepLabV2 on source & previous target domains\n # saved_state_dict = torch.load(args.pre_trained_seg, map_location=device)\n # new_params = model.state_dict().copy()\n # for i in saved_state_dict:\n # if i in new_params.keys():\n # new_params[i] = saved_state_dict[i]\n # model.load_state_dict(new_params)\n saved_state_dict = torch.load(args.pre_trained_seg, map_location=device)\n\n model = load_from_checkpoint(model, saved_state_dict['state_dict_G'])\n # optimizer = load_from_checkpoint(optimizer, saved_state_dict['optimizer_G'])\n # loaded_iter = saved_state_dict['iter']\n # model_D1 = load_from_checkpoint(model, saved_state_dict['state_dict_D1'])\n # optimizer_D1 = load_from_checkpoint(optimizer, saved_state_dict['optimizer_D1'])\n # model_D2 = load_from_checkpoint(model, saved_state_dict['state_dict_D2'])\n # optimizer_D2 = load_from_checkpoint(optimizer, saved_state_dict['optimizer_D2'])\n\n model.train()\n model.to(device)\n\n # reference model\n if not args.from_scratch:\n ref_model = copy.deepcopy(model) # reference model for knowledge distillation\n for params in ref_model.parameters():\n params.requires_grad = False\n ref_model.eval()\n\n optimizer.zero_grad()\n optimizer_D1.zero_grad()\n optimizer_D2.zero_grad()\n\n interp = nn.Upsample(size=(input_size[1], input_size[0]), mode='bilinear', align_corners=True)\n\n if args.gan == 'Vanilla':\n bce_loss = torch.nn.BCEWithLogitsLoss()\n elif args.gan == 'DHA':\n adversarial_loss_1 = DHA(model_D1, relative=args.relative)\n adversarial_loss_2 = DHA(model_D2, relative=args.relative)\n else:\n raise NotImplementedError('Unavailable GAN option')\n\n # labels for adversarial training\n source_label = 1\n target_label = 0\n\n seg_loss = torch.nn.CrossEntropyLoss(ignore_index=args.ignore_label)\n\n # Snapshots directory\n if not os.path.exists(osp.join(args.snapshot_dir, args.dir_name)):\n os.makedirs(osp.join(args.snapshot_dir, args.dir_name))\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n\n # start training\n for i_iter in range(1, args.num_steps_stop):\n\n loss_seg_value1 = 0\n loss_adv_value1 = 0\n loss_distill_value1 = 0\n loss_D_value1 = 0\n\n loss_seg_value2 = 0\n loss_adv_value2 = 0\n loss_distill_value2 = 0\n loss_D_value2 = 0\n\n optimizer.zero_grad()\n adjust_learning_rate(optimizer, i_iter, args)\n\n optimizer_D1.zero_grad()\n optimizer_D2.zero_grad()\n adjust_learning_rate_D(optimizer_D1, i_iter, args)\n adjust_learning_rate_D(optimizer_D2, i_iter, args)\n\n # train f\n\n # freeze D\n for param in model_D1.parameters():\n param.requires_grad = False\n\n for param in model_D2.parameters():\n param.requires_grad = False\n\n _, batch = source_loader_iter.__next__()\n\n images, labels, _ = batch\n images = images.to(device)\n labels = labels.to(device)\n\n if args.tm:\n 
pred2, pred1, pred_ori2, pred_ori1, pred2_tm, pred1_tm = model(images, input_size)\n else:\n _, _, pred2, pred1, _, _ = model(images, input_size)\n\n loss_seg1 = seg_loss(pred1, labels)\n loss_seg2 = seg_loss(pred2, labels)\n if args.memory_loss:\n loss_seg1_mem = seg_loss(pred1_tm, labels)\n loss_seg2_mem = seg_loss(pred2_tm, labels)\n loss = args.lambda_seg1 * loss_seg1 + args.lambda_seg2 * loss_seg2 + args.lambda_seg1 * 0.5 * loss_seg1_mem + args.lambda_seg2 * 0.5 * loss_seg2_mem\n else:\n loss = args.lambda_seg1 * loss_seg1 + args.lambda_seg2 * loss_seg2\n\n loss_seg_value1 += loss_seg1.item()\n loss_seg_value2 += loss_seg2.item()\n\n # if not args.from_scratch and args.tm:\n # _, _, old_outputs2, old_outputs1 = ref_model(images, input_size)\n # loss_distill1 = distillation_loss(pred_ori1, old_outputs1)\n # loss_distill2 = distillation_loss(pred_ori2, old_outputs2)\n # loss += args.lambda_distill1 * loss_distill1 + args.lambda_distill2 * loss_distill2\n # loss_distill_value1 += loss_distill1.item()\n # loss_distill_value2 += loss_distill2.item()\n\n if not args.gan == 'DHA':\n loss.backward()\n\n _, batch = target_loader_iter.__next__()\n images_target, _, _ = batch\n images_target = images_target.to(device)\n\n if args.tm:\n pred2_target, pred1_target, _, _, pred2_target_tm, pred1_target_tm = model(images_target, input_size)\n else:\n _, _, pred2_target, pred1_target, _, _ = model(images_target, input_size)\n\n if args.gan == 'DHA':\n if args.ent:\n loss_adv1 = adversarial_loss_1(prob_2_entropy(F.softmax(pred1_target, dim=1)),\n prob_2_entropy(F.softmax(pred1, dim=1)),\n loss_type='adversarial')\n loss_adv2 = adversarial_loss_2(prob_2_entropy(F.softmax(pred2_target, dim=1)),\n prob_2_entropy(F.softmax(pred2, dim=1)),\n loss_type='adversarial')\n else:\n loss_adv1 = adversarial_loss_1(F.softmax(pred1_target, dim=1),\n F.softmax(pred1, dim=1),\n loss_type='adversarial')\n loss_adv2 = adversarial_loss_2(F.softmax(pred2_target, dim=1),\n F.softmax(pred2, dim=1),\n loss_type='adversarial')\n if args.memory_loss:\n loss_adv1_mem = adversarial_loss_1(F.softmax(pred1_target_tm, dim=1),\n F.softmax(pred1_tm, dim=1),\n loss_type='adversarial')\n loss_adv2_mem = adversarial_loss_2(F.softmax(pred2_target_tm, dim=1),\n F.softmax(pred2_tm, dim=1),\n loss_type='adversarial')\n\n loss += args.lambda_adv1 * loss_adv1 + args.lambda_adv2 * loss_adv2 + args.lambda_adv1 * 0.5 * loss_adv1_mem\\\n + args.lambda_adv2 * 0.5 * loss_adv2_mem\n else:\n loss += args.lambda_adv1 * loss_adv1 + args.lambda_adv2 * loss_adv2\n\n elif args.gan == 'Vanilla':\n if args.ent:\n D_out1 = model_D1(prob_2_entropy(F.softmax(pred1_target, dim=1)))\n D_out2 = model_D2(prob_2_entropy(F.softmax(pred2_target, dim=1)))\n else:\n D_out1 = model_D1(F.softmax(pred1_target, dim=1))\n D_out2 = model_D2(F.softmax(pred2_target, dim=1))\n\n loss_adv1 = bce_loss(D_out1, torch.FloatTensor(D_out1.data.size()).fill_(source_label).to(device))\n loss_adv2 = bce_loss(D_out2, torch.FloatTensor(D_out2.data.size()).fill_(source_label).to(device))\n\n loss = args.lambda_adv1 * loss_adv1 + args.lambda_adv2 * loss_adv2\n else:\n raise NotImplementedError('Unavailable GAN option')\n\n loss_adv_value1 += loss_adv1.item()\n loss_adv_value2 += loss_adv2.item()\n loss.backward()\n\n # train D\n\n # bring back requires_grad\n for param in model_D1.parameters():\n param.requires_grad = True\n\n for param in model_D2.parameters():\n param.requires_grad = True\n\n pred1 = pred1.detach()\n pred2 = pred2.detach()\n pred1_target = pred1_target.detach()\n 
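        # .detach() cuts these predictions out of the autograd graph, so the
        # discriminator updates below cannot backpropagate into the segmentation network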
pred2_target = pred2_target.detach()\n\n if args.tm:\n pred1_tm = pred1_tm.detach()\n pred2_tm = pred2_tm.detach()\n pred1_target_tm = pred1_target_tm.detach()\n pred2_target_tm = pred2_target_tm.detach()\n\n if args.gan == 'DHA':\n if args.ent:\n loss_D1 = adversarial_loss_1(prob_2_entropy(F.softmax(pred1_target, dim=1)),\n prob_2_entropy(F.softmax(pred1, dim=1)),\n loss_type='discriminator')\n loss_D2 = adversarial_loss_2(prob_2_entropy(F.softmax(pred2_target, dim=1)),\n prob_2_entropy(F.softmax(pred2, dim=1)),\n loss_type='discriminator')\n else:\n loss_D1 = adversarial_loss_1(F.softmax(pred1_target, dim=1),\n F.softmax(pred1, dim=1),\n loss_type='discriminator')\n loss_D2 = adversarial_loss_2(F.softmax(pred2_target, dim=1),\n F.softmax(pred2, dim=1),\n loss_type='discriminator')\n\n if args.memory_loss:\n loss_D1 += 0.5 * adversarial_loss_1(F.softmax(pred1_target_tm, dim=1),\n F.softmax(pred1_tm, dim=1),\n loss_type='discriminator')\n loss_D2 += 0.5 * adversarial_loss_2(F.softmax(pred2_target_tm, dim=1),\n F.softmax(pred2_tm, dim=1),\n loss_type='discriminator')\n\n # loss_D1 = loss_D1 / 1.5\n # loss_D2 = loss_D2 / 1.5\n\n loss_D1.backward()\n loss_D2.backward()\n\n loss_D_value1 += loss_D1.item()\n loss_D_value2 += loss_D2.item()\n elif args.gan == 'Vanilla':\n # train with source\n if args.ent:\n D_out1 = model_D1(prob_2_entropy(F.softmax(pred1, dim=1)))\n D_out2 = model_D2(prob_2_entropy(F.softmax(pred2, dim=1)))\n else:\n D_out1 = model_D1(F.softmax(pred1, dim=1))\n D_out2 = model_D2(F.softmax(pred2, dim=1))\n\n loss_D1 = bce_loss(D_out1, torch.FloatTensor(D_out1.data.size()).fill_(source_label).to(device))\n loss_D2 = bce_loss(D_out2, torch.FloatTensor(D_out2.data.size()).fill_(source_label).to(device))\n\n loss_D1 = loss_D1 / 2\n loss_D2 = loss_D2 / 2\n\n loss_D1.backward()\n loss_D2.backward()\n\n loss_D_value1 += loss_D1.item()\n loss_D_value2 += loss_D2.item()\n\n # train with target\n if args.ent:\n D_out1 = model_D1(prob_2_entropy(F.softmax(pred1_target, dim=1)))\n D_out2 = model_D2(prob_2_entropy(F.softmax(pred2_target, dim=1)))\n else:\n D_out1 = model_D1(F.softmax(pred1_target, dim=1))\n D_out2 = model_D2(F.softmax(pred2_target, dim=1))\n\n loss_D1 = bce_loss(D_out1, torch.FloatTensor(D_out1.data.size()).fill_(target_label).to(device))\n loss_D2 = bce_loss(D_out2, torch.FloatTensor(D_out2.data.size()).fill_(target_label).to(device))\n\n loss_D1 = loss_D1 / 2\n loss_D2 = loss_D2 / 2\n\n loss_D1.backward()\n loss_D2.backward()\n\n loss_D_value1 += loss_D1.item()\n loss_D_value2 += loss_D2.item()\n else:\n raise NotImplementedError('Unavailable GAN option')\n\n optimizer.step()\n optimizer_D1.step()\n optimizer_D2.step()\n\n print('exp = {}'.format(osp.join(args.snapshot_dir, args.dir_name)))\n print('iter = {0:8d}/{1:8d}'.format(i_iter, args.num_steps))\n print('loss_seg1 = {0:.3f} loss_dist1 = {1:.3f} loss_adv1 = {2:.3f} loss_D1 = {3:.3f}'.format(\n loss_seg_value1, loss_distill_value1, loss_adv_value1, loss_D_value1))\n print('loss_seg2 = {0:.3f} loss_dist2 = {1:.3f} loss_adv2 = {2:.3f} loss_D2 = {3:.3f}'.format(\n loss_seg_value2, loss_distill_value2, loss_adv_value2, loss_D_value2))\n\n if i_iter % 100 == 0:\n writer.add_scalars('Train/Seg_loss_1', {'train': loss_seg_value1}, i_iter)\n writer.add_scalars('Train/Seg_loss_2', {'train': loss_seg_value2}, i_iter)\n writer.add_scalars('Train/Adv_loss_1', {'train': loss_adv_value1}, i_iter)\n writer.add_scalars('Train/Adv_loss_2', {'train': loss_adv_value2}, i_iter)\n writer.add_scalars('Train/D_loss_1', {'train': 
loss_D_value1}, i_iter)\n writer.add_scalars('Train/D_loss_2', {'train': loss_D_value2}, i_iter)\n\n # if i_iter % 5000 == 0:\n # model.eval()\n # with torch.no_grad():\n # hist = np.zeros((num_classes, num_classes))\n # count = 0\n # total = 0\n # for i, test_batch in enumerate(val_loader):\n # test_images, test_labels, _ = test_batch\n # test_images = test_images.to(device)\n # test_labels = test_labels.to(device)\n #\n # pred2, pred1, pred_ori2, pred_ori1, _, _ = model(test_images, input_size)\n #\n # pred = interp(pred2)\n # _, pred = pred.max(dim=1)\n #\n # test_labels = test_labels.cpu().numpy()\n # pred = pred.cpu().detach().numpy()\n #\n # hist += fast_hist(test_labels.flatten(), pred.flatten(), num_classes)\n #\n # count += (pred == test_labels).sum()\n # total += test_labels.shape[0] * test_labels.shape[1] * test_labels.shape[2]\n #\n # mIoUs = per_class_iu(hist)\n # mIoU = round(np.nanmean(mIoUs) * 100, 2)\n # for ind_class in range(32):\n # print('==>' + source_dataset.seg.idx2name[ind_class] + ':\\t' + str(round(mIoUs[ind_class] * 100, 2)))\n # print('===> mIoU (Test): ' + str(mIoU))\n #\n # print('===> Pixel Accuracy (Test): {}%'.format(float(count/total) * 100))\n #\n # model.train()\n\n if i_iter % args.save_pred_every == 0:\n print('save model ...')\n save_model(osp.join(args.snapshot_dir, args.dir_name), i_iter, model, optimizer, loss_seg_value2, args,\n model_D1, optimizer_D1, model_D2, optimizer_D2)\n\n\ndef fast_hist(a, b, n):\n k = (a >= 0) & (a < n)\n return np.bincount(n * a[k].astype(int) + b[k], minlength=n ** 2).reshape(n, n)\n\ndef per_class_iu(hist):\n return np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))\n\n\nif __name__ == '__main__':\n # os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\n # os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"3\"\n main()\n","sub_path":"train_CUDA.py","file_name":"train_CUDA.py","file_ext":"py","file_size_in_byte":20040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"535291769","text":"import random\r\n\r\nclass Stack(object):\r\n def __init__(self, room):\r\n self.room = room\r\n self.elem = [None for i in range(self.room)]\r\n self.top = 0\r\n self.size = 0\r\n\r\nclass Struct(object):\r\n\r\n def init_stack(self,room):#initialize\r\n self.room = room\r\n self.s = Stack(self.room)\r\n return 1\r\n\r\n def stackEmpty(self):\r\n if self.s.elem[0] != None:\r\n print('The stack is not empty..')\r\n else:\r\n print('The stack is empty..')\r\n\r\n def push_elem(self, e):\r\n if self.s.top < self.room:\r\n self.s.elem[self.s.top] = e\r\n self.s.top += 1\r\n self.s.size += 1\r\n print('Push succeeded..')\r\n else:\r\n print('The stack is full; expand the capacity before pushing more..')\r\n\r\n def pop_elem(self):\r\n if self.s.elem[0] != None:\r\n e, self.s.elem[self.s.top-1] = self.s.elem[self.s.top-1], None\r\n self.s.size -= 1\r\n self.s.top -= 1\r\n print('Popped element: ' + str(e))\r\n return e\r\n else:\r\n print('The stack is empty; nothing to pop..')\r\n\r\n def clear_stack(self):\r\n if self.s.elem[0] != None:\r\n while self.s.top > 0:\r\n self.s.top -= 1\r\n self.s.elem[self.s.top] = None\r\n self.s.size -= 1\r\n print('Stack cleared..')\r\n else:\r\n print('The stack is already empty; nothing to clear..')\r\n\r\n def destory_stack(self):\r\n del self.s\r\n self.s = 0\r\n print('Stack destroyed')\r\n\r\n def get_top_elem(self):\r\n if self.s.elem[0] != None:\r\n e = self.s.elem[self.s.top-1]\r\n print('Top element: ' + str(e))\r\n else:\r\n print('The stack is empty; there is no top element..')\r\n\r\n def get_elem_size(self):\r\n i = 0\r\n print('The stack holds %d elements' % self.s.size)\r\n while i < self.s.size:\r\n if i != self.s.size - 1:\r\n print(self.s.elem[i], end=' ')\r\n else:\r\n print(self.s.elem[i])\r\n i += 1\r\n\r\n def plus_room(self,room):\r\n self.room = self.room + room\r\n self.s.elem += [None for i in range(room)]\r\n print('Capacity expanded..')\r\n\r\nprint('........Sequential stack test.........')\r\nprint('1. Initialize the stack')\r\nprint('2. Destroy the stack')\r\nprint('3. Check whether the stack is empty')\r\nprint('4. Clear the stack')\r\nprint('5. Read an element and push it')\r\nprint('6. Pop the top element')\r\nprint('7. Peek at the top element')\r\nprint('8. Count the elements and print them')\r\nprint('9. Expand the capacity')\r\nprint('10. Quit')\r\nensure = 0\r\nwhile 1:\r\n choose = int(input('Choose an operation: '))\r\n if choose == 1:\r\n room = int(input('Stack capacity (a number greater than 0): '))\r\n s = Struct()\r\n ensure = s.init_stack(room)\r\n if ensure:\r\n print('Initialized..')\r\n elif choose == 2:\r\n if ensure:\r\n s.destory_stack()\r\n ensure = 0\r\n else:\r\n print('The stack has not been created; nothing to destroy..')\r\n elif choose == 3:\r\n if ensure:\r\n s.stackEmpty()\r\n else:\r\n print('The stack has not been created..')\r\n elif choose == 4:\r\n if ensure:\r\n s.clear_stack()\r\n else:\r\n print('The stack has not been created..')\r\n elif choose == 5:\r\n if ensure:\r\n in_elem = input('Enter an element: ')\r\n s.push_elem(in_elem)\r\n else:\r\n print('The stack has not been created..')\r\n elif choose == 6:\r\n if ensure:\r\n s.pop_elem()\r\n else:\r\n print('The stack has not been created..')\r\n elif choose == 7:\r\n if ensure:\r\n s.get_top_elem()\r\n else:\r\n print('The stack has not been created..')\r\n elif choose == 8:\r\n if ensure:\r\n s.get_elem_size()\r\n else:\r\n print('The stack has not been created..')\r\n elif choose == 9:\r\n if ensure:\r\n up_room = int(input('How many slots to add: '))\r\n s.plus_room(up_room)\r\n else:\r\n print('The stack has not been created..')\r\n else:\r\n print('Exited..')\r\n break","sub_path":"python数据结构/栈.py","file_name":"栈.py","file_ext":"py","file_size_in_byte":4081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"68980644","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110 - 1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\nbl_info = {\n \"name\": \"tkit\",\n \"author\": \"dustractor@gmail.com\",\n \"version\": (5,1),\n \"blender\": (2,6,7),\n \"location\": \"Hotkeys \\\\, [, ],\\\", & ' \",\n \"description\": \"various edge selection operators\",\n \"warning\": \"\",\n \"wiki_url\": \"\",\n \"tracker_url\": \"\",\n \"category\": \"Tools\",\n }\n\nimport bpy\nimport bmesh\n\nselected = lambda _: _.select\nnotselected = lambda _: not _.select\ntagged = lambda _: _.tag\nnottagged = lambda _: not _.tag\n\n\nclass tk_op:\n bl_options = {'REGISTER','UNDO'}\n def execute(self,context):\n bm = bmesh.from_edit_mesh(context.active_object.data)\n self.f(bm)\n bm.select_flush_mode()\n context.area.tag_redraw()\n return {'FINISHED'}\n @classmethod\n def poll(self,context):\n return context.active_object and \\\n context.active_object.type == 'MESH' and \\\n context.active_object.mode == 'EDIT' and \\\n context.scene.tool_settings.mesh_select_mode[1]\n\n\ndef op(f):\n return type('TAPU_OT_' + f.__name__,\n (tk_op,bpy.types.Operator),{\n 'bl_label': f.__name__,\n 'bl_idname': 'tapu.' 
+ f.__name__,\n 'f': f})\n\n@op\ndef ie(s,bm):\n for e in bm.edges:\n e.tag = len(list(filter(selected,e.link_faces))) == 1\n for e in filter(tagged,bm.edges):\n e.select_set(0)\n e.tag = 0\n\n@op\ndef oe(s,bm):\n for e in bm.edges:\n e.tag = len(list(filter(selected,e.link_faces))) == 2\n for e in filter(tagged,bm.edges):\n e.select_set(0)\n e.tag = 0\n\n@op\ndef lon(s,bm):\n for e in filter(selected,bm.edges):\n for v in e.verts:\n v.tag ^= 1\n for f in e.link_faces:\n f.tag = 1\n efs = {f.index for f in filter(tagged,bm.faces)}\n for v in filter(tagged,bm.verts):\n v.tag = 0\n for e in filter(notselected,v.link_edges):\n e.tag = {f.index for f in e.link_faces}.isdisjoint(efs)\n for e in filter(tagged,bm.edges):\n e.tag = 0\n e.select_set(1)\n for f in bm.faces:\n f.tag = 0\n\n@op\ndef lun(s,bm):\n for e in filter(selected,bm.edges):\n for v in e.verts:\n v.tag ^= 1\n for v in filter(tagged,bm.verts):\n v.tag = 0\n for e in filter(selected,v.link_edges):\n e.select_set(0)\n\n@op\ndef epz(s,bm):\n for e in filter(selected,bm.edges):\n for v in e.verts:\n v.tag ^= 1\n for v in filter(tagged,bm.verts):\n for e in v.link_edges:\n e.select ^=1\n for e in bm.edges:\n e.select_set(e.select)\n for v in bm.verts:\n v.tag = 0\n\n@op\ndef ef1n(s,bm):\n for e in filter(selected,bm.edges):\n for f in filter(notselected,e.link_faces):\n for fe in filter(notselected,f.edges):\n fe.tag = len(list(filter(selected,fe.verts))) == 1\n for e in bm.edges:\n e.select_set(e.tag)\n e.tag = 0\n\n@op\ndef ef2n(s,bm):\n for e in filter(selected,bm.edges):\n for f in filter(notselected,e.link_faces):\n for fe in filter(notselected,f.edges):\n fe.tag = len(list(filter(notselected,fe.verts))) == 2\n for e in bm.edges:\n e.select_set(e.tag)\n e.tag = 0\n\n@op\ndef ef2np(s,bm):\n for e in filter(selected,bm.edges):\n for f in filter(notselected,e.link_faces):\n for fe in filter(notselected,f.edges):\n fe.tag ^= len(list(filter(notselected,fe.verts))) == 2\n for e in bm.edges:\n e.select_set(e.tag)\n e.tag = 0\n\n@op\ndef ef2nx(s,bm):\n for e in filter(selected,bm.edges):\n for f in filter(notselected,e.link_faces):\n for fe in filter(notselected,f.edges):\n fe.tag = 1\n for e in bm.edges:\n e.select_set(e.tag)\n e.tag = 0\n\ndef register():\n list(map(bpy.utils.register_class,tk_op.__subclasses__()))\n keymaps = bpy.context.window_manager.keyconfigs['Blender'].keymaps\n km = keymaps['Mesh'].keymap_items.new\n km('tapu.ie',type='QUOTE',value='PRESS')\n km('tapu.oe',type='QUOTE',shift=True,value='PRESS')\n km('tapu.lon',type='RIGHT_BRACKET',value='PRESS')\n km('tapu.lun',type='LEFT_BRACKET',value='PRESS')\n km('tapu.epz',type='END',ctrl=True,alt=True,shift=True,value='PRESS')\n km('tapu.ef1n',type='BACK_SLASH',value='PRESS')\n km('tapu.ef2n',type='BACK_SLASH',shift=True,value='PRESS')\n km('tapu.ef2np',type='BACK_SLASH',ctrl=True,shift=True,value='PRESS')\n km('tapu.ef2nx',type='BACK_SLASH',ctrl=True,alt=True,shift=True,value='PRESS')\n\ndef unregister():\n list(map(bpy.utils.unregister_class,tk_op.__subclasses__()))\n\nif __name__ == '__main__':\n register()\n\n","sub_path":"scripts/addons_extern/tkit.py","file_name":"tkit.py","file_ext":"py","file_size_in_byte":5396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"28975486","text":"# -*- coding: utf-8 -*-\nfrom datetime import timedelta\n\nimport airflow\nfrom airflow import DAG\nfrom airflow.models import Variable\nfrom airflow.contrib.operators.emr_add_steps_operator \\\n import EmrAddStepsOperator\nfrom 
airflow.contrib.sensors.emr_step_sensor import EmrStepSensor\n\nDEFAULT_ARGS = {\n 'owner': 'shubham',\n 'depends_on_past': False,\n 'start_date': airflow.utils.dates.days_ago(2),\n 'email': ['shubham.gupta@scripbox.com'],\n 'email_on_failure': False,\n 'email_on_retry': False\n}\n\nSTEPS = [\n {\n \"Name\": \"Hello World\",\n \"HadoopJarStep\": {\n \"Args\": [\"{}/hello_world.sh\".format(Variable.get(\"bash_dir\"))],\n \"Jar\": \"s3://ap-south-1.elasticmapreduce/libs/script-runner/script-runner.jar\"\n },\n \"ActionOnFailure\": \"CONTINUE\"\n }\n]\n\ncluster_key = Variable.get('cluster_key', default_var=None)\n\ndag = DAG(\n 'bash_emr_step',\n default_args=DEFAULT_ARGS,\n dagrun_timeout=timedelta(hours=2),\n schedule_interval='0 8 * * *'\n)\n\n\nstep_adder = EmrAddStepsOperator(\n task_id='add_steps',\n job_flow_id=cluster_key,\n aws_conn_id='aws_default',\n steps=STEPS,\n dag=dag\n)\n\nstep_checker = EmrStepSensor(\n task_id='watch_step',\n job_flow_id=cluster_key,\n step_id=\"{{ task_instance.xcom_pull('add_steps', key='return_value')[0] }}\",\n aws_conn_id='aws_default',\n dag=dag\n)\n\nstep_adder.set_downstream(step_checker)\n","sub_path":"dags/bash_emr_step.py","file_name":"bash_emr_step.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"434433404","text":"import torch\nimport torch.nn as nn\n\n\nclass DnCNN(nn.Module):\n def __init__(self, channels, num_of_layers=17, tanh_out=False,\n built_in_residual=False):\n super(DnCNN, self).__init__()\n kernel_size = 3\n padding = 1\n features = 64\n layers = []\n self.tanh_out = tanh_out\n self.built_in_residual = built_in_residual\n layers.append(nn.Conv2d(in_channels=channels, out_channels=features,\n kernel_size=kernel_size, padding=padding,\n bias=False))\n layers.append(nn.LeakyReLU(inplace=True))\n for _ in range(num_of_layers - 2):\n layers.append(nn.Conv2d(in_channels=features, out_channels=features,\n kernel_size=kernel_size, padding=padding,\n bias=False))\n layers.append(nn.BatchNorm2d(features))\n layers.append(nn.LeakyReLU(inplace=True))\n layers.append(nn.Conv2d(in_channels=features, out_channels=channels,\n kernel_size=kernel_size, padding=padding,\n bias=False))\n self.dncnn = nn.Sequential(*layers)\n\n def forward(self, x):\n out = self.dncnn(x)\n if self.built_in_residual:\n out = x - out\n if self.tanh_out:\n out = torch.tanh(out)\n return out\n","sub_path":"model/dncnn.py","file_name":"dncnn.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"584806281","text":"\"\"\"This module contains Unit Tests for Server app models\"\"\"\n\nfrom django.test import TestCase\n\nfrom registration.models import CustomUser\nfrom server.models import Server\n\n\nclass TestServer(TestCase):\n \"\"\"Tests for the Server model\"\"\"\n\n def setUp(self):\n CustomUser.objects.create(\n id=1,\n first_name=\"Frank\",\n second_name=\"Sinatra\",\n email=\"TestEmail@gmail.com\",\n is_active=True,\n )\n\n CustomUser.objects.create(\n id=2,\n first_name=\"Leonard\",\n second_name=\"Cohen\",\n email=\"TestEmail2@gmail.com\",\n is_active=True,\n )\n\n Server.objects.create(\n id=2,\n name=\"Server2\",\n address=\"address2\",\n state=\"NotSelected\",\n user=CustomUser.objects.get(id=1)\n )\n\n Server.objects.create(\n id=3,\n name=\"Server3\",\n address=\"address3\",\n state=\"Production\",\n user=CustomUser.objects.get(id=1)\n )\n\n Server.objects.create(\n 
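# this third server belongs to user 2, which leaves user 1 with exactly\n # two servers for test_get_by_user_id below\n 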
id=4,\n name=\"Server4\",\n address=\"address4\",\n state=\"Staging\",\n user=CustomUser.objects.get(id=2)\n )\n\n def test_create(self):\n \"\"\"Ensure that the create method creates a server\"\"\"\n\n user = CustomUser.objects.get(id=1)\n data = {\n \"name\": \"ServerName1\",\n \"address\": \"ServerAddress1\",\n \"state\": \"Production\"\n }\n\n result = Server.create(user=user, **data)\n expected = Server.objects.get(id=1)\n\n self.assertEqual(result, expected)\n\n def test_get_by_id(self):\n \"\"\"Ensure that get by id method returns specific server using id\"\"\"\n\n result = Server.get_by_id(2)\n expected = Server.objects.get(id=2)\n\n self.assertEqual(result, expected)\n\n def test_update(self):\n \"\"\"Ensure that update method updates specific server\"\"\"\n\n server = Server.objects.get(id=2)\n server.update(name='TestName', address='127.0.0.1', state='Production')\n result = Server.objects.get(id=2)\n\n self.assertEqual(result.name, 'TestName')\n self.assertEqual(result.address, '127.0.0.1')\n self.assertEqual(result.state, 'Production')\n\n def test_get_by_id_none(self):\n \"\"\"Ensure that get_by_id method returns None if server does not exist\"\"\"\n\n result = Server.get_by_id(66)\n self.assertEqual(result, None)\n\n def test_get_by_user_id(self):\n \"\"\"Ensure that get_by_user_id returns all servers for specific user_id\"\"\"\n\n result = Server.get_by_user_id(1)\n self.assertEqual(len(result), 2)\n\n def test_to_dict(self):\n \"\"\"Ensure that the to_dict method builds a proper dict from server\"\"\"\n\n server = Server.objects.get(id=2)\n result = server.to_dict()\n expected = {\n 'id': 2,\n 'name': \"Server2\",\n 'address': \"address2\",\n 'state': \"NotSelected\",\n 'user_id': 1\n }\n\n self.assertDictEqual(result, expected)\n\n def test___str__(self):\n \"\"\"Ensure that __str__ method builds a proper str representation of a server\"\"\"\n\n server = Server.objects.get(id=2)\n result = str(server)\n expected = 'ServerId: {}, ServerName: {}, ServerAddress: {},' \\\n ' ServerState {}'.format(server.id,\n server.name,\n server.address,\n server.state)\n\n self.assertEqual(result, expected)\n","sub_path":"moninag/tests/unittests/server/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"491197713","text":"import types\n\nclass Session:\n\tdef __init__(self):\n\t\tself.messages = []\n\t\tself.listeners = []\n\t\n\tdef get_last(self):\n\t\treturn self.messages[-1]\n\t\n\tdef broadcast(self, msg, sender):\n\t\tprint(sender.name, \"broadcast\", msg)\n\t\tif self.messages == []:\n\t\t\tprint(sender.name, 'has set it as the public key')\n\t\tself.messages.append(msg)\n\t\tif len(self.messages) == 1:\n\t\t\treturn\n\t\tfor i in self.listeners:\n\t\t\tif i.name != sender.name:\n\t\t\t\ti.receive(msg, sender)\n\t\n\tdef get_pub(self):\n\t\tif self.messages == []:\n\t\t\tprint('cannot get pub')\n\t\treturn self.messages[0]\n\n\tdef listen(self, me):\n\t\tself.listeners.append(me)\n\nsession = Session()\n\n'''\nstage 1:\n\tsend common message encrypted with private key\nstage 2:\n\ttarget receives message and sends common message with their\n\tprivate key\nstage 3:\n\tadd private key to received message (both)\nstage 4:\n\trecipient has common key\n\n'''\n\n
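# A worked pass with a hypothetical base p = 5 (typed at the 'common? '\n# prompt below): Bob broadcasts [0, 15]; Alice answers [1, 10]; Bob sends\n# [2, 15]; Alice sets common = 15 * 2 = 30 and replies [3, 10]; Bob sets\n# common = 10 * 3 = 30. Both ends hold p * 3 * 2, while Eve only ever saw\n# 15 and 10 on the wire.\n\n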
class Message:\n\tdef __init__(self, stage, val):\n\t\tself.data = [stage, val]\n\n\nclass Person:\n\tdef __init__(self, name, priv, pub):\n\t\tself.name = name\n\t\tself.priv = priv\n\t\tself.pub = pub\n\t\tself.common = None\n\n\tdef receive(self, msg, sender):\n\t\tprint(self.name, \"received\", msg, 'from', sender.name)\n\t\tcommon = self.common\n#if common != None:\n#\t\t\tprint('will try to decode with common =', common)\n#\t\t\tprint(msg / common)\n\t\n\tdef send(self, msg):\n\t\tsession.broadcast(msg, self)\n\neve = Person(\"Eve\", 2, .5)\nsession.listen(eve)\n\nbob = Person(\"Bob\", 3, .5)\nsession.listen(bob)\n\nalice = Person(\"Alice\", 2, .5)\nsession.listen(alice)\n\ndef alice_receive(self, msg, sender):\n\tprint(self.name, \"received\", msg, 'from', sender.name)\n\tval = msg[1]\n\tif msg[0] == 0:\n\t\tself.send([1, session.get_pub() * self.priv])\n\tif msg[0] == 1:\n\t\tself.send([2, session.get_pub() * self.priv])\n\tif msg[0] == 2:\n\t\tself.common = val * self.priv\n\t\tprint(self.name,'common = ',self.common)\n\t\tself.send([3, session.get_pub() * self.priv])\n\tif msg[0] == 3:\n\t\tself.common = val * self.priv\n\t\tprint(self.name,'common = ',self.common)\n\n\nalice.receive = types.MethodType(alice_receive, alice)\nbob.receive = types.MethodType(alice_receive, bob)\n\n#SEND PUB\nbob.send(int(input('common? ')))\nbob.send([0, session.get_pub() * bob.priv])\nwhile 1:\n\tmsg = [99, int(input(\"bob sends? \")) * bob.common]\n\n\tbob.send(msg)\n\t\n","sub_path":"encr.py","file_name":"encr.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"73711454","text":"# -*- coding:utf-8 -*-\n# file: PyQtLabel.py\n#\nimport sys\t\t\t\t\t\t\t# import the sys module\nfrom PyQt4 import QtCore, QtGui\t\t\t\t\t# import the PyQt modules\nclass MyWindow(QtGui.QMainWindow):\t\t\t\t# create a class by subclassing QtGui.QMainWindow\n\tdef __init__(self):\t\t\t\t\t# initializer\n\t\tQtGui.QMainWindow.__init__(self)\t\t# call the parent initializer\n\t\tself.setWindowTitle('PyQt')\t\t\t# set the window title\n\t\tself.resize(300,200)\t\t\t\t# set the window size\n\t\tlabel = QtGui.QLabel('PyQt\\nLabel')\t# create a label\n\t\tlabel.setAlignment(QtCore.Qt.AlignCenter)\t# set the label text alignment\n\t\tself.setCentralWidget(label)\t\t\t# add the label to the window\napp = QtGui.QApplication(sys.argv)\t\t\t\t# create the QApplication object\nmywindow = MyWindow()\t\t\t\t\t\t# create a MyWindow object\nmywindow.show()\t\t\t\t\t\t\t# show the window\napp.exec_()\t\t\t\t\t\t\t# enter the event loop\n","sub_path":"Python/PYTHON source code/第15章/PyQtLabel.py","file_name":"PyQtLabel.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"84135634","text":"import json\nimport logging\nfrom typing import Dict, List\n\nimport gevent\nimport pytest\nfrom eth_account import Account\nfrom tests.constants import KEYSTORE_FILE_NAME, KEYSTORE_PASSWORD\nfrom web3 import Web3\n\nfrom raiden_contracts.contract_manager import ContractManager, contracts_precompiled_path\nfrom raiden_libs.events import Event\nfrom raiden_libs.states import BlockchainState\n\nlog = logging.getLogger(__name__)\n\n\n@pytest.fixture(scope=\"session\")\ndef wait_for_blocks(web3):\n \"\"\"Returns a function that blocks until n blocks are mined\"\"\"\n\n def f(n):\n web3.testing.mine(n)\n gevent.sleep()\n\n return f\n\n\n@pytest.fixture(scope=\"session\")\ndef contracts_manager():\n \"\"\"Overwrites the contracts_manager from raiden_contracts to use compiled contracts \"\"\"\n return ContractManager(contracts_precompiled_path())\n\n\n@pytest.fixture\ndef keystore_file(tmp_path) -> str:\n filename = tmp_path / KEYSTORE_FILE_NAME\n\n account = Account.create()\n keystore_json = Account.encrypt(private_key=account.privateKey, 
password=KEYSTORE_PASSWORD)\n with open(filename, \"w\") as f:\n json.dump(keystore_json, f)\n\n return filename\n\n\n@pytest.fixture\ndef mockchain(monkeypatch):\n state: Dict[str, List[List[Event]]] = dict(block_events=[])\n\n def get_events(\n web3: Web3,\n contract_manager: ContractManager,\n chain_state: BlockchainState,\n to_block: int,\n query_ms: bool = True,\n ): # pylint: disable=unused-argument\n from_block = chain_state.latest_commited_block + 1\n blocks = state[\"block_events\"][from_block : to_block + 1]\n events = [ev for block in blocks for ev in block] # flatten\n return chain_state, events\n\n def set_events(events):\n state[\"block_events\"] = events\n\n monkeypatch.setattr(\"monitoring_service.service.get_blockchain_events\", get_events)\n monkeypatch.setattr(\"pathfinding_service.service.get_blockchain_events\", get_events)\n return set_events\n","sub_path":"tests/libs/fixtures/web3.py","file_name":"web3.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"121564345","text":"# ---\n# jupyter:\n# jupytext:\n# text_representation:\n# extension: .py\n# format_name: percent\n# format_version: '1.2'\n# jupytext_version: 1.1.3\n# kernelspec:\n# display_name: Python 3\n# language: python\n# name: python3\n# ---\n\n# %%\n# this notebook was created to convert rectangular coordinates\n\n# %load_ext autoreload\n# %autoreload 2\n\n# %%\nimport flexpart_management.modules.FLEXOUT as FO\nimport flexpart_management.modules.flx_array as fa\nfrom useful_scit.imps import *\n\n# %%\n# doms = ['d01','d02']\n# root_path = '/Volumes/mbProD/Downloads/flex_out/run_2019-06-02_20-42-05_/*-*-*'\nroot_path = '/homeappl/home/aliagadi/wrk/DONOTREMOVE/flexpart_management_data/runs/run_2019-06-10_11-10-03_/*-*-*'\npath_out = '/homeappl/home/aliagadi/wrk/DONOTREMOVE/flexpart_management_data/runs/run_2019-06-10_11-10-03_/log_pol'\n\nroot_path = sys.argv[1]\ndom = sys.argv[2]\nrun_name = 'run_2019-06-10_11-10-03_'\npaths = glob.glob(root_path)\npaths.sort()\n\n# %%\nfo_base_dic = dict(\n # dom = 'd01',\n # folder_path = '/Volumes/mbProD/Downloads/flex_out/run_2019-06-02_20-42-05_/2017-12-10',\n folder_path_out=path_out,\n run_name=run_name,\n)\n\n# %%\nfor p in paths:\n print('starting', dom, p)\n new_dic = dict(dom=dom, folder_path=p)\n fo_dic = {**fo_base_dic, **new_dic}\n\n try:\n fo = FO.FLEXOUT(**fo_dic)\n fo.export_log_polar_coords(keep_z=True)\n print('done', dom, p)\n except:\n print('failed when', dom, p)\n","sub_path":"flexpart_management/notebooks/run_2019-06-10_11-10-03_/get_flx_log_pol_coords_taito_srun.py","file_name":"get_flx_log_pol_coords_taito_srun.py","file_ext":"py","file_size_in_byte":1469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"422972436","text":"\"\"\"\nGiven a non-empty, singly linked list with head node head, return a middle node of linked list.\n\nIf there are two middle nodes, return the second middle node.\n\n\"\"\"\n\nclass ListNode:\n def __init__(self, x):\n self.val = x\n self.next = None\n return\n\ndef printNodeList(node):\n while node:\n print(node.val)\n node = node.next\n\nhead = ListNode(2)\nnode2 = ListNode(0)\nnode3 = ListNode(1)\nnode4 = ListNode(3)\n\nhead.next = node2\nnode2.next = node3\nnode3.next = node4\n\ndef middleNode(head):\n nodelist = list()\n len = 0\n while(head):\n nodelist.append(head)\n head = head.next\n len += 1\n\n # len // 2 is the exact middle for an odd count and the second middle for\n # an even count; the previous round(len/2) banker's-rounding returned the\n # node after the middle for some odd lengths (e.g. 7 nodes)\n len = len // 2\n\n return nodelist[len]\n
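\n# Added sketch (not in the original submission): the same answer without\n# the O(n) list, using the classic fast/slow pointer walk; when fast runs\n# off the end, slow rests on the middle (the second one for even lengths).\ndef middleNodeTwoPointers(head):\n    fast = slow = head\n    while fast and fast.next:\n        slow = slow.next\n        fast = fast.next.next\n    return slow\n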
\nprintNodeList(middleNode(head))\n\n","sub_path":"LeetCode-Python/876 Middle of the Linked List.py","file_name":"876 Middle of the Linked List.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"447323400","text":"# https://github.com/StephenOman/TensorFlowExamples/blob/master/xor%20nn/xor_nn.py\nimport tensorflow as tf\nimport os\n\nX = tf.placeholder(tf.float32, shape=[None,2], name = 'X')\nY = tf.placeholder(tf.float32, shape=[None,1], name = 'Y')\nW = tf.Variable(tf.random_uniform([2,2], -1, 1), name = \"W\")\nw = tf.Variable(tf.random_uniform([2,1], -1, 1), name = \"w\")\nW_bias = tf.Variable(tf.zeros([2]), name = \"c\")\nw_bias = tf.Variable(tf.zeros([1]), name = \"b\")\n\nh = tf.nn.sigmoid(tf.add(tf.matmul(X, W),W_bias))\ny_estimated = tf.sigmoid(tf.add(tf.matmul(h,w),w_bias), name = \"MyOutput\")\nloss = tf.reduce_mean(( (Y * tf.log(y_estimated)) + \n ((1 - Y) * tf.log(1.0 - y_estimated)) ) * -1)\ntrain_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)\n\nINPUT_XOR = [[0,0],[0,1],[1,0],[1,1]]\nOUTPUT_XOR = [[0],[1],[1],[0]]\n\ninit = tf.global_variables_initializer()\nsess = tf.Session()\nsess.run(init)\n\nfor epoch in range(10000):\n sess.run(train_step, feed_dict={X: INPUT_XOR, Y: OUTPUT_XOR})\n if epoch % 1000 == 0:\n print(sess.run(y_estimated, feed_dict={X: INPUT_XOR, Y: OUTPUT_XOR}))\n\noutput_names = 'MyOutput'\noutput_graph_def = tf.graph_util.convert_variables_to_constants(\n sess, # The session is used to retrieve the weights\n tf.get_default_graph().as_graph_def(), # The graph_def is used to retrieve the nodes\n output_names.split(\",\") # The output node names are used to select the useful nodes\n )\n\nabsolute_model_dir = os.getcwd()+\"/\"\noutput_graph = absolute_model_dir + \"frozen_model.pb\"\n\nwith tf.gfile.GFile(output_graph, \"wb\") as f:\n f.write(output_graph_def.SerializeToString())\n\n# https://github.com/onnx/tensorflow-onnx\n# python -m tf2onnx.convert --input frozen_model.pb --inputs X:0 --outputs MyOutput:0 --output modelxor.onnx --verbose\n\n\n\n\n","sub_path":"pythonexamples/xor.py","file_name":"xor.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"525745408","text":"from yasikphotowebsite.apps.photos.models import *\nfrom rest_framework import serializers\n\nclass CameraInfoSerializer(serializers.ModelSerializer):\n class Meta:\n model = CameraInfo\n fields = ('name', 'lens', 'aperture', 'exposure', 'film', 'iso')\n\nclass RecreationAreaSerializer(serializers.ModelSerializer):\n class Meta:\n model = RecreationArea\n fields = ('title',)\n\nclass CategorySerializer(serializers.ModelSerializer):\n class Meta:\n model = Category\n fields = ('slug', 'title')\n\nclass AwardSerializer(serializers.ModelSerializer):\n class Meta:\n model = Award\n fields = ('title', 'year')\n\nclass PhotoSerializer(serializers.ModelSerializer):\n camera_info = CameraInfoSerializer(many=False, read_only=True)\n recreation_area = RecreationAreaSerializer(many=False, read_only=True)\n categories = CategorySerializer(many=True, read_only=True)\n awards = AwardSerializer(many=True, read_only=True)\n light_condition_display = serializers.SerializerMethodField('get_light_condition_display')\n medium_format_display = serializers.SerializerMethodField('get_medium_format_display')\n available_sizes_display = 
serializers.SerializerMethodField('get_available_sizes_display')\n\n def get_light_condition_display(self, obj):\n return obj.get_light_condition_display()\n\n def get_medium_format_display(self, obj):\n return obj.get_medium_format_display()\n\n def get_available_sizes_display(self, obj):\n return obj.get_available_sizes_display()\n\n class Meta:\n model = Photo\n fields = (\n 'slug',\n 'title',\n 'keywords',\n 'description',\n 'image_large',\n 'image_thumb',\n 'date_taken',\n 'date_added',\n 'country',\n 'state_province',\n 'city',\n 'recreation_area',\n 'other_location',\n 'island',\n 'longitude',\n 'latitude',\n 'light_condition',\n 'light_condition_display',\n 'medium_format',\n 'medium_format_display',\n 'hike_distance',\n 'hike_duration',\n 'hike_directions',\n 'drive_directions',\n 'best_time',\n 'camera_info',\n 'categories',\n 'awards',\n 'enable_location',\n 'enable_tech_info',\n 'available_sizes_display'\n )\n\nclass CategoryWithPhotoSerializer(serializers.Serializer):\n category = CategorySerializer(many=False, read_only=True)\n photo = PhotoSerializer(many=False, read_only=True)\n","sub_path":"yasikphotowebsite/apps/api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"72103024","text":"####################################################################################################\nimport tldextract\nfrom urllib.parse import urlparse\nfrom urllib.request import urlopen\nimport http.client\nimport re\nimport os\nimport socket\nimport requests\nimport time\nimport pygeoip\nfrom xml.dom import minidom\nfrom . import utils\n#import utils\n####################################################################################################\n\n\n\n\n\n\n####################################################################################################\ndef virusTotalScan(get_link):\n\ttry:\n\t\tif get_link!='':\n\t\t\tApi_Key = 'a9257cef19dda83244bae88c842a30efc31fea185224dd0121083a94ea263b11'\n\t\t\turl = 'https://www.virustotal.com/vtapi/v2/url/scan'\n\t\t\tparams = {'apikey': Api_Key, 'url': get_link}\n\t\t\tresponse = requests.post(url, data=params)\n\t\t\tif(response.json().get(\"response_code\") == 1):\n\t\t\t\ttime.sleep(15)\n\t\t\t\turl = 'https://www.virustotal.com/vtapi/v2/url/report'\n\t\t\t\tparams = {'apikey': Api_Key, 'resource': get_link}\n\t\t\t\tresponse = requests.post(url, data=params)\n\t\t\t\tif(response.json().get(\"response_code\") == 1):\n\t\t\t\t\ttime.sleep(15)\n\t\t\t\t\tpositives = response.json().get(\"positives\")\n\t\t\t\t\ttotal = response.json().get(\"total\")\n\t\t\t\t\tmxresult = response.json().get(\"scans['CLEAN MX['result']']\")\n\t\t\t\t\tmlresult = response.json().get(\"scans['MalwarePatrol['result']']\")\n\t\t\t\t\tprint (\"MX result : \" + str(mxresult) + \", Malware result : \" + str(mlresult))\n\t\t\t\t\tprint (\"Positives : \" + str(positives) + \", Total : \" + str(total))\n\t\t\t\t\tprint (\"Ratio of positives to total : \" + str(float(positives)/float(total)))\n\t\t\t\t\treturn positives\n\t\t\t\telse:\n\t\t\t\t\tprint (\"Error: -1 (REPORT not loading)\")\n\t\t\t\t\treturn -1\n\t\t\telse:\n\t\t\t\tprint (\"Error: -2 (Virustotal SCAN Unavailable)\")\n\t\t\t\treturn -2\n\t\telse:\n\t\t\tprint (\"Error: -3 (NO URL TO SCAN)\")\n\t\t\treturn -3\n\texcept Exception as ex:\n\t\ttemplate = \"An exception of type {0} occurred. 
Arguments:\\n{1!r}\"\n\t\tmessage = template.format(type(ex).__name__, ex.args)\n\t\tprint (message)\n\t\tprint (\"virustotalscan value : -4\")\n\t\treturn -4\n####################################################################################################\n\n\n\n\n\n\n####################################################################################################\ndef find_ele_with_attribute(dom,ele,attribute):\n\tfor subelement in dom.getElementsByTagName(ele):\n\t\tif subelement.hasAttribute(attribute):\n\t\t\treturn subelement.attributes[attribute].value\n\treturn -1\n\n\ndef alexa_rank(host):\n\txmlpath='http://data.alexa.com/data?cli=10&dat=snbamz&url='+host\n\ttry:\n\t\txml = urlopen(xmlpath)\n\t\tdom = minidom.parse(xml)\n\t\trank = find_ele_with_attribute(dom,'REACH','RANK')\n\t\tprint('alexa rank : ' + str(rank))\n\t\treturn int(rank) #\n\texcept Exception as ex:\n\t\ttemplate = \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\n\t\tmessage = template.format(type(ex).__name__, ex.args)\n\t\tprint (message)\n\t\tprint (\"alexa rank : -1\")\n\t\treturn int(-1)\n####################################################################################################\n\n\n\n\n\n\n####################################################################################################\n#https://stackoverflow.com/questions/2890896/extract-ip-address-from-an-html-string-python\ndef is_ip(domain):\n\ttry:\n\t\tip = re.findall(r'(?:(?:1\\d\\d|2[0-5][0-5]|2[0-4]\\d|0?[1-9]\\d|0?0?\\d)\\.){3}(?:1\\d\\d|2[0-5][0-5]|2[0-4]\\d|0?[1-9]\\d|0?0?\\d)',domain)\n\t\tprint ('is_ip output : ' + str(len(ip)))\n\t\treturn len(ip)\n\texcept Exception as ex:\n\t\ttmp = \"An error of type {0} occurred. Arguments:\\n{1!r}\"\n\t\tmsg = tmp.format(type(ex).__name__, ex.args)\n\t\tprint(msg)\n\t\tprint ('is_ip output : -1')\n\t\treturn int(-1)\n####################################################################################################\n\n\n\n\n\n\n####################################################################################################\ndef getASN(host):\n\ttry:\n\t\tip = socket.gethostbyname(host)\n\t\tg = pygeoip.GeoIP('pucgonline/lib/assets/GeoIPASNum.dat')\n\t\tasn_no = int(g.asn_by_name(ip).split()[0][2:])\n\t\tprint ('ASN No. : ' + str(asn_no))\n\t\treturn asn_no\n\texcept Exception as ex:\n\t\ttemplate = \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\n\t\tmessage = template.format(type(ex).__name__, ex.args)\n\t\tprint (message)\n\t\tprint (\"ASN No. : -1\")\n\t\treturn int(-1)\n####################################################################################################\n\n\n\n\n\n\n####################################################################################################\ndef is_image(url):\n\timage_formats = (\"image/png\", \"image/jpeg\", \"image/gif\")\n\ttry:\n\t\tsite = urlopen(url)\n\t\tmeta = site.info() # get header of the http request\n\t\tif meta[\"content-type\"] in image_formats: # check if the content-type is an image\n\t\t\tprint('is_img : ' + str(meta[\"content-type\"]))\n\t\t\treturn int(0)\n\t\telse:\n\t\t\tprint('is_img : Not an image')\n\t\t\treturn int(1)\n\texcept Exception as ex:\n\t\ttemplate = \"An exception of type {0} occurred. 
Arguments:\\n{1!r}\"\n\t\tmessage = template.format(type(ex).__name__, ex.args)\n\t\tprint (message)\n\t\tprint (\"is_img : -1\")\n\t\treturn -1\n####################################################################################################\n\n\n\n\n\n\n####################################################################################################\ndef un_shorten_url(url):\n\ttry:\n\t\tparsed = urlparse(url)\n\t\th = http.client.HTTPConnection(parsed.netloc)\n\t\th.request('HEAD', parsed.path)\n\t\tresponse = h.getresponse()\n\t\tif response.status//100 == 3 and response.getheader('Location'):\n\t\t\treturn [response.getheader('Location'),int(1)]\n\t\telse:\n\t\t\treturn [url,int(0)]\n\texcept:\n\t\treturn ['',int(-1)]\n\n\ndef checkRedirects(url):\n\tcode = 0\n\tres = ''\n\ttier1,code1 = un_shorten_url(url)\n\ttier2,code2 = un_shorten_url(tier1)\n\ttier3,code3 = un_shorten_url(tier2)\n\tif code3 == -1:\n\t\tcode = -1\n\t\tres = ''\n\telif tldextract.extract(tier3).registered_domain == tldextract.extract(url).registered_domain:\n\t\tcode = 0\n\t\tres = url\n\telse:\n\t\tcode = 1\n\t\tres = tier3\n\tprint ('Redirected? : ' + str(code))\n\tprint ('Final URL : ' + str(tier3) + ' | URL used in extraction : ' + str(res))\n\treturn [res, int(code)]\n####################################################################################################\n\n\n\n\n\n\n####################################################################################################\ndef crawlforfeatures(url):\n\tutils.crawl(url)\n\ttry:\n\t\treturn utils.extractURLs(open('pucgonline/lib/tmp/crawled.txt').read())\n\texcept:\n\t\treturn []\n\n\n#Same Domain Ratio\ndef calculateSDratio(root_url):\n\ttry:\n\t\tsd = 0\n\t\tcount = 0\n\t\turls_list = crawlforfeatures(root_url)\n\t\tos.system('rm pucgonline/lib/tmp/crawled.txt')\n\t\troot_domain = (tldextract.extract(root_url)).domain\n\t\tfor i in range(len(urls_list)):\n\t\t\turl = urls_list[i]\n\t\t\tdomain = (tldextract.extract(url)).domain\n\t\t\tif domain != '':\n\t\t\t\tcount = count + 1\n\t\t\t\tif domain == root_domain:\n\t\t\t\t\tsd = sd + 1\n\t\tprint ('No of SD = ' + str(sd))\n\t\tif count != 0:\n\t\t\tprint (\"No. of URLs : \" + str(len(urls_list)))\n\t\t\tprint (\"SDratio = \" + str(float(sd)/float(count)))\n\t\t\treturn float(sd)/float(count)\n\t\telse:\n\t\t\tprint (\"SDratio = -1 (PAGE downloaded but NO urls present OR PAGE NOT DOWNLOADED)\")\n\t\t\treturn float(-1)\n\texcept Exception as ex:\n\t\ttmp = \"An error of type {0} has occurred. Arguments \\n{1!r}\"\n\t\tmsg = tmp.format(type(ex).__name__,ex.args)\n\t\tprint (msg)\n\t\tprint ('Saving SD ratio as : -1')\n\t\treturn float(-1)\n\n
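\n# NB for calculateSSDratio below: root_subdomain.find(domain) is a\n# substring match, so any crawled link whose domain string occurs anywhere\n# inside the root URL's sub-domain is counted as a hit, not only exact\n# sub-domain component matches.\n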
of URLs : \" + str(len(urls_list)))\n\t\t\tprint (\"SSDratio = \" + str(float(ssd)/float(count)))\n\t\t\treturn float(ssd)/float(count)\n\t\telse:\n\t\t\tprint (\"SSDratio = -1 (PAGE downloaded but NO urls present OR PAGE NOT DOWNLOADED)\")\n\t\t\treturn float(-1)\n\texcept Exception as ex:\n\t\ttmp = \"An error of type{0} has occured. Arguments\\n{1!r}\"\n\t\tmsg = tmp.format(type(ex).__name__,ex.args)\n\t\tprint (msg)\n\t\tprint ('Saving SSD ratio as : -1')\n\t\treturn float(-1)\n####################################################################################################\n\n\n\n\n\n\n####################################################################################################\ndef extract(url):\n\tfeature = {}\n\n\turl,redirects = checkRedirects(url)\n\n\ttoken_words = re.split('\\W+',url)\n\n\thost_name = urlparse(url).netloc\n\tpath_name = urlparse(url).path\n\n\text = tldextract.extract(url)\n\tsub_domain = ext.subdomain\n\tdomain_name = ext.domain\n\ttl_domain = ext.suffix\n\treg_domain = ext.registered_domain\n\n\tfeature['redirects'] = int(redirects) # -1 -> 404, 0 ->not shortened, 1 -> redirected\n\tprint('-'*40);feature['dots_in_sub_domain'] = sub_domain.count('.'); print('Dots in sub domain : ' + str(sub_domain.count('.')))\n\tprint('-'*40);feature['len_of_sub_domain'] = len(sub_domain);print('Length of sub domain : ' + str(len(sub_domain)))\n\n\tprint('-'*40);feature['is_ip'] = is_ip(domain_name) #\n\tprint('-'*40);feature['alexa_rank'] = int(alexa_rank(host_name)) #\n\t#print('-'*40);feature['virustotal'] = int(virusTotalScan(url)) #\n\tprint('-'*40);feature['asn_no'] = int(getASN(host_name)) #\n\tprint('-'*40);feature['is_img'] = int(is_image(url)) #\n\tprint('-'*40);feature['sdratio'] = calculateSDratio(url) #\n\tprint('-'*40);feature['ssdratio'] = calculateSSDratio(url) #\n\tprint('-'*40);return feature\n####################################################################################################\n\n\n\n\n\n\n####################################################################################################\ndef read_url_list(set):\n\ti = 1\n\tfeatures = {}\n\tfeatures_list = []\n\twith open(set) as file:\n\t\tfor line in file:\n\t\t\turl = line.split(',')[0].strip()\n\t\t\tlabel = line.split(',')[1].strip()\n\t\t\tif url != '':\n\t\t\t\tprint (\"=\"*100)\n\t\t\t\tprint ('URL no: ' + str(i) + ' working on: ' + url)\n\t\t\t\ti = i + 1\n\t\t\t\tfeatures = extract(url)\n\t\t\t\ttry:\n\t\t\t\t\tfeatures['label'] = int(label)\n\t\t\t\texcept Exception as ex:\n\t\t\t\t\ttemplate = \"An exception of type {0} occurred. 
Arguments:\\n{1!r}\"\n\t\t\t\t\tmessage = template.format(type(ex).__name__, ex.args)\n\t\t\t\t\tprint (message)\n\t\t\t\t\tprint (\"Set label : -1\")\n\t\t\t\t\tfeatures['label'] = int(-1)\n\t\t\t\tprint (\"=\"*100)\n\t\t\t\tfeatures_list.append(features)\n\treturn features_list\n####################################################################################################\n\n\n\n\n\n####################################################################################################\ndef read_url(url):\n\tfeatures = {}\n\tprint (\"=\"*100)\n\tprint ('Working on: ' + url)\n\tfeatures = extract(url)\n\treturn features\n####################################################################################################\n","sub_path":"pucgonline/lib/extractfeatures.py","file_name":"extractfeatures.py","file_ext":"py","file_size_in_byte":11080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"184960203","text":"from django.contrib.auth import get_user_model\nfrom django.core.management import BaseCommand\nfrom .parser import parse_forum\n\nfrom forum.models import (\n Chapter, SubSection, Theme, Comment\n)\n\n\nUser = get_user_model()\n\n\nclass Command(BaseCommand):\n help = 'Create chapters, subsections, themes and comments. ' \\\n 'Run this command after the create_user command'\n\n def handle(self, *args, **options):\n try:\n parse_forum.main(chapter=Chapter, subsection=SubSection, theme=Theme, comment=Comment, user=User)\n print('Forum created')\n except:\n print('Check whether the users have been created')\n","sub_path":"forum/management/commands/create_forum_data.py","file_name":"create_forum_data.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"484842424","text":"# Copyright (c) 2016 Universidade Federal Fluminense (UFF)\n# Copyright (c) 2016 Polytechnic Institute of New York University.\n# This file is part of noWorkflow.\n# Please, consult the license terms in the LICENSE file.\n\"\"\"Slicing Variable Model\"\"\"\nfrom __future__ import (absolute_import, print_function,\n division, unicode_literals)\n\nfrom sqlalchemy import Column, Integer, Text, TIMESTAMP\nfrom sqlalchemy import PrimaryKeyConstraint, ForeignKeyConstraint\n\nfrom ...utils.prolog import PrologDescription, PrologTrial, PrologAttribute\nfrom ...utils.prolog import PrologRepr, PrologTimestamp\n\nfrom .base import AlchemyProxy, proxy_class, many_ref, many_viewonly_ref\nfrom .base import backref_one, backref_many\nfrom .slicing_dependency import SlicingDependency\n\n\n@proxy_class\nclass SlicingVariable(AlchemyProxy):\n \"\"\"Represent a variable captured during program slicing\"\"\"\n\n __tablename__ = \"slicing_variable\"\n __table_args__ = (\n PrimaryKeyConstraint(\"trial_id\", \"activation_id\", \"id\"),\n ForeignKeyConstraint([\"trial_id\", \"activation_id\"],\n [\"function_activation.trial_id\",\n \"function_activation.id\"], ondelete=\"CASCADE\"),\n ForeignKeyConstraint([\"trial_id\"], [\"trial.id\"], ondelete=\"CASCADE\"),\n )\n trial_id = Column(Integer, index=True)\n activation_id = Column(Integer, index=True)\n id = Column(Integer, index=True) # pylint: disable=invalid-name\n name = Column(Text)\n line = Column(Integer)\n value = Column(Text)\n time = Column(TIMESTAMP)\n\n slicing_usages = many_ref(\"variable\", \"SlicingUsage\")\n\n # dependencies in which this variable is the dependent\n suppliers_dependencies = many_viewonly_ref(\n \"dependent\", 
\"SlicingDependency\",\n primaryjoin=(\n (id == SlicingDependency.m.dependent_id) &\n (activation_id == SlicingDependency.m.dependent_activation_id) &\n (trial_id == SlicingDependency.m.trial_id))\n )\n\n # dependencies in which this variable is the supplier\n dependents_dependencies = many_viewonly_ref(\n \"supplier\", \"SlicingDependency\",\n primaryjoin=(\n (id == SlicingDependency.m.supplier_id) &\n (activation_id == SlicingDependency.m.supplier_activation_id) &\n (trial_id == SlicingDependency.m.trial_id)))\n\n suppliers = many_viewonly_ref(\n \"dependents\", \"SlicingVariable\",\n secondary=SlicingDependency.__table__,\n primaryjoin=(\n (id == SlicingDependency.m.dependent_id) &\n (activation_id == SlicingDependency.m.dependent_activation_id) &\n (trial_id == SlicingDependency.m.trial_id)),\n secondaryjoin=(\n (id == SlicingDependency.m.supplier_id) &\n (activation_id == SlicingDependency.m.supplier_activation_id) &\n (trial_id == SlicingDependency.m.trial_id)))\n\n trial = backref_one(\"trial\") # Trial.slicing_variables\n activation = backref_one(\"activation\") # Activation.variables\n dependents = backref_many(\"dependents\") # SlicingVariable.suppliers\n\n prolog_description = PrologDescription(\"variable\", (\n PrologTrial(\"trial_id\"),\n PrologAttribute(\"activation_id\"),\n PrologAttribute(\"id\"),\n PrologRepr(\"name\"),\n PrologAttribute(\"line\"),\n PrologRepr(\"value\"),\n PrologTimestamp(\"timestamp\", attr_name=\"time\"),\n ))\n\n def __repr__(self):\n return (\n \"SlicingVariable({0.trial_id}, {0.activation_id}, \"\n \"{0.id}, {0.name}, {0.line})\"\n ).format(self)\n\n def __str__(self):\n return \"(L{0.line}, {0.name}, {0.value})\".format(self)\n","sub_path":"capture/noworkflow/now/persistence/models/slicing_variable.py","file_name":"slicing_variable.py","file_ext":"py","file_size_in_byte":3720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"56556895","text":"import math\n# divides only by 1 and itself\ndef is_prime(number):\n if number <= 1: return False\n\n for i in range(2, int(math.sqrt(number)) + 1):\n if number % i == 0: return False\n\n return True\n\nprint(is_prime(7))","sub_path":"wars/is_prime.py","file_name":"is_prime.py","file_ext":"py","file_size_in_byte":227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"571156974","text":"import csv\r\nimport json\r\nfrom itertools import groupby\r\n\r\n\r\ndef group(lst, pk, fk):\r\n keyfunc = lambda x: x[pk]\r\n data = sorted(lst, key=keyfunc)\r\n result = {}\r\n\r\n for k, g in groupby(data, keyfunc):\r\n result[k] = [d[fk] for d in g]\r\n\r\n return result\r\n\r\n\r\ndef csv_to_dict(lst, key):\r\n result = {}\r\n\r\n for row in lst:\r\n id_ = row[key]\r\n del row[key]\r\n result[id_] = row\r\n\r\n return result\r\n\r\n\r\ndef load_csv(file):\r\n with open(file, encoding='utf-8') as f:\r\n return list(csv.DictReader(f))\r\n\r\n\r\ndef join(col, lst, pk, key, default=None):\r\n if default is None:\r\n default = []\r\n\r\n for obj in col:\r\n obj[key] = lst.get(obj[pk], default)\r\n\r\n\r\ndef ajusta_disciplinas(disciplinas):\r\n for d in disciplinas:\r\n nome, _ = d['nome'].split(' :: ')\r\n d['nome'] = nome\r\n d['capacidade'] = int(d['capacidade'])\r\n d['carga'] = int(d['carga']) // 18\r\n d['ofertada'] = bool(d['ofertada'])\r\n\r\n\r\ndef ajusta_periodo_turma(lst):\r\n for d in lst:\r\n d['periodo'] += f\"-{d['turma']}:{d['curso']}\"\r\n d['turma'] += f\"-{d['curso']}\"\r\n\r\n\r\ndef main():\r\n 
alunos_file = 'alunos.csv'\r\n competencias_file = 'competencias.csv'\r\n cursadas_file = 'cursadas.csv'\r\n disciplinas_file = 'disciplinas.csv'\r\n equivalencias_file = 'equivalencias.csv'\r\n professores_file = 'professores.csv'\r\n restantes_file = 'restantes.csv'\r\n output_file = 'input.new.json'\r\n\r\n alunos = load_csv(alunos_file)\r\n disciplinas = load_csv(disciplinas_file)\r\n professores = load_csv(professores_file)\r\n competencias_csv = load_csv(competencias_file)\r\n cursadas_csv = load_csv(cursadas_file)\r\n equivalencias_csv = load_csv(equivalencias_file)\r\n restantes_csv = load_csv(restantes_file)\r\n\r\n competencias = group(competencias_csv, 'id_prof', 'id_disc')\r\n cursadas = group(cursadas_csv, 'id_aluno', 'id_disc')\r\n restantes = group(restantes_csv, 'id_aluno', 'id_disc')\r\n equivalencias = group(equivalencias_csv, 'id_disc', 'id_equiv')\r\n\r\n join(alunos, cursadas, 'id', 'cursadas')\r\n join(alunos, restantes, 'id', 'restantes')\r\n join(professores, competencias, 'id', 'competencias')\r\n join(disciplinas, equivalencias, 'id', 'equivalencias')\r\n\r\n ajusta_periodo_turma(disciplinas)\r\n ajusta_periodo_turma(alunos)\r\n\r\n ajusta_disciplinas(disciplinas)\r\n\r\n result = {\r\n 'disciplinas': disciplinas,\r\n 'professores': professores,\r\n 'alunoperfis': alunos\r\n }\r\n\r\n with open(output_file, 'w', encoding='utf-8') as f:\r\n json.dump(result, f)\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"scripts/converte_relatorio_csv.py","file_name":"converte_relatorio_csv.py","file_ext":"py","file_size_in_byte":2646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"430095782","text":"import random\nimport unio as async_library\nimport time\n\nfrom basic_animations import animation\n\nprint()\n\ndef print_blinkies(family):\n for blinky in family:\n print(blinky, end=' ')\n print(end='\\r')\n\nclass Blinky:\n def __init__(self, family, async_library):\n self.face = '(o.o)'\n self.family = family\n self.async_library = async_library\n\n def __str__(self):\n return self.face\n\n async def show_face(self, new_face, delay):\n self.face = new_face\n print_blinkies(self.family)\n await self.async_library.sleep(delay)\n\n async def run(self):\n while True:\n await animation(self)\n\n\nasync def run_all():\n family = []\n family.extend(Blinky(family, async_library) for i in range(10))\n\n tasks = []\n for blinky in family:\n task = async_library.create_task(blinky.run())\n tasks.append(task)\n\n for task in tasks:\n await task\n\n\nasync_library.run(run_all())\n\nprint()\n","sub_path":"2019-06-pycon-cz/090_blinkies_unio.py","file_name":"090_blinkies_unio.py","file_ext":"py","file_size_in_byte":975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"544355716","text":"import json\r\nfrom datetime import datetime\r\n\r\nimport os\r\n\r\nfrom processor.parser import ExportFileParser\r\n\r\n\r\nclass MonefyStatementMapper:\r\n def execute(self) -> list:\r\n data = ExportFileParser().parse()\r\n return self.map(data)\r\n\r\n @staticmethod\r\n def map(data) -> list:\r\n accumulator = []\r\n for el in data:\r\n\r\n result = {\r\n 'transaction_date': datetime.strptime(el.get('date'), '%d/%m/%Y'),\r\n 'account': el.get('account'),\r\n 'category': el.get('category'),\r\n 'amount': float(el.get('amount').lstrip('-').replace(',', '')),\r\n 'currency': el.get('currency'),\r\n 'converted_amount': float(el.get('converted amount').lstrip('-').replace(',', '')),\r\n 
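# note: the source row's 'currency' value is reused below for the\r\n # converted currency as well\r\n 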
'converted_currency': el.get('currency'),\r\n 'description': el.get('description'),\r\n 'is_debet': float(el.get('amount').replace(',', '')) > 0,\r\n }\r\n accumulator.append(result)\r\n return accumulator\r\n\r\n\r\nclass MonobankStatementsMapper:\r\n def __init__(self, statements, account):\r\n self.statements = statements\r\n self.account = account\r\n\r\n def execute(self) -> list:\r\n return self.map()\r\n\r\n def map(self) -> list:\r\n accumulator = []\r\n mcc_map_results = self._mcc_mapper()\r\n for el in self.statements:\r\n result = {\r\n 'transaction_date': datetime.fromtimestamp(el.get('time')).date(),\r\n 'account': self.account,\r\n 'category': self._cathegory_mapper(el, mcc_map_results),\r\n 'amount': float(abs(el.get('amount'))) / 100,\r\n 'currency': el.get('currencyCode'),\r\n 'converted_amount': float(abs(el.get('amount'))) / 100,\r\n 'converted_currency': el.get('currencyCode'),\r\n 'description': el.get('description'),\r\n 'is_debet': float(el.get('amount')) / 100 > 0,\r\n }\r\n accumulator.append(result)\r\n return accumulator\r\n\r\n def _mcc_mapper(self):\r\n path = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'static/mcc_codes.json')\r\n with open(path) as outfile:\r\n return json.load(outfile)\r\n\r\n @staticmethod\r\n def _cathegory_mapper(el, mcc_map_results):\r\n result = [i.get('irs_description') for i in mcc_map_results if int(i.get('mcc')) == el.get('mcc')]\r\n if not result:\r\n return el.get('mcc')\r\n return result[0]\r\n","sub_path":"processor/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"479322709","text":"import numpy as np\nimport cv2\nfrom theano_rbm.data_process import getFiles_flags, getFiles_flags_back, getFiles_flags_all, getFiles_jpg\n\n\ndef get_file_name_all(url):\n filename = url.split('/')[-1][:-8]\n return filename\n\n\nif __name__ == '__main__':\n counter_url = '/home/aurora/hdd/workspace/data/MSTAR_data_liang_processed/target_chips_128x128_normalized_wei_counter/'\n test_counter_url = '/home/aurora/hdd/workspace/data/MSTAR_data_liang_processed/target_chips_128x128_normalized_wei_counter/patch_size_25_new/test_counter/'\n # counter_url = '/home/aurora/hdd/workspace/data/MSTAR_data_liang_processed/target_chips_128x128_normalized_wei_counter/HB03344.003_10_10diliate.jpg'\n # counter_url = '/home/aurora/hdd/workspace/data/MSTAR_data_liang_processed/target_chips_128x128_normalized_wei_counter/patch_size_25/dialit_background_patch/HB19986.018_70_88@4.jpg'\n # img = cv2.imread(counter_url)\n # b,g,r = cv2.split(img)\n # dilated = cv2.dilate(r, np.ones((10, 10)))\n # print b.shape\n # cv2.imshow('test', r)\n # cv2.waitKey(0)\n\n target_counter_list = getFiles_flags_all(test_counter_url, 'all')\n for target_counter in target_counter_list:\n img_counter = cv2.imread(target_counter, cv2.IMREAD_ANYDEPTH | cv2.IMREAD_ANYCOLOR)\n file_name = get_file_name_all(target_counter)\n # print file_name\n dilated = cv2.dilate(img_counter, np.ones((3, 3)))\n cv2.imwrite(test_counter_url + file_name + '_' + str(3) + '_' + str(3) + 'diliate' + '.jpg', dilated)","sub_path":"tf_sda/dilate_gen.py","file_name":"dilate_gen.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"391424","text":"from div_frame import div, path\nfrom threshold_ import grey\nfrom re_video import remake\nimport cv2\nimport os\n\n\ndef save_image(image, 
addr):\n address = addr\n cv2.imwrite(address, image)\n\n\ndef change(img_path):\n ig = grey(img_path)\n\n def num(m, n):\n count_pix = 0\n for ii in range(0, 4):\n for jj in range(0, 4):\n if ig[4 * m + ii, 4 * n + jj] <= 10:\n count_pix = count_pix + 1\n return count_pix\n\n def draw_black(m, n):\n for ii in range(0, 4):\n for jj in range(0, 4):\n ig[4 * m + ii, 4 * n + jj] = 255\n for ii in range(1, 3):\n for jj in range(1, 3):\n ig[4 * m + ii, 4 * n + jj] = 0\n\n def draw_white(m, n):\n for ii in range(0, 4):\n for jj in range(0, 4):\n ig[4 * m + ii, 4 * n + jj] = 255\n\n for j in range(0, 360):\n for i in range(0, 270):\n if num(i, j) >= 8:\n draw_black(i, j)\n else:\n draw_white(i, j)\n return ig\n\n\nif __name__ == \"__main__\":\n path_video = \"bad apple.mp4\"\n print(\"————Start extracting video frames————\")\n div(path_video)\n print(\"————Frame extraction finished————\")\n file_list = os.listdir(path)\n file_list.sort(key=lambda x: int(x[:-4]))\n print(\"————Start converting frame images————\")\n for file in file_list:\n file_path = path + file\n print(\"Converted: \" + file_path)\n save_image(change(file_path), file_path)\n print(\"————Frame conversion finished————\")\n print(\"————Start building the video————\")\n remake(path, \"BadApple\", 30, 1440, 1080)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"395550867","text":"# ----------------------------------------------------------------------\r\n# TDEF2DynaML.py\r\n# ----------------------------------------------------------------------\r\n# Author: Kent Wheeler\r\n# Date: 14 May 2020\r\n# Purpose: Script to convert Trimble data exchange files to DynaML \r\n# format, identify and export trivial baselines as a kml\r\n# ----------------------------------------------------------------------\r\n# Usage: cmd:\\\\> python TDEF2DynaML.py\r\n# ----------------------------------------------------------------------\r\n# Notes: - All files with the extension *.asc in the cwd will be\r\n# attempted to be converted to DynaML\r\n# - only supports G type measurements, other measurements\r\n# are ignored\r\n# - the following baseline details are hardcoded\r\n# ControlRec.TimeStatus='E'\r\n# ControlRec.EphemerisType='B'\r\n# ControlRec.AtReceiver='TRIM'\r\n# ControlRec.ToReceiver='TRIM'\r\n# ControlRec.FrequencyMode='D'\r\n# ControlRec.SurveyTechnique='S'\r\n# ControlRec.Solution='FX'\r\n# ControlRec.EpochInterval=15\r\n#\r\nimport subprocess\r\nimport math\r\nimport numpy\r\nfrom numpy import matmul, matrix\r\nfrom math import sin, cos, radians, sqrt\r\nimport datetime, os, sqlite3\r\n\r\nclass DnaStation:\r\n def __init__(self):\r\n self.Name = ''\r\n self.Constraint=''\r\n self.W_Constraint=''\r\n self.Type=''\r\n self.XAxis=''\r\n self.YAxis=''\r\n self.Height=''\r\n self.Description=''\r\n self.aAxis=0\r\n self.bAxis=0\r\n self.ErrAz=0\r\n self.HorizCoordMethod=''\r\n self.RelativeHorizAccuracy=''\r\n self.NonGSNumber=''\r\n self.SelectPoint='true'\r\n self.SelectRL='true'\r\nclass AdditionalInfoMsr:\r\n def __init__(self):\r\n #Additional information is included as a comment in the DynaML file. 
This can be used for database import\r\n self.StartDateTime=datetime.datetime(1994, 1, 1, 00, 00,00)\r\n self.Duration=datetime.datetime(1966, 1, 1, 00, 00,00)\r\n self.FinishDateTime=datetime.datetime(1994, 1, 1, 00, 00,00)\r\n self.TimeStatus=''\r\n self.EphemerisType=''\r\n self.AtReceiver=''\r\n self.ToReceiver=''\r\n self.FrequencyMode=''\r\n self.SurveyTechnique='SLEV'\r\n self.Solution=''\r\n self.EpochInterval=''\r\n self.Class='LC'\r\n self.LevelDistance='0.01'\r\n self.InstrumentModel=''\r\n self.Derivation='MEAS'\r\n self.NonGSNumber=''\r\nclass DnaMeasurement:\r\n def __init__(self):\r\n self.type = ''\r\n self.vscale='1'\r\n self.pscale='1'\r\n self.lscale='1'\r\n self.hscale='1'\r\n self.first=''\r\n self.second=''\r\n self.stddev=''\r\n self.total=''\r\n self.instheight=0\r\n self.targheight=0\r\n self.targets=''\r\n self.value=''\r\n self.targetstddevs=''\r\n self.dx=''\r\n self.dy=''\r\n self.dz=''\r\n self.MatrixType=''\r\n self.Vs=numpy.zeros([3,3])\r\n self.Ds=numpy.zeros([3,3])\r\n \r\nclass DeviceHeight:\r\n def __init__(self):\r\n #Device Height might be the height of instrument at a point or Height of target\r\n self.StnName=[]\r\n self.RefHeight=[]\r\ndef add_DeviceHeight(self,Stn,Hgt):\r\n self.StnName.append(Stn)\r\n self.RefHeight.append(Hgt)\r\ndef hms2hp(HMS_Ang):\r\n #Input: HH MM SS.ssss used by Geolab\r\n #Output: HH.MMSSSsssss used by DynAdjust\r\n sign=1\r\n HMS_Ang=HMS_Ang.upper()\r\n if HMS_Ang.find('S')!=-1 or HMS_Ang.find('-')!=-1:\r\n sign=-1\r\n while HMS_Ang.find(' ')!=-1:\r\n HMS_Ang=HMS_Ang.replace(' ',' ')\r\n HMS_Ang=HMS_Ang.replace('S','')\r\n HMS_Ang=HMS_Ang.replace('E','')\r\n HMS_Ang=HMS_Ang.replace('.',' ')\r\n aAng=HMS_Ang.split()\r\n aAng[0]=str(sign*abs(int(aAng[0])))\r\n aAng[1]=\"%02d\" % float(aAng[1])\r\n aAng[2]=\"%02d\" % float(aAng[2])\r\n return aAng[0] + '.' 
+ aAng[1] + aAng[2]+ aAng[3]\r\n\r\ndef dec2hp(dec):\r\n minute, second = divmod(abs(dec) * 3600, 60)\r\n degree, minute = divmod(minute, 60)\r\n hp = degree + (minute / 100) + (second / 10000)\r\n return hp if dec >= 0 else -hp\r\n\r\ndef hp2dec(hp):\r\n #Input: HH.MMSSsss\r\n #Output: dd.dddddd\r\n degmin, second = divmod(abs(hp) * 1000, 10)\r\n degree, minute = divmod(degmin, 100)\r\n dec = degree + (minute / 60) + (second / 360)\r\n return dec if hp >= 0 else -dec\r\n\r\ndef FindJobNumber(strg):\r\n #search a string for 8 consecutive numbers, this is probably the Job Number\r\n JN=''\r\n i=0\r\n while i+7!=len(strg):\r\n if strg[i:i+8].isnumeric()==True:\r\n JN=strg[i:i+8]\r\n i=i+1\r\n return JN\r\ndef kmlFooter():\r\n return '\\n'\r\ndef kmlHeader(nme):\r\n strg = '\\n'\r\n strg = strg + '\\n'\r\n strg = strg + ' \\n'\r\n strg = strg + ' ' + nme + '\\n'\r\n strg = strg + ' '\r\ndef MkLine(ln):\r\n strg = ' \\n'\r\n strg = strg + ' ' + str(ln[1])+' - - ' + str(ln[2])+ '\\n'\r\n strg = strg + ' Start: ' + str(ln[7]) + '\\nFinish: ' + str(ln[8]) + '\\n'\r\n strg = strg + ' #trivlineStyle\\n'\r\n strg = strg + ' \\n'\r\n strg = strg + ' 1\\n'\r\n strg = strg + ' 1\\n'\r\n strg = strg + ' ClampToGround\\n'\r\n strg = strg + ' \\n'\r\n strg = strg + ' ' + str(hp2dec(ln[4])) + ',' + str(hp2dec(ln[3])) + ',0 ' + str(hp2dec(ln[6])) + ',' + str(hp2dec(ln[5])) + ',0 \\n'\r\n strg = strg + ' \\n'\r\n strg = strg + ' \\n'\r\n return strg +' '\r\ndef Stn_xml_str(Stn):\r\n #Output: String for printing to xml that is one complete station\r\n xml_str='\\n'\r\n xml_str=xml_str+'' + Stn.Name + '\\n'\r\n xml_str=xml_str+'' + Stn.Constraint + '\\n'\r\n xml_str=xml_str+'' + Stn.Type + '\\n'\r\n xml_str=xml_str+'\\n'\r\n xml_str=xml_str+'' + Stn.Name + '\\n'\r\n xml_str=xml_str+'' + str(Stn.XAxis) + '\\n'\r\n xml_str=xml_str+'' + str(Stn.YAxis) + '\\n'\r\n xml_str=xml_str+'' + str(Stn.Height) + '\\n'\r\n xml_str=xml_str+'\\n'\r\n xml_str=xml_str+''+ Stn.Description+'\\n'\r\n xml_str=xml_str+'\\n' \r\n xml_str=xml_str+'\\n'\r\n return xml_str\r\n\r\ndef Msr_xml_str(row,re_scale=1):\r\n #Output: xml string for printing to file. 
Caters for type G, D, S, B, D, L, H\r\n Msr=DnaMeasurement()\r\n Msr.type = row[1]\r\n Msr.vscale=str(row[2]); Msr.pscale=str(row[3]); Msr.lscale=str(row[4]); Msr.hscale=str(row[5])\r\n Msr.first=row[6]; Msr.second=row[7]\r\n Msr.value=row[8]; Msr.stddev=row[9]; Msr.total=row[10]\r\n Msr.instheight=row[11]; Msr.targheight=row[12]\r\n Msr.targets=row[13]; Msr.targetstddevs=row[14]\r\n Msr.dx=str(row[15]); Msr.dy=str(row[16]); Msr.dz=str(row[17])\r\n Msr.Vs=matrix([[row[18],row[19],row[20]],[row[19],row[21],row[22]],[row[20],row[22],row[23]]])\r\n \r\n Msr.value=Msr.value.split(',')\r\n Msr.targets=Msr.targets.split(',')\r\n Msr.targetstddevs=Msr.targetstddevs.split(',')\r\n if Msr.vscale!='': Msr.vscale=str(sqrt(float(Msr.vscale)*sqrt(re_scale))**2)\r\n if Msr.stddev!='':Msr.stddev=str(float(row[9])*sqrt(re_scale))\r\n if Msr.targetstddevs!='':\r\n for i in range(1,len(Msr.targetstddevs)):\r\n Msr.targetstddevs[i]=str(float(Msr.targetstddevs[i])*sqrt(re_scale))\r\n \r\n ControlRec=AdditionalInfoMsr()\r\n ControlRec.StartDateTime=datetime.datetime.strptime(row[24],\"%Y-%m-%d %H:%M:%S\")\r\n if Msr.type=='G':ControlRec.Duration=datetime.datetime.strptime(row[26],\"%Y-%m-%d %H:%M:%S\")\r\n ControlRec.TimeStatus=row[26]; ControlRec.EphemerisType=row[28]\r\n ControlRec.AtReceiver=row[28]; ControlRec.ToReceiver=row[30]\r\n ControlRec.FrequencyMode=row[30]; ControlRec.SurveyTechnique=row[32]\r\n ControlRec.Solution=row[32]; ControlRec.EpochInterval=row[34]\r\n ControlRec.Class=row[34]; ControlRec.LevelDistance=row[36]\r\n ControlRec.InstrumentModel=row[36]; ControlRec.Derivation=row[38]\r\n ControlRec.NonGSNumber=row[39]\r\n \r\n xml_str='\\n'\r\n xml_str=xml_str+'' + Msr.type + '\\n'\r\n xml_str=xml_str+'\\n'\r\n\r\n if Msr.type == 'G':\r\n xml_str=xml_str+'' + GNSSdate2Ref(ControlRec.StartDateTime) + '\\n'\r\n xml_str=xml_str+'' + ControlRec.StartDateTime.strftime('%d.%m.%Y') + '\\n'\r\n xml_str=xml_str+'' + Msr.vscale + '\\n'\r\n xml_str=xml_str+'' + Msr.pscale + '\\n'\r\n xml_str=xml_str+'' + Msr.lscale + '\\n'\r\n xml_str=xml_str+'' + Msr.hscale + '\\n'\r\n xml_str=xml_str+'' + Msr.first + '\\n'\r\n if Msr.second != '':\r\n xml_str=xml_str+'' + Msr.second + '\\n'\r\n if Msr.type != 'G' and Msr.type != 'D':\r\n xml_str=xml_str+'' + Msr.value[1] + '\\n'\r\n xml_str=xml_str+'' + Msr.stddev + '\\n'\r\n if Msr.type == 'G':\r\n xml_str=xml_str+'\\n'\r\n xml_str=xml_str+'' + Msr.dx + '\\n'\r\n xml_str=xml_str+'' + Msr.dy + '\\n'\r\n xml_str=xml_str+'' + Msr.dz + '\\n'\r\n xml_str=xml_str+'' + str(Msr.Vs[0,0]) + '\\n'\r\n xml_str=xml_str+'' + str(Msr.Vs[0,1]) + '\\n'\r\n xml_str=xml_str+'' + str(Msr.Vs[0,2]) + '\\n'\r\n xml_str=xml_str+'' + str(Msr.Vs[1,1]) + '\\n'\r\n xml_str=xml_str+'' + str(Msr.Vs[1,2]) + '\\n'\r\n xml_str=xml_str+'' + str(Msr.Vs[2,2]) + '\\n'\r\n xml_str=xml_str+'\\n'\r\n xml_str=xml_str+'\\n'\r\n \r\n if Msr.type == 'L':\r\n xml_str=xml_str+'\\n'\r\n \r\n if Msr.type == 'S':\r\n xml_str=xml_str+'' + str(Msr.instheight) + '\\n'\r\n xml_str=xml_str+'' + str(Msr.targheight) + '\\n'\r\n xml_str=xml_str+'\\n'\r\n\r\n if Msr.type == 'D':\r\n xml_str=xml_str+'' + Msr.value[1] + '\\n'\r\n xml_str=xml_str+'' + Msr.targetstddevs[1] + '\\n'\r\n xml_str=xml_str+'' + str(Msr.total-1) + '\\n'\r\n ObsNumber=2\r\n while ObsNumber<=Msr.total:\r\n xml_str=xml_str+'\\n'\r\n xml_str=xml_str+'\\n'\r\n xml_str=xml_str+'' + Msr.targets[ObsNumber] + '\\n'\r\n xml_str=xml_str+'' + Msr.value[ObsNumber] + '\\n'\r\n xml_str=xml_str+'' + Msr.targetstddevs[ObsNumber] + '\\n'\r\n xml_str=xml_str+'\\n'\r\n 
ObsNumber=ObsNumber+1\r\n xml_str=xml_str+'\\n'\r\n xml_str=xml_str+'\\n'\r\n xml_str=xml_str+'\\n'\r\n return xml_str\r\n\r\nc_vac = 299792.458\r\nk_0 = 0.9996\r\n\r\n# Ellipsoid Constants\r\nclass Ellipsoid(object):\r\n def __init__(self, semimaj, inversef):\r\n self.semimaj = semimaj\r\n self.inversef = inversef\r\n self.f = 1 / self.inversef\r\n self.semimin = float(self.semimaj * (1 - self.f))\r\n self.ecc1sq = float(self.f * (2 - self.f))\r\n self.ecc2sq = float(self.ecc1sq / (1 - self.ecc1sq))\r\n self.ecc1 = sqrt(self.ecc1sq)\r\n self.n = float(self.f / (2 - self.f))\r\n self.n2 = self.n ** 2\r\n\r\n# Geodetic Reference System 1980\r\ngrs80 = Ellipsoid(6378137, 298.25722210088)\r\n\r\ndef llh2xyz(lat, lng, ellht, ellipsoid=grs80):\r\n # Add input for ellipsoid (default: grs80)\r\n # Convert lat & long to radians\r\n lat = radians(hp2dec(float(lat)))\r\n lng = radians(hp2dec(float(lng)))\r\n ellht=float(ellht)\r\n # Calculate Ellipsoid Radius of Curvature in the Prime Vertical - nu\r\n if lat == 0:\r\n nu = grs80.semimaj\r\n else:\r\n nu = ellipsoid.semimaj/(sqrt(1 - ellipsoid.ecc1sq * (sin(lat)**2)))\r\n # Calculate x, y, z\r\n x = (nu + ellht) * cos(lat) * cos(lng)\r\n y = (nu + ellht) * cos(lat) * sin(lng)\r\n z = ((ellipsoid.semimin**2 / ellipsoid.semimaj**2) * nu + ellht) * sin(lat)\r\n return x, y, z\r\n\r\ndef ErrEllip2Ycluster(Stn,w_H):\r\n #Input: Supply a station with coordinates and error ellipse for coordinate uncertainty\r\n #Output: xml string for point cluster (Y-type observation)\r\n x, y, z = llh2xyz(Stn.XAxis, Stn.YAxis, Stn.Height)\r\n\r\n a=Stn.aAxis/2.44774683068\r\n b=Stn.bAxis/2.44774683068\r\n Az=90-Stn.ErrAz\r\n \r\n rAz=math.radians(Az)\r\n rlat=math.radians(float(Stn.XAxis))\r\n rlng=math.radians(float(Stn.YAxis))\r\n\r\n rl=numpy.zeros([3,3])\r\n rl[0,0]=-sin(rlng)\r\n rl[0,1]=-sin(rlat)*cos(rlng)\r\n rl[0,2]=cos(rlat)*cos(rlng)\r\n rl[1,0]=cos(rlng)\r\n rl[1,1]=-sin(rlat)*sin(rlng)\r\n rl[1,2]=cos(rlat)*sin(rlng)\r\n rl[2,1]=cos(rlat)\r\n rl[2,2]=sin(rlat)\r\n\r\n iA=numpy.zeros([3,3])\r\n iA[0,0]=(cos(rAz)*cos(rAz)*a*a)+(b*b*sin(rAz)*sin(rAz))\r\n iA[0,1]=(a*a-b*b)*cos(rAz)*sin(rAz)\r\n iA[1,0]=iA[0,1]\r\n iA[1,1]=(a*a*sin(rAz)*sin(rAz))+(b*b*cos(rAz)*cos(rAz))\r\n iA[2,2]=w_H**2\r\n \r\n Wt=matmul(matmul(rl,iA),rl.transpose())\r\n \r\n xml_str='\\n'\r\n xml_str=xml_str+'\\n'\r\n xml_str=xml_str+'\\n'\r\n xml_str=xml_str+'Y\\n'\r\n xml_str=xml_str+'\\n'\r\n xml_str=xml_str+'GDA2020\\n'\r\n xml_str=xml_str+'01.01.2020\\n'\r\n xml_str=xml_str+'1.000\\n'\r\n xml_str=xml_str+'1.000\\n'\r\n xml_str=xml_str+'1.000\\n'\r\n xml_str=xml_str+'1.000\\n'\r\n xml_str=xml_str+'XYZ\\n'\r\n xml_str=xml_str+'1\\n'\r\n xml_str=xml_str+'' + Stn.Name + '\\n'\r\n xml_str=xml_str+'\\n'\r\n xml_str=xml_str+''+str(x)+'\\n'\r\n xml_str=xml_str+''+str(y)+'\\n'\r\n xml_str=xml_str+''+str(z)+'\\n'\r\n xml_str=xml_str+''+str(Wt[0,0])+'\\n'\r\n xml_str=xml_str+''+str(Wt[0,1])+'\\n'\r\n xml_str=xml_str+''+str(Wt[0,2])+'\\n'\r\n xml_str=xml_str+''+str(Wt[1,1])+'\\n'\r\n xml_str=xml_str+''+str(Wt[1,2])+'\\n'\r\n xml_str=xml_str+''+str(Wt[2,2])+'\\n'\r\n xml_str=xml_str+'\\n'\r\n xml_str=xml_str+'\\n'\r\n \r\n return xml_str\r\n\r\ndef stn_header():\r\n xml_str='\\n'\r\n xml_str=xml_str+'\\n'\r\n return xml_str\r\ndef msr_header():\r\n xml_str='\\n'\r\n xml_str=xml_str+'\\n'\r\n return xml_str\r\ndef dML_footer():\r\n xml_str='\\n'\r\n return xml_str\r\ndef GNSSdate2Ref(obsDate):\r\n #Use the date of GNSS baseline obsedrvation to determine the reference frame used by broadcast 
ephemeris\r\n if obsDate >= datetime.datetime(1900, 1, 1) and obsDate < datetime.datetime(1994, 1, 2): \r\n Ref = 'ITRF1991'\r\n if obsDate >= datetime.datetime(1994, 1, 2) and obsDate < datetime.datetime(1995, 1, 1): \r\n Ref = 'ITRF1992'\r\n if obsDate >= datetime.datetime(1995, 1, 1) and obsDate < datetime.datetime(1996, 6, 30): \r\n Ref = 'ITRF1993'\r\n if obsDate >= datetime.datetime(1996, 6, 30) and obsDate < datetime.datetime(1998, 3, 1): \r\n Ref = 'ITRF1994'\r\n if obsDate >= datetime.datetime(1998, 3, 1) and obsDate < datetime.datetime(1999, 8, 1): \r\n Ref = 'ITRF1996'\r\n if obsDate >= datetime.datetime(1999, 8, 1) and obsDate < datetime.datetime(2001, 12, 2): \r\n Ref = 'ITRF1997'\r\n if obsDate >= datetime.datetime(2001, 12, 2) and obsDate < datetime.datetime(2006, 11, 5): \r\n Ref = 'ITRF2000'\r\n if obsDate >= datetime.datetime(2006, 11, 5) and obsDate < datetime.datetime(2011, 4, 17): \r\n Ref = 'ITRF2005'\r\n if obsDate >= datetime.datetime(2011, 4, 17): \r\n Ref = 'ITRF2008'\r\n return Ref\r\n#####################################################################################\r\n#### Input:Trimble Business Centre *.asc file #####\r\n#### Output: DynaML stn and msr file, #####\r\n#### if the GNSS network contains trivial baslines, it will output #####\r\n#### these to a google earth kml #####\r\n#####################################################################################\r\n\r\ndef create_connection(db_file):\r\n \"\"\" create a database connection to a SQLite database \"\"\"\r\n try:\r\n conn = sqlite3.connect(db_file)\r\n print(sqlite3.version)\r\n except:\r\n print (\"Cannot create a database'\")\r\n finally:\r\n conn.close()\r\nfor f in os.listdir(os.getcwd()):\r\n if f.endswith('.asc'):\r\n adjustment_name = f.replace('.asc','')\r\n # Connect to Sqlite and Open a new database\r\n dbname = adjustment_name+'.db'\r\n if not os.path.exists(dbname):\r\n create_connection(dbname)\r\n conn = sqlite3.connect(dbname) # or use :memory: to put it in RAM\r\n else:\r\n conn = sqlite3.connect(dbname) # or use :memory: to put it in RAM \r\n cursor = conn.cursor()\r\n # Create 4 Tables, Store Stations, Observations, Stations in discrete networks, Stations connected with directions\r\n cursor.execute('DROP TABLE IF EXISTS STATIONS')\r\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS STATIONS (\r\n ID integer PRIMARY KEY, STATION_NAME short text, COORD_TYPE text,\r\n CONSTRAIN text, W_CONSTRAIN text, LATITUDE double, LONGITUDE double, HEIGHT double, E_HEIGHT double,\r\n DESC text, GES_NAME text, GES94_LATITUDE double, GES94_LONGITUDE double, GES_HEIGHT double, HT_ACCURACY text, HT_METHOD text,\r\n HZ_ORDER text, HZ_ACCURACY text, HZ_METHOD text, CE double, A_AXIS double, B_AXIS double, ERR_AZ double,\r\n GES2020_LATITUDE double, GES2020_LONGITUDE double);\"\"\")\r\n conn.commit()\r\n cursor.execute('DROP TABLE IF EXISTS OBSERVATIONS')\r\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS OBSERVATIONS (\r\n ID integer PRIMARY KEY, TYPE text, VSCALE double, PSCALE double, LSCALE double, HSCALE double, FIRST short text, SECOND short text,\r\n VALUE text, SDEV text, TOTAL integer, INST_HEIGHT double, TARG_HEIGHT double, TARGETS text, TARGETS_SDEV text, DX double,\r\n DY double, DZ double, VS_1_1 double, VS_1_2 double, VS_1_3 double, VS_2_1 double, VS_2_2 double, VS_3_1 double,\r\n StartDateTime date,FinishDateTime date, Duration time, TimeStatus text, EphemerisType text, AtReceiver text, ToReceiver text, FrequencyMode text,\r\n SurveyTechnique text, Solution text, EpochInterval 
text, Class text, LevelDistance text, InstrumentModel text,\r\n Derivation text, NON_GS text, SESSION integer, TRIVIAL boolean);\"\"\")\r\n conn.commit()\r\n cursor.execute('DROP TABLE IF EXISTS DIR_TARGETS')\r\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS DIR_TARGETS (\r\n ID integer PRIMARY KEY, OBSERVATIONS_ID integer, TARGETS short text, VALUE text, TARGETS_SDEV text);\"\"\")\r\n conn.commit()\r\n cursor.execute('DROP TABLE IF EXISTS NETWORKS')\r\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS NETWORKS (\r\n ID integer PRIMARY KEY, STATION_NAME text, NETWORK integer);\"\"\")\r\n conn.commit()\r\n cursor.execute('DROP TABLE IF EXISTS GNSS_SESSIONS')\r\n cursor.execute(\"\"\"CREATE TABLE IF NOT EXISTS GNSS_SESSIONS (\r\n ID integer PRIMARY KEY, NETWORK integer, SESSION date, STATION_NAME text, StartDateTime text, FinishDateTime text);\"\"\")\r\n conn.commit()\r\n f = open(adjustment_name+'.asc', 'r')\r\n # Run through each line of the Trimble Data Exchange file and extract the relevant lines\r\n lineCount=0; idcount=0; Tgtidcount=0; GNSSmarks='';time_zero = datetime.datetime.strptime('00:00:00', '%H:%M:%S')\r\n InstHts=DeviceHeight(); TgtHts=DeviceHeight(); CurrentMsr=DnaMeasurement(); ControlRec=AdditionalInfoMsr()\r\n jobNumber=FindJobNumber(os.getcwd())\r\n print('Reading the Trimble Data Exchange (.asc) File...')\r\n for linestr in f.readlines():\r\n if linestr.startswith('Station='):\r\n ilinestr=linestr.split(':')\r\n CurrentStn=DnaStation()\r\n CurrentStn.Name=ilinestr[2].strip()\r\n if ilinestr[9].strip()=='0':CurrentStn.Constraint='FF'\r\n if ilinestr[9].strip()=='1':CurrentStn.Constraint='CC'\r\n if ilinestr[10].strip()=='1' or ilinestr[11].strip()=='1':CurrentStn.Constraint=CurrentStn.Constraint+'C'\r\n else:CurrentStn.Constraint=CurrentStn.Constraint+'F'\r\n CurrentStn.Constraint=CurrentStn.Constraint.strip()\r\n CurrentStn.Type='LLH'\r\n CurrentStn.XAxis=dec2hp(-1*float(ilinestr[3].strip()[:-1]))\r\n CurrentStn.YAxis=dec2hp(float(ilinestr[4].strip()[:-1]))\r\n CurrentStn.Height=ilinestr[8].strip()\r\n CurrentStn.NonGSNumber='E'+jobNumber\r\n stnRec=('||||||||||||||').split('|')\r\n cursor.execute(\"INSERT INTO STATIONS (COORD_TYPE, STATION_NAME, CONSTRAIN, W_CONSTRAIN, LATITUDE, LONGITUDE, HEIGHT, DESC, GES_NAME, GES94_LATITUDE, GES94_LONGITUDE, GES_HEIGHT, HT_ACCURACY, HT_METHOD, HZ_ORDER, HZ_ACCURACY, HZ_METHOD, CE, A_AXIS, B_AXIS, ERR_AZ, GES2020_LATITUDE, GES2020_LONGITUDE) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)\", \r\n [CurrentStn.Type,CurrentStn.Name,CurrentStn.Constraint,CurrentStn.W_Constraint, CurrentStn.XAxis,CurrentStn.YAxis, CurrentStn.Height, CurrentStn.Description, stnRec[0],stnRec[1],stnRec[2],stnRec[3],stnRec[4],stnRec[5],stnRec[6],stnRec[7],stnRec[8],stnRec[9],stnRec[10],stnRec[11],stnRec[12],stnRec[13],stnRec[14]])\r\n conn.commit()\r\n \r\n # Scrape information for Landgate GESMAR control Records\r\n if linestr.startswith('Vector='):\r\n ControlRec=AdditionalInfoMsr()\r\n ilinestr=linestr.split(':')\r\n stdate=ilinestr[19]\r\n sttime=ilinestr[20]\r\n istdate=stdate.split()\r\n isttime=sttime.split()\r\n yr=int(istdate[2])\r\n mth=int(istdate[1])\r\n ddy=int(istdate[0])\r\n hr=int(isttime[0])\r\n mn=int(isttime[1])\r\n sc=int(float(isttime[2]))\r\n ControlRec.StartDateTime=datetime.datetime(yr, mth, ddy, hr, mn,sc)\r\n ControlRec.NonGSNumber='E' + jobNumber\r\n fhdate=ilinestr[21]\r\n fhtime=ilinestr[22]\r\n ifhdate=fhdate.split()\r\n ifhtime=fhtime.split()\r\n yr=int(ifhdate[2])\r\n mth=int(ifhdate[1])\r\n ddy=int(ifhdate[0])\r\n 
hr=int(ifhtime[0])\r\n mn=int(ifhtime[1])\r\n sc=int(float(ifhtime[2]))\r\n ControlRec.FinishDateTime=datetime.datetime(yr, mth, ddy, hr, mn,sc)\r\n ControlRec.TimeStatus='E'\r\n ControlRec.EphemerisType='B'\r\n ControlRec.AtReceiver='TRIM'\r\n ControlRec.ToReceiver='TRIM'\r\n ControlRec.FrequencyMode='D'\r\n ControlRec.SurveyTechnique='S'\r\n ControlRec.Solution='FX'\r\n ControlRec.EpochInterval=15\r\n ControlRec.Class=''\r\n \r\n CurrentMsr=DnaMeasurement()\r\n CurrentMsr.type='G'\r\n CurrentMsr.first=ilinestr[2]\r\n CurrentMsr.second=ilinestr[3]\r\n CurrentMsr.dx=ilinestr[4]\r\n CurrentMsr.dy=ilinestr[5]\r\n CurrentMsr.dz=ilinestr[6]\r\n GNSSmarks=GNSSmarks + ';' + CurrentMsr.first + ';' + CurrentMsr.second\r\n \r\n CurrentMsr.MatrixType='COV'\r\n CurrentMsr.vscale=1\r\n CurrentMsr.Vs[0,0]=ilinestr[7]; CurrentMsr.Vs[1,0]=ilinestr[8]; CurrentMsr.Vs[2,0]=ilinestr[9]\r\n CurrentMsr.Vs[0,1]=ilinestr[10]; CurrentMsr.Vs[1,0]=ilinestr[11]\r\n CurrentMsr.Vs[1,1]=ilinestr[12]\r\n cursor.execute(\"INSERT INTO OBSERVATIONS (TYPE, VSCALE, PSCALE, LSCALE, HSCALE, FIRST, SECOND, VALUE, SDEV, TOTAL, INST_HEIGHT, TARG_HEIGHT, TARGETS, TARGETS_SDEV, DX, DY, DZ, VS_1_1, VS_1_2, VS_1_3, VS_2_1, VS_2_2, VS_3_1, StartDateTime, FinishDateTime, Duration, TimeStatus, EphemerisType, AtReceiver, ToReceiver, FrequencyMode, SurveyTechnique, Solution, EpochInterval, Class, LevelDistance, InstrumentModel, Derivation, NON_GS) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\", \r\n [CurrentMsr.type, CurrentMsr.vscale, CurrentMsr.pscale, CurrentMsr.lscale, CurrentMsr.hscale, CurrentMsr.first, CurrentMsr.second, CurrentMsr.value, CurrentMsr.stddev, CurrentMsr.total, CurrentMsr.instheight, CurrentMsr.targheight, CurrentMsr.targets, CurrentMsr.targetstddevs, CurrentMsr.dx, CurrentMsr.dy, CurrentMsr.dz, CurrentMsr.Vs[0,0], CurrentMsr.Vs[0,1], CurrentMsr.Vs[0,2], CurrentMsr.Vs[1,1], CurrentMsr.Vs[1,2], CurrentMsr.Vs[2,2],ControlRec.StartDateTime,ControlRec.FinishDateTime, ControlRec.Duration, ControlRec.TimeStatus, ControlRec.EphemerisType, ControlRec.AtReceiver, ControlRec.ToReceiver, ControlRec.FrequencyMode, ControlRec.SurveyTechnique, ControlRec.Solution, ControlRec.EpochInterval, ControlRec.Class, ControlRec.LevelDistance, ControlRec.InstrumentModel, ControlRec.Derivation, ControlRec.NonGSNumber])\r\n conn.commit() \r\n \r\n lineCount=lineCount+1\r\n f.close\r\n \r\n ### Write the database to DynaML formats ###\r\n print('\\nWriting DynaML Files...')\r\n stnout = open(adjustment_name + '.stn.xml', 'w')\r\n msrout = open(adjustment_name + '.msr.xml', 'w')\r\n stnout.write(stn_header())\r\n msrout.write(msr_header())\r\n sqlstring=\"SELECT STATIONS.STATION_NAME, STATIONS.CONSTRAIN, STATIONS.COORD_TYPE, STATIONS.LATITUDE, STATIONS.LONGITUDE, STATIONS.HEIGHT, STATIONS.E_HEIGHT, STATIONS.DESC, STATIONS.HZ_METHOD, STATIONS.HZ_ACCURACY, STATIONS.A_AXIS, STATIONS.B_AXIS, STATIONS.ERR_AZ \\\r\n FROM STATIONS \\\r\n ORDER BY STATIONS.ID;\"\r\n qry=cursor.execute(sqlstring).fetchall()\r\n print(' Writing:' + stnout.name)\r\n for row in qry:\r\n Stn=DnaStation()\r\n Stn.Name=row[0]; Stn.Constraint=row[1]; Stn.Type=row[2]\r\n Stn.XAxis=row[3]; Stn.YAxis=row[4]; Stn.Height=row[5]; Stn.Description=row[7]\r\n Stn.aAxis=row[10]; Stn.bAxis=row[11]; Stn.ErrAz=row[12]\r\n Stn.HorizCoordMethod=row[8]; Stn.RelativeHorizAccuracy=row[9]; Stn.NonGSNumber='E'+jobNumber\r\n stnout.write(Stn_xml_str(Stn))\r\n sqlstring=\"SELECT OBSERVATIONS.* \\\r\n FROM OBSERVATIONS 
\\\r\n ORDER BY OBSERVATIONS.ID;\"\r\n qry=cursor.execute(sqlstring).fetchall()\r\n print(' Writing:' + msrout.name)\r\n for row in qry:\r\n msrout.write(Msr_xml_str(row))\r\n stnout.write(dML_footer())\r\n msrout.write(dML_footer())\r\n stnout.close()\r\n msrout.close()\r\n \r\n ##################################################################################\r\n ### If this is a final adjustment, break the adjustment into discrete networks ###\r\n ##################################################################################\r\n if adjustment_name.lower().find('final')!=-1 and cursor.execute(\"SELECT Count(STATIONS.A_Axis) AS CountOfErrorEllipses FROM STATIONS where STATIONS.A_AXIS<>'';\").fetchall()!=0:\r\n print('\\nBreaking adjustment into discrete networks...')\r\n network_num=1; prevCnt=0\r\n while prevCnt!=cursor.execute(\"SELECT Count(STATIONS.STATION_NAME) AS CountOfSTATION_NAME FROM STATIONS;\").fetchall():\r\n n_adjustment_name=adjustment_name +'_' + str(network_num)\r\n nW_adjustment_name=adjustment_name +'_' + str(network_num) +'W'\r\n print(' Network: ' + n_adjustment_name)\r\n sqlstring=\"INSERT INTO NETWORKS ( STATION_NAME, NETWORK ) \\\r\n SELECT STATIONS.STATION_NAME, \"+ str(network_num) + \" AS NETWORK \\\r\n FROM STATIONS LEFT JOIN NETWORKS ON STATIONS.STATION_NAME = NETWORKS.STATION_NAME \\\r\n WHERE (((NETWORKS.STATION_NAME) Is Null)) \\\r\n LIMIT 1;\"\r\n cursor.execute(sqlstring)\r\n conn.commit()\r\n while prevCnt!=cursor.execute(\"SELECT Count(NETWORKS.STATION_NAME) AS CountOfSTATION_NAME FROM NETWORKS;\").fetchall():\r\n prevCnt = cursor.execute(\"SELECT Count(NETWORKS.STATION_NAME) AS CountOfSTATION_NAME FROM NETWORKS;\").fetchall()\r\n sqlstring=\"INSERT INTO NETWORKS ( STATION_NAME, NETWORK ) \\\r\n SELECT TMP.STATION_NAME, \"+ str(network_num) + \" AS NETWORK \\\r\n FROM (SELECT OBSERVATIONS.SECOND AS STATION_NAME \\\r\n FROM NETWORKS INNER JOIN OBSERVATIONS ON NETWORKS.STATION_NAME = OBSERVATIONS.FIRST \\\r\n UNION \\\r\n SELECT OBSERVATIONS.FIRST AS STATION_NAME \\\r\n FROM NETWORKS INNER JOIN OBSERVATIONS ON NETWORKS.STATION_NAME = OBSERVATIONS.SECOND \\\r\n UNION \\\r\n SELECT DIR_TARGETS.TARGETS AS STATION_NAME \\\r\n FROM (NETWORKS INNER JOIN OBSERVATIONS ON NETWORKS.STATION_NAME = OBSERVATIONS.FIRST) INNER JOIN DIR_TARGETS ON OBSERVATIONS.ID = DIR_TARGETS.OBSERVATIONS_ID \\\r\n UNION \\\r\n SELECT OBSERVATIONS.FIRST AS STATION_NAME \\\r\n FROM NETWORKS INNER JOIN (OBSERVATIONS INNER JOIN DIR_TARGETS ON OBSERVATIONS.ID = DIR_TARGETS.OBSERVATIONS_ID) ON NETWORKS.STATION_NAME = DIR_TARGETS.TARGETS \\\r\n UNION \\\r\n SELECT NETWORKS.STATION_NAME AS STATION_NAME \\\r\n FROM NETWORKS) AS TMP \\\r\n LEFT JOIN NETWORKS ON TMP.STATION_NAME = NETWORKS.STATION_NAME \\\r\n WHERE (((TMP.STATION_NAME)<>\\\"\\\") AND ((NETWORKS.STATION_NAME) Is Null));\"\r\n cursor.execute(sqlstring)\r\n conn.commit()\r\n ### Create non weighted adjustment files for each discrete network ###\r\n n_stnout = open(adjustment_name +'_' + str(network_num) + '.stn.xml', 'w')\r\n n_msrout = open(adjustment_name +'_' + str(network_num) + '.msr.xml', 'w')\r\n n_stnout.write(stn_header())\r\n n_msrout.write(msr_header())\r\n sqlstring=\"SELECT STATIONS.STATION_NAME, STATIONS.CONSTRAIN, STATIONS.COORD_TYPE, STATIONS.LATITUDE, STATIONS.LONGITUDE, STATIONS.HEIGHT, STATIONS.E_HEIGHT, STATIONS.DESC, STATIONS.HZ_METHOD, STATIONS.HZ_ACCURACY, TMP.VALUE, TMP.SDEV, STATIONS.A_AXIS, STATIONS.B_AXIS, STATIONS.ERR_AZ \\\r\n FROM STATIONS INNER JOIN (NETWORKS LEFT JOIN (SELECT OBSERVATIONS.* \\\r\n 
FROM OBSERVATIONS WHERE (((OBSERVATIONS.TYPE)=\\\"H\\\"))) AS TMP ON NETWORKS.STATION_NAME = TMP.FIRST) ON STATIONS.STATION_NAME = NETWORKS.STATION_NAME \\\r\n WHERE (((NETWORKS.NETWORK)=\"+ str(network_num) + \"));\"\r\n qry=cursor.execute(sqlstring).fetchall()\r\n for row in qry:\r\n Stn=DnaStation()\r\n Stn.Name=row[0]; Stn.Constraint=row[1]; Stn.Type=row[2]\r\n Stn.XAxis=row[3]; Stn.YAxis=row[4]; Stn.Height=row[5]; Stn.Description=row[7]\r\n Stn.aAxis=row[12]; Stn.bAxis=row[13]; Stn.ErrAz=row[14]\r\n Stn.HorizCoordMethod=row[8]; Stn.RelativeHorizAccuracy=row[9]; Stn.NonGSNumber='E'+jobNumber\r\n n_stnout.write(Stn_xml_str(Stn))\r\n \r\n sqlstring=\"SELECT OBSERVATIONS.* \\\r\n FROM (NETWORKS INNER JOIN OBSERVATIONS ON NETWORKS.STATION_NAME = OBSERVATIONS.FIRST) \\\r\n WHERE (((NETWORKS.NETWORK)=\"+ str(network_num) + \")) \\\r\n ORDER BY OBSERVATIONS.ID;\"\r\n qry=cursor.execute(sqlstring).fetchall()\r\n for row in qry:\r\n n_msrout.write(Msr_xml_str(row))\r\n \r\n n_stnout.write(dML_footer())\r\n n_msrout.write(dML_footer())\r\n n_stnout.close()\r\n n_msrout.close()\r\n # Run Dynadjust on the network '_n'\r\n print(' Adjusting: ' + n_adjustment_name)\r\n subprocess.run(\"import -n \" + n_adjustment_name + \" \" + n_adjustment_name + \".msr.xml \"+ n_adjustment_name + \".stn.xml --flag-unused-stations --remove-ignored-msr\")\r\n subprocess.run(\"geoid \" + n_adjustment_name + \" -g \\\"X:\\GA_Processing - AUSGeoid2020\\AusGeoid2020_V1.7\\AUSGeoid2020_20170908.gsb\\\" --convert\")\r\n subprocess.run(\"segment \" + n_adjustment_name + \" --min-inner-stns 500 --max-block-stns 500\")\r\n subprocess.run(\"adjust \" + n_adjustment_name + \" --staged --create-stage-files --output-adj-msr --output-pos-uncertainty --output-adj-gnss-units 1 --max-iterations 20 --free-stn-sd 5 --iteration-threshold 0.0005 --stn-coord-types ENzPLHhXYZ\")\r\n \r\n # Open the adjusted files and extract the sigma 0 and the Ellipse Height\r\n n_adj = open(n_adjustment_name + \".phased-stage.adj\", 'r')\r\n lineCount=0\r\n read_height=False\r\n for linestr in n_adj.readlines():\r\n if linestr[:35].strip() =='Rigorous Sigma Zero':sigma_0=float(linestr[-20:].strip())\r\n if linestr.strip() == 'Adjusted Coordinates': lineCount=0; read_height=True\r\n if lineCount>=5 and read_height==True and linestr!='\\n':\r\n sqlstring='UPDATE STATIONS \\\r\n SET E_HEIGHT = ' + linestr[103:113].strip() +' \\\r\n WHERE STATION_NAME = \\'' + linestr[:20].strip() +'\\';'\r\n cursor.execute(sqlstring)\r\n conn.commit()\r\n lineCount=lineCount+1\r\n n_adj.close\r\n ### Create weighted adjustment files, scaled by the sigma_0 for each discrete network ###\r\n w_stnout = open(nW_adjustment_name + '.stn.xml', 'w')\r\n w_msrout = open(nW_adjustment_name + '.msr.xml', 'w')\r\n w_stnout.write(stn_header())\r\n w_msrout.write(msr_header())\r\n sqlstring=\"SELECT STATIONS.STATION_NAME, STATIONS.W_CONSTRAIN, STATIONS.COORD_TYPE, STATIONS.LATITUDE, STATIONS.LONGITUDE, STATIONS.HEIGHT, STATIONS.E_HEIGHT, STATIONS.DESC, STATIONS.HZ_METHOD, STATIONS.HZ_ACCURACY, TMP.VALUE, TMP.SDEV, STATIONS.A_AXIS, STATIONS.B_AXIS, STATIONS.ERR_AZ, STATIONS.CONSTRAIN \\\r\n FROM STATIONS INNER JOIN (NETWORKS LEFT JOIN (SELECT OBSERVATIONS.* \\\r\n FROM OBSERVATIONS WHERE (((OBSERVATIONS.TYPE)=\\\"H\\\"))) AS TMP ON NETWORKS.STATION_NAME = TMP.FIRST) ON STATIONS.STATION_NAME = NETWORKS.STATION_NAME \\\r\n WHERE (((NETWORKS.NETWORK)=\"+ str(network_num) + \"));\"\r\n qry=cursor.execute(sqlstring).fetchall()\r\n for row in qry:\r\n Stn=DnaStation()\r\n Stn.Name=row[0]; 
Stn.Constraint=row[1]; Stn.Type=row[2]\r\n Stn.XAxis=row[3]; Stn.YAxis=row[4]; Stn.Height=row[5]; Stn.Description=row[7]\r\n Stn.aAxis=row[12]; Stn.bAxis=row[13]; Stn.ErrAz=row[14]\r\n Stn.HorizCoordMethod=row[8]; Stn.RelativeHorizAccuracy=row[9]; Stn.NonGSNumber='E'+jobNumber\r\n if Stn.HorizCoordMethod=='' and GNSSmarks.find(';'+Stn.name+';')!=-1: Stn.HorizCoordMethod='GNSS'\r\n else: Stn.HorizCoordMethod='GEOD'\r\n w_stnout.write(Stn_xml_str(Stn))\r\n if row[15][:2] =='CC':\r\n if Stn.Constraint=='FFC':\r\n w_msrout.write(ErrEllip2Ycluster(CurrentStn,0.001))\r\n else:\r\n w_msrout.write(ErrEllip2Ycluster(CurrentStn,100))\r\n sqlstring=\"SELECT OBSERVATIONS.* \\\r\n FROM (NETWORKS INNER JOIN OBSERVATIONS ON NETWORKS.STATION_NAME = OBSERVATIONS.FIRST) \\\r\n WHERE (((NETWORKS.NETWORK)=\"+ str(network_num) + \")) \\\r\n ORDER BY OBSERVATIONS.ID;\"\r\n qry=cursor.execute(sqlstring).fetchall()\r\n for row in qry:\r\n w_msrout.write(Msr_xml_str(row,sigma_0))\r\n \r\n w_stnout.write(dML_footer())\r\n w_msrout.write(dML_footer())\r\n w_stnout.close()\r\n w_msrout.close()\r\n # Run Dynadjust on the network '_n'\r\n print(' Adjusting: ' + nW_adjustment_name)\r\n subprocess.run(\"import -n \" + nW_adjustment_name + \" \" + nW_adjustment_name + \".msr.xml \"+ nW_adjustment_name + \".stn.xml --flag-unused-stations --remove-ignored-msr\")\r\n subprocess.run(\"geoid \" + nW_adjustment_name + \" -g \\\"X:\\GA_Processing - AUSGeoid2020\\AusGeoid2020_V1.7\\AUSGeoid2020_20170908.gsb\\\" --convert\")\r\n subprocess.run(\"segment \" + nW_adjustment_name + \" --min-inner-stns 500 --max-block-stns 500\")\r\n subprocess.run(\"adjust \" + nW_adjustment_name + \" --staged --create-stage-files --output-adj-msr --output-pos-uncertainty --output-adj-gnss-units 1 --max-iterations 20 --free-stn-sd 5 --iteration-threshold 0.0005 --stn-coord-types ENzPLHhXYZ\")\r\n \r\n network_num=network_num+1\r\n \r\n ####################################\r\n ### Search for Trivial Baselines ###\r\n ####################################\r\n w_kml = open(adjustment_name+'.kml', 'w')\r\n w_kml.write(kmlHeader(adjustment_name))\r\n \r\n #Break into individual networks for each session (event)\r\n sqlstring=\"SELECT DISTINCT OBSERVATIONS.StartDateTime AS EventTime \\\r\n FROM OBSERVATIONS WHERE ((OBSERVATIONS.TYPE)='G') \\\r\n ORDER BY EventTime;\"\r\n Events=cursor.execute(sqlstring).fetchall()\r\n for e in Events:\r\n network_num=0; prevNtCnt=0\r\n while prevNtCnt!=cursor.execute(\"SELECT Count(GNSS_SESSIONS.STATION_NAME) AS CountOfSTATION_NAME FROM GNSS_SESSIONS;\").fetchall():\r\n prevNtCnt=cursor.execute(\"SELECT Count(GNSS_SESSIONS.STATION_NAME) AS CountOfSTATION_NAME FROM GNSS_SESSIONS;\").fetchall()\r\n network_num=network_num+1\r\n sqlstring=\"INSERT INTO GNSS_SESSIONS (NETWORK, SESSION, STATION_NAME, StartDateTime, FinishDateTime) \\\r\n SELECT \"+ str(network_num) + \" AS NETWORK, '\"+ str(e[0]) + \"' AS SESSION, OBSERVATIONS.FIRST AS STATION_NAME, OBSERVATIONS.StartDateTime, OBSERVATIONS.FinishDateTime \\\r\n FROM OBSERVATIONS LEFT JOIN GNSS_SESSIONS ON OBSERVATIONS.FIRST = GNSS_SESSIONS.STATION_NAME \\\r\n WHERE (((OBSERVATIONS.StartDateTime)<='\"+ e[0] + \"') AND ((OBSERVATIONS.FinishDateTime)>='\"+ e[0] + \"') AND ((OBSERVATIONS.TYPE)='G') AND ((GNSS_SESSIONS.STATION_NAME) Is Null)) \\\r\n LIMIT 1\"\r\n cursor.execute(sqlstring)\r\n conn.commit()\r\n prevCnt=0\r\n while prevCnt!=cursor.execute(\"SELECT Count(GNSS_SESSIONS.STATION_NAME) AS CountOfSTATION_NAME FROM GNSS_SESSIONS;\").fetchall():\r\n prevCnt = 
cursor.execute(\"SELECT Count(GNSS_SESSIONS.STATION_NAME) AS CountOfSTATION_NAME FROM GNSS_SESSIONS;\").fetchall()\r\n sqlstring=\"INSERT INTO GNSS_SESSIONS (NETWORK, SESSION, STATION_NAME, StartDateTime, FinishDateTime) \\\r\n SELECT C.NETWORK, C.SESSION, C.STATION_NAME, C.StartDateTime, C.FinishDateTime \\\r\n FROM (SELECT C_FIRST.NETWORK, C_FIRST.SESSION, C_FIRST.STATION_NAME, C_FIRST.StartDateTime, C_FIRST.FinishDateTime \\\r\n FROM( \\\r\n SELECT MARKS_IN_SESS.NETWORK, MARKS_IN_SESS.SESSION, BASE_IN_SESS.FIRST AS STATION_NAME, BASE_IN_SESS.StartDateTime, BASE_IN_SESS.FinishDateTime \\\r\n FROM (SELECT GNSS_SESSIONS.NETWORK, GNSS_SESSIONS.SESSION, GNSS_SESSIONS.STATION_NAME \\\r\n FROM GNSS_SESSIONS \\\r\n WHERE (((GNSS_SESSIONS.NETWORK)=\"+ str(network_num) + \") AND ((GNSS_SESSIONS.SESSION)='\"+ e[0] + \"')) \\\r\n ) AS MARKS_IN_SESS \\\r\n INNER JOIN (SELECT OBSERVATIONS.FIRST, OBSERVATIONS.SECOND, OBSERVATIONS.StartDateTime, OBSERVATIONS.FinishDateTime \\\r\n FROM OBSERVATIONS \\\r\n WHERE (((OBSERVATIONS.TYPE)='G') AND ((OBSERVATIONS.StartDateTime)<='\"+ e[0] + \"') AND ((OBSERVATIONS.FinishDateTime)>='\"+ e[0] + \"')) \\\r\n ) AS BASE_IN_SESS ON MARKS_IN_SESS.STATION_NAME = BASE_IN_SESS.SECOND) AS C_FIRST \\\r\n UNION \\\r\n SELECT C_SECOND.NETWORK, C_SECOND.SESSION, C_SECOND.STATION_NAME, C_SECOND.StartDateTime, C_SECOND.FinishDateTime \\\r\n FROM( \\\r\n SELECT MARKS_IN_SESS.NETWORK, MARKS_IN_SESS.SESSION, BASE_IN_SESS.SECOND AS STATION_NAME, BASE_IN_SESS.StartDateTime, BASE_IN_SESS.FinishDateTime \\\r\n FROM (SELECT GNSS_SESSIONS.NETWORK, GNSS_SESSIONS.SESSION, GNSS_SESSIONS.STATION_NAME \\\r\n FROM GNSS_SESSIONS \\\r\n WHERE (((GNSS_SESSIONS.NETWORK)=\"+ str(network_num) + \") AND ((GNSS_SESSIONS.SESSION)='\"+ e[0] + \"')) \\\r\n ) AS MARKS_IN_SESS INNER JOIN (SELECT OBSERVATIONS.FIRST, OBSERVATIONS.SECOND, OBSERVATIONS.StartDateTime, OBSERVATIONS.FinishDateTime \\\r\n FROM OBSERVATIONS \\\r\n WHERE (((OBSERVATIONS.TYPE)='G') AND ((OBSERVATIONS.StartDateTime)<='\"+ e[0] + \"') AND ((OBSERVATIONS.FinishDateTime)>='\"+ e[0] + \"')) \\\r\n ) AS BASE_IN_SESS ON MARKS_IN_SESS.STATION_NAME = BASE_IN_SESS.FIRST) AS C_SECOND) AS C LEFT JOIN (SELECT GNSS_SESSIONS.STATION_NAME \\\r\n FROM GNSS_SESSIONS \\\r\n WHERE (((GNSS_SESSIONS.NETWORK)=\"+ str(network_num) + \") AND ((GNSS_SESSIONS.SESSION)='\"+ e[0] + \"')) \\\r\n ) AS MARKS_IN_SESS ON C.STATION_NAME = MARKS_IN_SESS.STATION_NAME \\\r\n WHERE (((MARKS_IN_SESS.STATION_NAME) Is Null))\"\r\n cursor.execute(sqlstring)\r\n conn.commit()\r\n \r\n ##Print the trivials to kml\r\n sqlstring=\"SELECT DISTINCT NETWORK, SESSION \\\r\n FROM GNSS_SESSIONS \\\r\n ORDER BY SESSION, NETWORK\"\r\n session=cursor.execute(sqlstring).fetchall()\r\n for s in session:\r\n baseCntSql = \"SELECT COUNT(OBSERVATIONS.ID) AS BASE_CNT \\\r\n FROM (GNSS_SESSIONS INNER JOIN OBSERVATIONS ON GNSS_SESSIONS.STATION_NAME = OBSERVATIONS.FIRST) \\\r\n INNER JOIN GNSS_SESSIONS AS GNSS_SESSIONS_1 ON OBSERVATIONS.SECOND = GNSS_SESSIONS_1.STATION_NAME \\\r\n WHERE (((OBSERVATIONS.TYPE)='G') AND ((GNSS_SESSIONS.NETWORK)=\"+ str(s[0]) + \") AND ((GNSS_SESSIONS.SESSION)='\"+ str(s[1]) + \"')AND ((GNSS_SESSIONS_1.NETWORK)=\"+ str(s[0]) + \") AND ((GNSS_SESSIONS_1.SESSION)='\"+ str(s[1]) + \"'))\"\r\n stationCntSql = \"SELECT COUNT(ID) AS STATION_CNT \\\r\n FROM GNSS_SESSIONS \\\r\n WHERE ((GNSS_SESSIONS.NETWORK)=\"+ str(s[0]) + \") AND ((GNSS_SESSIONS.SESSION)='\"+ str(s[1]) + \"')\"\r\n stationCnt = cursor.execute(stationCntSql).fetchall()\r\n baseCnt = 
cursor.execute(baseCntSql).fetchall()\r\n if stationCnt[0][0]-1', strict_slashes=False)\ndef states(id=None):\n \"\"\"Route display a HTML page: (inside the tag BODY)\"\"\"\n if id is not None:\n id = \"State.\" + id\n states = storage.all('State')\n cities = storage.all('City')\n return render_template('9-states.html',\n states=states, cities=cities, id=id)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port='5000', debug=True)\n","sub_path":"web_flask/9-states.py","file_name":"9-states.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"588221066","text":"import globalstuff\nimport time\n\ndef opensheet(key,user):\n\tsheet = user.open_by_key(key)\n\treturn sheet\n\ndef openworksheet(sheet,worksheetname):\n\tworksheet = sheet.worksheet(worksheetname)\n\treturn worksheet\n\ndef getallworksheets(sheet):\n\tallworksheets = sheet.worksheets()\n\treturn allworksheets\n\ndef updatesheet(rowdata):\n\temployeesheet = opensheet(globalstuff.employeesheetkey,globalstuff.userdata)\n\tworksheets = getallworksheets(employeesheet)\n\tworksheet = openworksheet(employeesheet,rowdata[5])\n\tcolumn = worksheet.col_values(1)\n\tcolumnlength = len(column)\n\trowdata.pop(2)\n\ttemp = rowdata[:4]\n\ttemp.append('AM')\n\tfor i in range(5):\n\t\tworksheet.update_cell(columnlength+1, i+1, temp[i])\n\tworksheet.update_cell(columnlength+1,8,'Work On Progress')\n\ndef assignsheet(row):\n\tperson = row[4]\n\tworksheet = openworksheet(globalstuff.employeesheetdata,person)\n\tcolumn = worksheet.col_values(1)\t\n\tcolumnlength = len(column)\n\ttempraw = row\n\ttempraw.pop(4)\n\ttempraw.pop(4)\n\tfor i in range(5):\n\t\tworksheet.update_cell(columnlength+1, i+1, tempraw[i])\n\tworksheet.update_cell(columnlength+1,8,'Work On Progress')\n\ndef finishedemployeesheet(row,assignedby):\n\tperson = row[4]\n\tpersonworksheet = openworksheet(globalstuff.employeesheetdata,person)\n\tnewassigned = 'You to ' + assignedby\n\ttemprow = row\n\ttemprow[4] = newassigned\n\t# tempo = \"Waiting for Approval\"\n\ttemprow.pop(5)\n\t# temprow.insert(5,tempo)\n\tcolumn = personworksheet.col_values(1)\t\n\tcolumnlength = len(column)\n\tfor i in range(5):\n\t\tpersonworksheet.update_cell(columnlength+1, i+1, temprow[i])\n\tpersonworksheet.update_cell(columnlength+1, 8, 'Waiting for Approval')\n\t# print \"Finished: \" + str(temprow)\n\treturn temprow\n\ndef finishedbrandsheet(row,assignedby):\n\tbrand = row[0]\n\tdescription = row[2]\n\tbrandsheetkey = globalstuff.brandsheets[brand]\n\tbrandsheet = opensheet(brandsheetkey,globalstuff.employeeuserdata)\n\tworksheet = openworksheet(brandsheet,'Retainer Flow')\n\n\tcellmatch = worksheet.find(description)\n\tentirerow = worksheet.row_values(cellmatch.row)\n\n\tworksheet.update_cell(cellmatch.row, 7, 'Waiting for Approval')\n\treturn row\n\ndef approveemployeesheet(row):\n\tpersoncell = row[4]\n\tperson = personcell[7:]\n\tpersonworksheet = openworksheet(globalstuff.employeesheetdata,person)\n\tdescription = row[2]\n\tfoundcell = personworksheet.find(description)\n\tpersonworksheet.update_cell(foundcell.row, 8, 'Completed')\n\ndef approvebrandsheet(row,brand):\n\t# print \"Hello\"\n\tperson = row[5]\n\tdescription = row[3]\n\temployeesheet = opensheet(globalstuff.employeesheetkey,globalstuff.userdata)\n\tworksheet = openworksheet(employeesheet,person)\n\tcellmatch = worksheet.findall(description)\n\tfor obj in cellmatch:\n\t\t# if worksheet.cell(obj.row,1) == brand:\n\t\tif 
worksheet.cell(obj.row,1).value == brand:\n\t\t\tworksheet.update_cell(obj.row, 8, 'Completed')\n\t\t\tbreak\n\t# if cell(cellmatch.row,1) == brand:\n\t# \tworksheet.update_cell(cellmatch.row, 8, 'Completed')\n\ndef preparelogfinished(row,target):\n\ttemprow = row\n\ttemprow[5] = target\n\ttempexec = temprow[4]\n\ttemprow[4] = tempexec[6:]\n\ttemprow.append('Finished')\n\tnow = time.strftime(\"%c\")\n\ttemprow.append(now)\n\tprintlog(temprow)\n\ndef preparelogfinishedam(row,executer):\n\ttemprow = row\n\ttemprow[5] = temprow[4]\n\ttemprow[4] = executer\n\ttemprow[6] = 'Finished'\n\ttemprow.append(time.strftime(\"%c\"))\n\tprintlog(temprow)\n\ndef preparelogassign(row,target):\n\ttemprow = row\n\ttemprow.append(target)\n\ttemprow.append('Assign')\n\ttemprow.append(time.strftime(\"%c\"))\n\tprintlog(temprow)\n\ndef printlog(row):\n\tlogsheetkey = globalstuff.logfile\n\tuser = globalstuff.employeeuserdata\n\tlogsheet = opensheet(logsheetkey,user)\n\tworksheet = openworksheet(logsheet,'Log')\n\tcol = worksheet.col_values(1)\n\tlencol = len(col)\n\tworksheet.insert_row(row,lencol+1)\n","sub_path":"sheetaccess.py","file_name":"sheetaccess.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"629257850","text":"import shelve\n\n# CMD click on a module to learn more about it !! ***\n\n# print(dir())\n#\n# # List of Python 3.6 Built-In Functions:\n# # https://docs.python.org/3/library/functions.html\n#\n# #print(dir(__builtins__))\n#\n# for m in dir(__builtins__):\n# print(m)\n\nprint(dir(shelve))\n\nfor obj in dir(shelve.Shelf):\n if obj[0] != \"_\":\n print(obj)\n\n\nhelp(shelve)\n","sub_path":"python_modules/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"213134481","text":"if __name__ == '__main__':\n import sys\n sys.path.insert(0, 'C:\\\\Users\\\\James Jiang\\\\Documents\\\\Project Euler')\n\nfrom functions import *\n\nfrom progress import Progress\nanswers_list = ['dummy']\nwith open('C:\\\\Users\\\\James Jiang\\\\Documents\\\\Project Euler\\\\answers.txt') as answers:\n for line in answers:\n answers_list.append(int(line))\nprogress_ = Progress(\"Problem 027: Quadratic primes\", 0, 2000)\n\nmax_number = 0\nmax_number_ab = 0\nfor a in range(-1000, 1000):\n progress_.count = 1000 + a\n progress_.progress()\n for b in range(-1000, 1000):\n if (a != 0) and (b != 0):\n count = 0\n n = 0\n while True:\n if is_prime(n**2 + a*n + b):\n count += 1\n else:\n if count > max_number:\n max_number = count\n max_number_ab = a*b\n break\n n += 1\n\nprogress_.count = max_number_ab\nprogress_.total = answers_list[27]\nprogress_.progress()\n\nif __name__ == '__main__':\n input()\n","sub_path":"python/problem_27.py","file_name":"problem_27.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"272285235","text":"# Roman to numbers\n# (I = 1) (V = 5) (X = 10) (L = 50) (C = 100) (D = 500) (M = 1000)\n\ndef romanToInt (s):\n # key value = roman, decimal\n table = {\n 'I': 1,\n 'V': 5,\n 'X': 10,\n 'L': 50,\n 'C': 100,\n 'D': 500,\n 'M': 1000\n } \n # integer value\n number = 0\n\n # scan each integer value\n for i, char in enumerate(s):\n # add value\n number += table[char]\n\n if i and (table[char] > table[s[i-1]]):\n # adjustment for IV,IX,XL,XC,CD,CM\n number -= 2 * table [s[i-1]]\n return 
number","sub_path":"Easy/05-Roman to Integer.py","file_name":"05-Roman to Integer.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"349550751","text":"\"\"\"\nThis is an example of a very simple graph which prints hello for each even number x in the input stream,\nusing a conditional RuleBasedModel node and a HelloPrinter h1.Action.\n\"\"\"\n\nimport h1st.core as h1\nfrom rule_based_model import RuleBasedModel\n\nclass HelloPrinter(h1.Action):\n \"\"\"Print hello to the inputs value\"\"\"\n def call(self, command, inputs):\n # Note that H1st does the conditional/filtering orchestration already.\n # All we need to do here is just to print.\n for d in inputs[\"predictions\"]:\n print(\"Hello world {}!\".format(d[\"value\"]))\n\n\ndef create_graph():\n \"\"\"Create a graph which prints hello for each even number x in the input stream,\n using a conditional RuleBasedModel node and a HelloPrinter h1.Action.\"\"\"\n graph = h1.Graph()\n graph.start()\\\n .add(h1.Decision(RuleBasedModel(), result_field=\"predictions\"))\\\n .add(yes=HelloPrinter(), no=h1.NoOp())\n graph.end()\n return graph\n\nif __name__ == \"__main__\":\n graph = create_graph()\n results = graph.predict({\"values\": range(6)})\n # Should get:\n # Hello world 0!\n # Hello world 2!\n # Hello world 4!\n","sub_path":"examples/HelloWorld/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"233905248","text":"from app_blog.app import db\nfrom flask import Blueprint\nfrom flask import render_template\n\nfrom app_blog.models import Post\n\nfrom .forms import PostForm\n\nfrom flask import request, redirect, url_for\n\n\n\nposts = Blueprint('posts', __name__, template_folder='templates')\n\n# http:/localhost:5000/blog/create\n@posts.route('/create', methods=['POST','GET'])\ndef create_post():\n if request.method == 'POST':\n title = request.form['title']\n body = request.form['body']\n\n try:\n post = Post(title=title, body=body)\n db.session.add(post)\n db.session.commit()\n except:\n print('error')\n\n return redirect( url_for('posts.index'))\n\n form = PostForm()\n return render_template('posts/create_post.html', form=form)\n\n\n\n\n\n@posts.route('/')\ndef index():\n q = request.args.get('q')\n\n if q:\n posts = Post.query.filter(Post.title.contains(q) | Post.body.contains(q)).all()\n else:\n posts = Post.query.order_by(Post.created.desc())\n return render_template('posts/index.html', posts=posts)\n\n\n@posts.route('/')\ndef post_detail(slug):\n post = Post.query.filter(Post.slug == slug).first()\n return render_template('posts/post_detail.html', post=post)\n","sub_path":"app_blog/posts/blueprint.py","file_name":"blueprint.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"227533143","text":"# Proxy creation to deter Amazon issues, code created by:\n# https://www.scrapehero.com/how-to-rotate-proxies-and-ip-addresses-using-python-3/\n###################################################################################\n\ndef get_proxies():\n proxy_url = 'https://free-proxy-list.net/'\n proxy_response = requests.get(proxy_url)\n parser = fromstring(proxy_response.text)\n set_proxies = set()\n for i in parser.xpath('//tbody/tr')[:3]:\n if i.xpath('.//td[7][contains(text(),\"yes\")]'):\n find_proxy = 
\":\".join([i.xpath('.//td[1]/text()')[0], i.xpath('.//td[2]/text()')[0]])\n set_proxies.add(find_proxy)\n return set_proxies\n\n\nproxies = get_proxies()\nproxy_pool = cycle(proxies)\n\nurl = 'https://httpbin.org/ip'\nfor i in range(1,4):\n # Get a proxy from the pool\n proxy = next(proxy_pool)\n print(\"Request #%d\"%i)\n try:\n response = requests.get(url,proxies={\"http\": proxy, \"https\": proxy})\n print(response.json())\n except:\n # Skip retries as its beyond the scope of this tutorial and we are only downloading a single url\n print(\"Skipping. Connnection error\")\n\n\n\n#################################################################################","sub_path":"proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"625991428","text":"if __name__ == '__main__':\n\n MAX_SIZE = 100\n\n ITEM_LIST = []\n s = [6, 8, 6, 5, 8, 6, 1, 6, 6, 1, 2, 8, 8, 5, 5, 4, 3, 6, 7, 7]\n v = [1, 1, 2, 6, 9, 8, 5, 6, 10, 3, 3, 7, 2, 3, 6, 6, 1, 3, 10, 10]\n\n for idx in range(20):\n ITEM_LIST.append((s[idx], v[idx]))\n\n napzak = [(0, 0)] * 20\n\n for i in range(len(ITEM_LIST)):\n item = ITEM_LIST[i]\n\n for m in range(MAX_SIZE):\n\n if item[0] + sum([item_n[0] for item_n in napzak]) <= MAX_SIZE:\n napzak[i] = item\n break\n\n print(napzak)\n","sub_path":"algorism/dp.py","file_name":"dp.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"186853878","text":"import numpy as np\nimport os,json,sys\nfrom typing import Dict, Sequence, Type, Callable, List, Optional\nimport torch\nfrom torch import nn\nsys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))\n\nfrom modules.transformers import BertModel,BertConfig,BertTokenizer\nfrom modules.MLP import MLP\nfrom modules.Pooling import Pooling\nfrom log import logging\nlogger=logging.getLogger(__name__)\n\n'''\nheads下的所有modelhead返回的都是logits,骨架中的module和具体的head model都用head进行load和save .head的输入是\n就是input_ids这些特征,因此module和heads其实是绑在一起的\n\n在语义相似度匹配任务中,骨架的设定是BiEncoders,heads的设定主要是不同的loss model,包括(softmaxloss,\nContrastiveLoss等)\n\n每一个heads的forward返回的都是logits,比如在SoftmaxLossHead中的logits就是两个句子Pooling后的两个sentence_embeddings拼接后\n传入到最终的classifier得到的logits,由于是两个句子,因此为了简便,forward的参数不是input_ids1和input_ids2这样区分\n,而是[{'input_ids':tensor,'attention_mask':tensor,'token_type_ids':tensor},{'input_ids':tensor,'attention_mask':tensor,'token_type_ids':tensor}]\n'''\n\nclass SoftmaxLossHead(nn.Module):\n \"\"\"\n model应该根据对应的features给出句子的embedding,\n \"\"\"\n def __init__(self,\n model_path,\n num_labels: int,\n concatenation_sent_rep: bool = True,\n concatenation_sent_difference: bool = True,\n concatenation_sent_multiplication: bool = False,\n pooling_mode_cls_token: bool = False,\n pooling_mode_max_tokens: bool = False,\n pooling_mode_mean_tokens: bool = True,\n pooling_mode_mean_sqrt_len_tokens: bool = False,\n pooling_mode_mean_last_2_tokens: bool = False,\n pooling_mode_mean_first_last_tokens: bool = False,\n is_finetune=False,\n ):\n super(SoftmaxLossHead, self).__init__()\n self.num_labels = num_labels\n self.concatenation_sent_rep = concatenation_sent_rep\n self.concatenation_sent_difference = concatenation_sent_difference\n self.concatenation_sent_multiplication = concatenation_sent_multiplication\n self.pooling_mode_cls_token = pooling_mode_cls_token\n self.pooling_mode_max_tokens = pooling_mode_max_tokens\n 
self.pooling_mode_mean_tokens = pooling_mode_mean_tokens\n self.pooling_mode_mean_sqrt_len_tokens = pooling_mode_mean_sqrt_len_tokens\n self.pooling_mode_mean_last_2_tokens = pooling_mode_mean_last_2_tokens\n self.pooling_mode_mean_first_last_tokens = pooling_mode_mean_first_last_tokens\n\n self.num_vectors_concatenated = 0\n if concatenation_sent_rep:\n self.num_vectors_concatenated += 2\n if concatenation_sent_difference:\n self.num_vectors_concatenated += 1\n if concatenation_sent_multiplication:\n self.num_vectors_concatenated += 1\n\n self.config_keys = ['concatenation_sent_rep','concatenation_sent_difference','concatenation_sent_multiplication',\n 'sentence_embedding_dimension', 'pooling_mode_cls_token', 'pooling_mode_mean_tokens',\n 'pooling_mode_max_tokens', 'pooling_mode_mean_sqrt_len_tokens',\n 'pooling_mode_mean_last_2_tokens','pooling_mode_mean_first_last_tokens']\n self.pooling_config = {'pooling_mode_cls_token':self.pooling_mode_cls_token,\n 'pooling_mode_max_tokens':self.pooling_mode_max_tokens,\n 'pooling_mode_mean_tokens':self.pooling_mode_mean_tokens,\n 'pooling_mode_mean_sqrt_len_tokens':self.pooling_mode_mean_sqrt_len_tokens,\n 'pooling_mode_mean_last_2_tokens':self.pooling_mode_mean_last_2_tokens,\n 'pooling_mode_mean_first_last_tokens':self.pooling_mode_mean_first_last_tokens\n }\n\n if is_finetune==False:\n logger.info(\"Loading model from {}, which is from huggingface model\".format(model_path))\n self.load_huggingface_model(bert_model_path=model_path)\n else:\n self.load_finetuned_model(model_path=model_path)\n logger.info(\"Loading model from {}, which has been finetuned.\".format(model_path))\n \n #logger.info(\"Pooling config : {}\".format(self.pooling_config))\n logger.info(\"Softmax loss: #Vectors concatenated: {}\".format(self.num_vectors_concatenated))\n #logger.info(\"Pooling policy is \")\n logger.info(\"After pooling, each sentence embedding has dim: {}\".format(self.pooling_layer.pooling_output_dimension))\n def load_huggingface_model(self,bert_model_path):\n self.config=BertConfig.from_pretrained(bert_model_path)\n self.bert=BertModel.from_pretrained(bert_model_path)\n self.tokenizer=BertTokenizer.from_pretrained(bert_model_path)\n self.pooling_config.update({'word_embedding_dimension':self.bert.config.hidden_size})\n self.pooling_layer=Pooling(**self.pooling_config)\n self.head_layer=MLP(in_features=self.num_vectors_concatenated*self.pooling_layer.pooling_output_dimension,out_features=self.num_labels)\n\n def load_finetuned_model(self,model_path):\n bert_save_path=os.path.join(model_path,\"BERT\")#save的时候将BERT保存在model_path下的BERT文件夹中\n self.config=BertConfig.from_pretrained(bert_save_path)\n self.bert=BertModel.from_pretrained(bert_save_path)\n self.tokenizer=BertTokenizer.from_pretrained(bert_save_path)\n\n pooling_save_path=os.path.join(model_path,'Pooling')\n self.pooling_layer.load(pooling_save_path)\n\n head_save_path=os.path.join(model_path,'MLP')\n self.head_layer=MLP.load(input_path=head_save_path)\n\n def save(self,output_path):\n bert_save_path=os.path.join(output_path,\"BERT\")\n self.bert.save_pretrained(bert_save_path,save_config=False)#下面已经save,不用save两次,虽然没什么影响\n self.config.save_pretrained(bert_save_path)\n self.tokenizer.save_pretrained(bert_save_path)\n\n pooling_save_path=os.path.join(output_path,'Pooling')\n self.pooling_layer.save(pooling_save_path)#主要是保存config.json,Pooling没有参数\n\n head_save_path=os.path.join(output_path,'MLP')\n self.head_layer.save(head_save_path)\n\n def 
forward(self,sentence_features_of_1,sentence_features_of_2=None,output_all_encoded_layers=False,encode_pattern=False):\n '''\n each_features like : {'input_ids':tensor,'attention_mask':tensor,'token_type_ids':tensor},\n input_ids.size()==attention_mask.size()==token_type_ids.size()==position_ids.size()==(batch_size,seq_length)\n label_ids.size()==(batch_size,)\n '''\n #只有在encode模式下的single_batch才是有意义的,不然如果不是encode模式,只传入一个句子,有没有标签,无法返回任何值\n single_batch=False\n if sentence_features_of_2 is None:\n single_batch=True\n try:\n assert encode_pattern\n except:\n raise Exception(\"只传入了一个batch的句子,然而又不是encode模式,函数无法执行\")\n pair_sentence_features=[sentence_features_of_1]\n else:\n pair_sentence_features=[sentence_features_of_1,sentence_features_of_2]\n batch_size,seq_len_1=sentence_features_of_1['input_ids'].size()\n\n pair_sentence_embeddings=[]\n for sentence_features in pair_sentence_features:\n input_ids=sentence_features['input_ids']\n token_type_ids=sentence_features['token_type_ids']\n attention_mask=sentence_features['attention_mask']\n\n (sequence_outputs,pooler_output)=self.bert(input_ids=input_ids,\n token_type_ids=token_type_ids,\n attention_mask=attention_mask,\n output_all_encoded_layers=output_all_encoded_layers)\n #要注意到sequence_output[0]与pooled_output的区别在于pooler_output是经过一层tanh的\n if output_all_encoded_layers:\n all_layer_embeddings=sequence_outputs\n token_embeddings=sequence_outputs[-1]\n else:\n all_layer_embeddings=None\n token_embeddings=sequence_outputs\n\n cls_token_embeddings=pooler_output\n sentence_embedding=self.pooling_layer(token_embeddings=token_embeddings,\n cls_token_embeddings=cls_token_embeddings,\n attention_mask=attention_mask,\n all_layer_embeddings=all_layer_embeddings)\n pair_sentence_embeddings.append(sentence_embedding)\n assert sentence_embedding.size()==(batch_size,self.pooling_layer.pooling_output_dimension)\n\n if single_batch:\n rep_a=pair_sentence_embeddings[0]\n else:\n rep_a,rep_b=pair_sentence_embeddings\n\n if encode_pattern == True:\n if single_batch:\n return rep_a\n else:\n return rep_a,rep_b\n try:\n assert sentence_features_of_2 is not None and len(pair_sentence_embeddings)==2\n except:\n raise Exception(\"encode pattern是False,那么第二个batch不能为空\")\n\n assert rep_a.size()==(batch_size,self.pooling_layer.pooling_output_dimension)==rep_b.size()\n vectors_concat=[]\n if self.concatenation_sent_rep:\n vectors_concat.append(rep_a)\n vectors_concat.append(rep_b)\n if self.concatenation_sent_difference:\n vectors_concat.append(torch.abs(rep_a-rep_b))\n if self.concatenation_sent_multiplication:\n vectors_concat.append(rep_a*rep_b)\n \n concat_embeddings = torch.cat(vectors_concat, 1)\n assert concat_embeddings.size()==(batch_size,self.num_vectors_concatenated*self.pooling_layer.pooling_output_dimension)\n logits=self.head_layer(concat_embeddings)#(batch_size,num_labels)\n return logits\n\nclass STSHead(nn.Module):\n '''\n ClsHead不区分是单句子还是双句子,因为处理逻辑是一样的\n '''\n def __init__(self, model_path,\n num_labels,\n state_dict=None,\n is_finetune=False):\n super().__init__()\n self.num_labels=num_labels\n\n if is_finetune==False:\n logger.info(\"Loading model from {}, which is from huggingface model\".format(model_path))\n self.load_huggingface_model(bert_model_path=model_path)\n else:\n self.load_finetuned_model(model_path=model_path)\n logger.info(\"Loading model from {}, which has been finetuned.\".format(model_path))\n\n # bert_model_path=os.path.join(model_path,\"BERT\")#save的时候将BERT保存在model_path下的BERT文件夹中\n # 
self.config=BertConfig.from_pretrained(bert_model_path)\n # self.bert=BertModel.from_pretrained(bert_model_path)\n # self.tokenizer=BertTokenizer.from_pretrained(bert_model_path)\n\n def load_huggingface_model(self,bert_model_path):\n self.config=BertConfig.from_pretrained(bert_model_path)\n self.bert=BertModel.from_pretrained(bert_model_path)\n self.tokenizer=BertTokenizer.from_pretrained(bert_model_path)\n self.head_layer=MLP(in_features=self.config.hidden_size,out_features=self.num_labels)\n\n def load_finetuned_model(self,model_path):\n bert_save_path=os.path.join(model_path,\"BERT\")#save的时候将BERT保存在model_path下的BERT文件夹中\n self.config=BertConfig.from_pretrained(bert_save_path)\n self.bert=BertModel.from_pretrained(bert_save_path)\n self.tokenizer=BertTokenizer.from_pretrained(bert_save_path)\n\n head_save_path=os.path.join(model_path,'MLP')\n self.head_layer=MLP.load(input_path=head_save_path)\n\n def save(self,output_path):\n bert_save_path=os.path.join(output_path,\"BERT\")\n self.bert.save_pretrained(bert_save_path,save_config=False)#下面已经save,不用save两次,虽然没什么影响\n self.config.save_pretrained(bert_save_path)\n self.tokenizer.save_pretrained(bert_save_path)\n head_save_path=os.path.join(output_path,'MLP')\n self.head_layer.save(head_save_path)\n \n def forward(self,input_ids,attention_mask=None,token_type_ids=None,label_ids=None,output_all_encoded_layers=False):\n '''\n input_ids.size()==attention_mask.size()==token_type_ids.size()==position_ids.size()==(batch_size,seq_length)\n label_ids.size()==(batch_size,)\n '''\n (sequence_outputs,pooled_output)=self.bert(input_ids=input_ids,\n token_type_ids=token_type_ids,\n attention_mask=attention_mask,\n output_all_encoded_layers=output_all_encoded_layers)\n #要注意到sequence_output[0]与pooled_output的区别在于pooled_output是经过一层tanh的\n assert len(pooled_output.size())==2 and pooled_output.size(1)==self.config.hidden_size\n logits=self.head_layer(pooled_output)#(batch_size,num_labels)\n return logits\n # predictions=torch.argmax(logits,dim=1)\n # if label_ids is not None:\n # loss=nn.CrossEntropyLoss(reduction=\"mean\")(input=logits,target=label_ids)\n # accuracy=(predictions==label_ids).float().mean()\n # return loss,accuracy\n # else:\n # return logits,predictions","sub_path":"nlp_basictasks/heads/sts.py","file_name":"sts.py","file_ext":"py","file_size_in_byte":13977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"505188693","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# 流程控制\n\n# 条件判断\nage = 18\n\nif age > 18:\n print(\"哈哈哈...\")\n print(\"成年了...\")\n\nelif age == 18:\n print(\"吼吼吼...\")\n print(\"刚好成年...\")\n\nelse:\n print(\"嘻嘻嘻...\")\n print(\"未成年...\")\n\n# if 条件还可以简写,比如:\nif 1:\n print(\"True\")\n\nxx = []\nif xx:\n print(\"非空...\")\nelse:\n print(\"空...\")\n\n# -------------\nbirth = input('birth: ')\n\nbirth = int(birth)\n\nif birth < 2000:\n print('00前')\nelse:\n print('00后')\n\nprint(\"\")\nprint(\"\")\n\n# 循环\n\n# list\nnames = ['CYX1', 'CYX2', 'CYX3']\nfor name in names:\n print(name)\n\n# tuple\nclassTypes = ('Z', 'A', 'Q')\nfor classType in classTypes:\n print(classType)\n\nages = (12,)\nfor age in ages:\n print(age)\n\n# 累加\nsum = 0\nfor x in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]:\n sum = sum + x\n\nprint(\"sum\", sum)\n\nnumbers = list(range(11))\nprint(numbers)\n\nnums = 0\nfor number in numbers:\n nums = nums + number\n\nprint(\"nums\", nums)\n\nprint(\"\")\nprint(\"\")\n\n# while 循环\nsum = 0\nn = 100\n\nwhile n > 0:\n sum = sum + n\n n = n - 1\n\nprint(sum)\n\nnames = 
['cyx', 'cyx1', 'cyx2']\nfor name in names:\n print(name)\n break\n\nn = 0\nwhile n < 10:\n n = n + 1\n if n % 2 == 0: # 如果n是偶数,执行continue语句\n continue # continue语句直接继续下一轮循环,后续的print()语句不会执行\n\n print(n)\n","sub_path":"01-PythonBasicCode/01-basic/04-ProcessControl.py","file_name":"04-ProcessControl.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"87049159","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Post',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('add_date', models.DateTimeField(auto_now_add=True)),\n ('comment', models.TextField(db_index=True, null=True, blank=True)),\n ('image', models.ImageField(null=True, upload_to=b'post_image', blank=True)),\n ('post_date', models.DateField()),\n ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n","sub_path":"server/project/apps/posts/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"647628653","text":"import os\n \ndef analyse (lawn, n, m):\n \n for i in range(n):\n for j in range (m):\n if lookH (lawn, i,j, n, m) and lookV(lawn,i,j, n, m):\n return(\"NO\")\n return (\"YES\")\n\ndef lookH(lawn, i, j, n, m):\n startval = lawn[i][j]\n result = False\n for a in range (n):\n if lawn [a][j] > startval:\n result = True\n return(result)\n \ndef lookV (lawn, i, j, n, m):\n startval = lawn[i][j]\n result = False\n for a in range (m):\n if lawn [i][a] > startval:\n result = True\n return (result)\n\n \n \nfilein = open ('B-small-attempt1.in', 'r') \noutfile = open ('sample.out', 'wt')\ninstances = int(filein.readline())\n\nfor i in range (instances):\n l = filein.readline()\n n,m = l.split()\n n = int(n)\n m = int(m)\n lawn = []\n for rows in range (n):\n row = list(filein.readline())\n while row.count(' ')>0:\n row.remove(' ')\n while row.count('\\n')>0:\n row.remove('\\n')\n lawn.append(row)\n print (lawn)\n stringStart = str('Case #' + str(i+1) + ': ')\n answer = analyse(lawn, n, m)\n print(stringStart + answer)\n outfile.write(stringStart + answer)\n outfile.write('\\n')\nfilein.close()\noutfile.close()\n","sub_path":"solutions_python/Problem_117/1460.py","file_name":"1460.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"347076727","text":"from typing import Optional\r\nimport esphome.codegen as cg\r\nimport esphome.config_validation as cv\r\nfrom esphome.components import time as time_, light\r\nfrom esphome.const import CONF_ID\r\n\r\nDEPENDENCIES = [\"network\"]\r\n\r\nCONF_TIME_ID = 'time_id'\r\nCONF_CLOCK_ADDRESSABLE_LIGHT_ID = 'clock_addressable_light_id'\r\n\r\nlight_ns = cg.esphome_ns.namespace(\"light\")\r\nLightState = light_ns.class_(\"LightState\", cg.Nameable, cg.Component)\r\nAddressableLightState = light_ns.class_(\"LightState\", LightState)\r\n\r\ntimemasheen_ns = cg.esphome_ns.namespace('timemasheen')\r\nTimeMasheen = timemasheen_ns.class_('TimeMasheen', cg.Component)\r\n\r\n\r\nCONFIG_SCHEMA = 
cv.Schema({\r\n    #id\r\n    cv.GenerateID(): cv.declare_id(TimeMasheen),\r\n    #references\r\n    cv.Required(CONF_TIME_ID): cv.use_id(time_.RealTimeClock),\r\n    cv.Required(CONF_CLOCK_ADDRESSABLE_LIGHT_ID): cv.use_id(light.AddressableLightState),\r\n}).extend(cv.COMPONENT_SCHEMA)\r\n\r\ndef to_code(config):\r\n    var = cg.new_Pvariable(config[CONF_ID])\r\n    wrapped_time = yield cg.get_variable(config[CONF_TIME_ID])\r\n    wrapped_clock_leds = yield cg.get_variable(config[CONF_CLOCK_ADDRESSABLE_LIGHT_ID])\r\n    cg.add(var.set_time(wrapped_time))\r\n    cg.add(var.set_clock_addressable_lights(wrapped_clock_leds))\r\n    yield cg.register_component(var, config)\r\n","sub_path":"components/timemasheen/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"538163847","text":"#!/usr/bin/python\n# Copyright 2015 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Collectd plugin for getting statistics from Cinder\nif __name__ == '__main__':\n    import collectd_fake as collectd\nelse:\n    import collectd\n\nimport collectd_openstack as openstack\n\nPLUGIN_NAME = 'openstack_cinder'\nINTERVAL = openstack.INTERVAL\n\n\nclass CinderStatsPlugin(openstack.CollectdPlugin):\n    \"\"\" Class to report the statistics on Cinder objects.\n\n        number of volumes broken down by state\n        total size of volumes usable and in error state\n    \"\"\"\n\n    def __init__(self, *args, **kwargs):\n        super(CinderStatsPlugin, self).__init__(*args, **kwargs)\n        self.plugin = PLUGIN_NAME\n        self.interval = INTERVAL\n        self.pagination_limit = 500\n\n    def itermetrics(self):\n\n        volumes_details = self.get_objects('cinderv2', 'volumes',\n                                           params={'all_tenants': 1},\n                                           detail=True)\n\n        def groupby(d):\n            return d.get('status', 'unknown').lower()\n\n        def count_size_bytes(d):\n            return d.get('size', 0) * 10 ** 9\n\n        status = self.count_objects_group_by(volumes_details,\n                                             group_by_func=groupby)\n        for s, nb in status.iteritems():\n            yield {\n                'plugin_instance': 'volumes',\n                'values': nb,\n                'meta': {'state': s, 'discard_hostname': True}\n            }\n\n        sizes = self.count_objects_group_by(volumes_details,\n                                            group_by_func=groupby,\n                                            count_func=count_size_bytes)\n        for s, size in sizes.iteritems():\n            yield {\n                'plugin_instance': 'volumes_size',\n                'values': size,\n                'meta': {'state': s, 'discard_hostname': True}\n            }\n\n        snaps_details = self.get_objects('cinderv2', 'snapshots',\n                                         params={'all_tenants': 1})\n        status_snaps = self.count_objects_group_by(snaps_details,\n                                                   group_by_func=groupby)\n        for s, nb in status_snaps.iteritems():\n            yield {\n                'plugin_instance': 'snapshots',\n                'values': nb,\n                'meta': {'state': s, 'discard_hostname': True}\n            }\n\n        sizes = self.count_objects_group_by(snaps_details,\n                                           group_by_func=groupby,\n                                           count_func=count_size_bytes)\n        for s, size in sizes.iteritems():\n            yield {\n                'plugin_instance': 'snapshots_size',\n                'values': size,\n                'meta': {'state': s, 'discard_hostname': True}\n            }\n\n\nplugin = CinderStatsPlugin(collectd, 
PLUGIN_NAME, disable_check_metric=True)\n\n\ndef config_callback(conf):\n plugin.config_callback(conf)\n\n\ndef notification_callback(notification):\n plugin.notification_callback(notification)\n\n\ndef read_callback():\n plugin.conditional_read_callback()\n\nif __name__ == '__main__':\n import time\n collectd.load_configuration(plugin)\n plugin.read_callback()\n collectd.info('Sleeping for {}s'.format(INTERVAL))\n time.sleep(INTERVAL)\n plugin.read_callback()\n plugin.shutdown_callback()\nelse:\n collectd.register_config(config_callback)\n collectd.register_notification(notification_callback)\n collectd.register_read(read_callback, INTERVAL)\n","sub_path":"collectd/files/plugin/openstack_cinder.py","file_name":"openstack_cinder.py","file_ext":"py","file_size_in_byte":4052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"400966268","text":"__author__ = 'Vinayak'\r\n\r\ndef swap(str,index):\r\n ch_list=[]\r\n for ch in str[:index+1]:\r\n if ch == \"+\":\r\n ch_list.append(\"-\")\r\n else:\r\n ch_list.append(\"+\")\r\n for ch in str[index+1:]:\r\n ch_list.append(ch)\r\n str=\"\".join(ch_list)\r\n #print(str)\r\n return str\r\n\r\ndata=list()\r\noutput_data=''\r\n\r\nwith open(\"B-large.in\",'r') as f:\r\n for line in f.readlines():\r\n data.append(line)\r\n\r\n\r\ntest_case=int(data.pop(0))\r\ni=0\r\nwhile i= year >= 0:\n return f'Проверка прошла успешно'\n else:\n return f'Неправильный год'\n else:\n return f'Неправильный месяц'\n else:\n return f'Неправильный день'\n\n def __str__(self):\n return f'Дата: {Data.extract(self.day_month_year)}'\n\n\ntoday = Data('13 - 5 - 2020')\nprint(today)\nprint(Data.valid(\"11 - 11 - 2022\"))\nprint(today.valid(\"11 - 13 - 2011\"))\nprint(Data.extract('11 - 11 - 2011'))\nprint(today.extract('11 - 11 - 2020'))\nprint(Data.valid('1 - 11 - 2000'))\n","sub_path":"1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"187294204","text":"from django.conf.urls import url, include\nfrom django.contrib import admin\nfrom . 
import views\n\nurlpatterns = [\n url(r'^$', views.index),\n url(r'^login', views.login),\n url(r'^info', views.info),\n url(r'^admin/', admin.site.urls, name='admin'),\n url(r'^logout$', views.logout, name='logout'),\n]\n\nhandler404 = 'frontend.views.notfound'\n","sub_path":"project/frontend/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"176145908","text":"#!/usr/bin/env python3\n#\n# MAREK BEČVÁŘ, MFF UK 2020/21\n#\n\nimport pygame as PG\n\n# Basic UI class - defines drawing\nclass UI:\n def __init__(self):\n self.image = None # can be drawn as an image\n self.items = [] # collection text items\n self.itemPos = PG.Vector2() # text positions\n self.inputs = [] # collection of text inputs\n self.radioButtons = [] # collection of radio buttons\n\n self.FONT = PG.font.SysFont('Arial', 20, False, False) # default font\n\n def Draw(self, screen, pos):\n pos = PG.Vector2(pos)\n\n if (self.image != None):\n screen.blit(self.image, pos)\n\n for index, t in enumerate(self.items): # Printing text items\n textSurface = self.FONT.render(t, True, PG.Color(\"white\"))\n screen.blit(textSurface, (pos.x+self.itemPos.x, \n pos.y+self.itemPos.y + 25*index))\n\n # Draw all text inputs and buttons\n for inp in self.inputs: inp.Draw(screen)\n for rb in self.radioButtons: rb.Draw(screen)\n\nclass ObjPopup(UI):\n def __init__(self):\n super().__init__()\n self.obj = None # Save which object (planet) is being affected\n\n self.itemPos = PG.Vector2(25, 15) \n self.items = [\"Object Values:\",\n \"Mass:\",\n \"Velocity:\",\n \"X:\",\n \"Y:\",\n \"Central object:\", \n \"RGB Golor:\"] \n self.dragStart = None\n\n def Draw(self, screen, obj):\n # Drawing with permanently set pos\n height = screen.get_height()\n pos = PG.Vector2(15, height-240)\n\n self.obj = obj # Save the object\n\n if (self.inputs == []): # First draw -> load inputs (reloads on each object)\n self.SetupInput(pos + self.itemPos, self.obj)\n\n # Draw colored rectangle as bg\n rect = PG.Rect(pos.x, pos.y, 250,225)\n PG.draw.rect(screen, PG.Color(40,40,40), rect, border_radius=10, \n border_top_right_radius=50)\n\n # Original draw method (takes care of drawing all the inputs, texts)\n super().Draw(screen, pos) \n\n def SetupInput(self, pos, obj):\n x = pos.x\n y = pos.y\n \n # InputField class - (xPos, yPos, width, height, default, xPadding, pointer, minValue (-1=None), maxValue (-1=None), decimal places)\n self.inputs = [InputField(x+120, y+25,80,22, str(obj.mass),5,\"mass\", 1, -1,0),\n InputField(x+120, y+75,80,22,str(obj.simSteps[0].vel.x),5,\"xvel\",-1, -1,2),\n InputField(x+120,y+100,80,22,str(obj.simSteps[0].vel.y),5,\"yvel\",-1, -1,2),\n InputField(x, y+175,45,22, str(obj.color[0]),5, \"r\", 0,255,0),\n InputField(x+50, y+175,45,22, str(obj.color[1]),5, \"g\", 0,255,0),\n InputField(x+100,y+175,45,22, str(obj.color[2]),5, \"b\", 0,255,0)]\n\n # RadioButton class - (xPos, yPos, size, default)\n self.radioButtons = [RadioButton(x+178,y+125,22,self.obj.static)]\n\n def Event_handler(self, event, objectList):\n # Handler for all defined events\n\n # RadioButton event\n for rb in self.radioButtons:\n out = rb.Event_handler(event)\n if (out == 1):\n for obj in objectList:\n obj.SetStatic(False)\n\n self.obj.SetStatic(True)\n elif (out == 0):\n self.obj.SetStatic(False)\n\n # InputField events\n for inp in self.inputs:\n # Input background color changes - drag/hover\n if (self.dragStart == None and 
inp.field.collidepoint(PG.mouse.get_pos())) or (inp.drag):\n inp.bgcolor = inp.HOVER_COLOR\n else:\n inp.bgcolor = inp.BG_COLOR\n \n if (event.type == PG.MOUSEBUTTONUP): # Selecting input (on click up)\n # Deselect all + select field if under cursor\n self.dragStart = None\n inp.selected = False\n inp.drag = False\n\n if (inp.field.collidepoint(event.pos)):\n inp.selected = True\n inp.cursor = 0\n \n # Continuous mouse press = Drag behaviour\n if(PG.mouse.get_pressed()[0]): \n # Deselect all (but not self while dragging)\n for other in self.inputs:\n if not (other.drag):\n other.selected = False\n \n # Check if mouse is already in drag mode\n dragging = False\n for _inp in self.inputs:\n if (_inp.drag):\n dragging = True\n break\n\n mousePos = PG.Vector2(PG.mouse.get_pos())\n\n # If first frame of drag - get window drag start\n if (self.dragStart == None): self.dragStart = mousePos\n\n # Start drag on input field under held cursor\n if not (dragging):\n if (inp.field.collidepoint(self.dragStart)):\n inp.SetText(inp.text)\n inp.drag = True\n inp.selected = True\n inp.dragOrigPos = self.dragStart\n inp.dragOrigValue = float(inp.text)\n\n # Get/Update drag value - x distance from start drag pos\n elif (inp.drag):\n if (inp.pointer == \"mass\"):\n value = inp.GetDragValue(mousePos, 1/10)\n elif (inp.pointer == \"r\" or inp.pointer == \"g\" or inp.pointer == \"b\"):\n value = inp.GetDragValue(mousePos, 5)\n elif (inp.pointer == \"xvel\" or inp.pointer == \"yvel\"):\n value = inp.GetDragValue(mousePos, 50)\n\n # Input field text update\n inp.SetText(value)\n\n # Value update\n r = self.obj.color[0]\n g = self.obj.color[1]\n b = self.obj.color[2]\n if (inp.pointer == \"mass\"): self.obj.SetMass(float(inp.text))\n elif (inp.pointer == \"xvel\"): self.obj.SetStartVel((float(inp.text), None))\n elif (inp.pointer == \"yvel\"): self.obj.SetStartVel((None, float(inp.text)))\n elif (inp.pointer == \"r\"): r = int(inp.text)\n elif (inp.pointer == \"g\"): g = int(inp.text)\n elif (inp.pointer == \"b\"): b = int(inp.text)\n self.obj.SetColor(PG.Color(r,g,b))\n\n return\n\n # Keyboard events for selected input\n if (inp.selected): \n if event.type == PG.KEYDOWN:\n # RETURN mean that some events need main for careful\n # handling = \"stuff this class cannot work with\" \n # (e.g. 
arrows collide with object movement)\n\n # BACKSPACE, DELETE, ENTER (apply input and deselect)\n if (event.key == PG.K_BACKSPACE): \n text = inp.text[:len(inp.text)-inp.cursor-1] + inp.text[len(inp.text)-inp.cursor:]\n inp.text = text\n elif (event.key == PG.K_DELETE):\n text = inp.text[:len(inp.text)-inp.cursor] + inp.text[len(inp.text)-inp.cursor+1:]\n if (text != inp.text): inp.cursor -= 1\n inp.text = text\n return \"delete\" \n elif (event.key == PG.K_RETURN): \n inp.selected = False\n if (inp.text == \"\" or inp.text == \"-\"): inp.SetText(0)\n else: inp.SetText(inp.text)\n\n # ARROW KEYS - moving cursor around\n elif (event.key == PG.K_LEFT):\n inp.cursor += 1\n if (inp.cursor > len(inp.text)): inp.cursor = len(inp.text) \n return \"arrows\"\n elif (event.key == PG.K_RIGHT):\n inp.cursor -= 1\n if (inp.cursor < 0): inp.cursor = 0\n return \"arrows\"\n elif (event.key == PG.K_UP or event.key == PG.K_DOWN):\n return \"arrows\" \n\n # TAB - changing selected\n elif (event.key == PG.K_TAB):\n idx = self.inputs.index(inp) \n inp.selected = False\n inp.SetText(inp.text)\n\n # SHIFT+TAB/TAB\n if (PG.key.get_mods() & PG.KMOD_SHIFT): idx -= 1\n else: idx += 1\n\n # LOOP AROUND\n try:\n self.inputs[idx].selected = True\n except IndexError:\n idx = 0\n self.inputs[idx].selected = True\n\n self.inputs[idx].cursor = 0\n break\n \n # Pygame's handling of some keys is weird => fix so they can\n # be just plain strings\n key = PG.key.name(event.key)[0] \n if not key == \"[\": pass\n else: key = PG.key.name(event.key)[1]\n\n # ALLOWED KEYS\n if (str(key) in \"0123456789-.\"):\n # MINUS\n if (key == \"-\"):\n text = \"-\"+inp.text\n if (len(text) > 1 and text[0] == text[1] == \"-\"): text = text[2:]\n\n inp.SetText(text)\n\n # DOT\n elif (key == \".\"):\n if (len(inp.text) == 0): text = \"0.\"\n elif (len(inp.text) == 1 and inp.text[0] == '-'): text = \"-0.\"\n elif (\".\" in inp.text): text = inp.text\n else: text = inp.text + \".\"\n \n inp.text = text\n \n # NUMBERS\n else:\n if (len(inp.text) < 5): # Soft text len block (possible to go over with drag)\n inp.text = inp.text[:len(inp.text)-inp.cursor] + str(key) + inp.text[len(inp.text)-inp.cursor:]\n\n\n # Final text -> Variable change + value checks \n safeValue = None\n if (inp.text == \"\" or inp.text == \"-\"):\n if (inp.pointer == \"mass\"): safeValue = 1\n else: safeValue = 0\n\n if (inp.pointer == \"mass\"): \n if (safeValue == None): self.obj.SetMass(float(inp.text))\n else: self.obj.SetMass(safeValue)\n if (inp.pointer == \"xvel\"): \n if (safeValue == None): self.obj.SetStartVel((float(inp.text), None))\n else: self.obj.SetStartVel((float(safeValue), None))\n if (inp.pointer == \"yvel\"): \n if (safeValue == None): self.obj.SetStartVel((None, float(inp.text)))\n else: self.obj.SetStartVel((None, float(safeValue)))\n\n r = self.obj.color[0]\n g = self.obj.color[1]\n b = self.obj.color[2]\n if (inp.pointer == \"r\"): \n if (safeValue == None): inp.SetText(float(inp.text))\n else: inp.SetText(safeValue)\n r = int(inp.text)\n if (inp.pointer == \"g\"):\n if (safeValue == None): inp.SetText(float(inp.text))\n else: inp.SetText(safeValue)\n g = int(inp.text)\n if (inp.pointer == \"b\"):\n if (safeValue == None): inp.SetText(float(inp.text))\n else: inp.SetText(safeValue)\n b = int(inp.text)\n \n self.obj.SetColor(PG.Color(r,g,b))\n\nclass InputField: \n def __init__(self, x,y,width,height,text=\"\",xPadding=0,pointer=\"\",minValue=-1,maxValue=-1,decimal=0):\n self.field = PG.Rect(x,y,width,height) # text field rectangle\n self.text = text # 
text string\n self.cursor = 0 # cursor position\n self.FONT = PG.font.SysFont('Arial', 20, False, False) # default font\n self.xPadding = xPadding # text padding from the start of the line\n self.pointer = pointer # pointer to object variable\n\n # UI variables\n self.selected = False # if selected\n self.drag = False # if in drag mode\n self.dragOrigPos = None # starting drag position\n self.dragOrigValue = None # starting drag value\n\n # COLOR variables\n self.SELECTED_COLOR = PG.Color(\"white\")\n self.DESELECTED_COLOR = PG.Color(\"gray\")\n self.BG_COLOR = PG.Color(90,90,90)\n self.HOVER_COLOR = PG.Color(120,120,120)\n\n self.color = self.DESELECTED_COLOR # color = fg color = text color\n self.bgcolor = self.BG_COLOR # bgcolor = color of the text rectangle\n\n # INPUT variables\n self.minValue = minValue # min possible value (-1 = no limit)\n self.maxValue = maxValue # max \n self.decimal = str(decimal) # number of decimal places\n \n def Draw(self, screen):\n # Set fgcolor\n self.color = self.SELECTED_COLOR if self.selected else self.DESELECTED_COLOR\n\n # If cursor has to be drawn\n drawText = None\n if (self.selected):\n drawText = self.text[:len(self.text)-self.cursor] +\"|\"+self.text[len(self.text)-self.cursor:]\n else:\n drawText = self.text\n\n # Printing text to text surface\n # (object for Pygame text drawing)\n textSurface = self.FONT.render(drawText, True, self.color)\n \n # Drawing input field rectangle and text\n PG.draw.rect(screen, self.bgcolor, self.field)\n screen.blit(textSurface, (self.field.x+self.xPadding,self.field.y))\n\n def SetText(self, text):\n # Setting text with possible constrains on values\n\n if (text == ''): value = 0 # failsafe\n elif (text == '-'): \n self.text = \"-\" \n return\n else: \n value = float(text)\n \n # constrains\n if (self.minValue > value and self.minValue != -1):\n value = self.minValue\n if (self.maxValue < value and self.maxValue != -1):\n value = self.maxValue\n\n if (value == -0 or value == -0.0):\n value = 0\n \n\n self.text = (\"{:0.\"+self.decimal+\"f}\").format(value)\n\n def GetDragValue(self, mousePos, sensitivity):\n # Calculate value from drag change\n return (self.dragOrigValue + ((mousePos - self.dragOrigPos).x)/sensitivity)\n\nclass RadioButton:\n def __init__(self, x, y, size, default=False):\n self.rect = PG.Rect(x,y,size,size) # button rectangle\n self.FONT = PG.font.SysFont('Arial', 20, False, False) # default font\n\n # COLOR variables\n self.BG_COLOR = PG.Color(90,90,90) \n self.HOVER_COLOR = PG.Color(120,120,120)\n\n self.bgcolor = self.BG_COLOR\n\n # UI variables\n self.text = \" \" # text \" \"/\"X\" button\n self.toggle = default # if it should be checked from the start\n\n def Draw(self, screen):\n self.text = \"X\" if self.toggle else \" \" \n\n # Printing text to text surface\n # (object for Pygame text drawing)\n textSurface = self.FONT.render(self.text, True, PG.Color(\"white\"))\n \n # Drawing input field rectangle and text\n PG.draw.rect(screen, self.bgcolor, self.rect)\n screen.blit(textSurface, self.rect.topleft+PG.Vector2(4,2))\n\n def Event_handler(self, event): \n # Own event handler - compact enough\n\n # Colors\n if (self.rect.collidepoint(PG.mouse.get_pos())): self.bgcolor = self.HOVER_COLOR\n else: self.bgcolor = self.BG_COLOR\n \n # Toggle\n if (event.type == PG.MOUSEBUTTONDOWN and self.rect.collidepoint(event.pos)):\n self.toggle = not self.toggle\n if (self.toggle): return 1\n else: return 0\n\nclass TopMenu(UI):\n def __init__(self):\n super().__init__()\n # UI variables\n self.itemPos = 
PG.Vector2(17,5)\n        self.items = [] # For TopMenu only 1 item = tab text\n\n        self.rect = None # Tab rectangle (set in draw)\n        self.rectWidth = 75\n        self.rectHeight = 25\n        self.borderRect = PG.Rect(0,0,400,200) # Set boundaries for tabs - if cursor leaves, tab closes\n\n        # list of MenuItem objects\n        self.inItems = [] # Need new variable for items under the tab\n\n        self.SELECTED = False # If this tab is currently in use\n\n        # COLOR variables\n        self.NORMAL_COLOR = PG.Color(40,40,40)\n        self.HOVER_COLOR = PG.Color(50,50,50)\n\n        self.color = self.NORMAL_COLOR\n\n    def Draw(self, screen, num, topbar):\n        # num - number, needed for padding from other tabs\n        offset = 15\n        for i in range(num): offset += topbar[i].rectWidth + 15\n        \n        pos = (offset, 0)\n\n        # Drawing own rect\n        self.rect = PG.Rect(offset, 0, self.rectWidth, self.rectHeight)\n        PG.draw.rect(screen, self.color, self.rect, border_bottom_right_radius=5, border_bottom_left_radius=5)\n\n        # Tab text drawn in original UI draw\n        super().Draw(screen, pos)\n\n        # If opened, draw all the other items as well\n        if (self.SELECTED):\n            for index, item in enumerate(self.inItems): \n                item.Draw(screen, pos, index)\n\n    def Event_handler(self, event, others):\n        # Own event handler for tabs\n\n        # Colors (hover)\n        if (self.rect.collidepoint(PG.mouse.get_pos())): \n            self.color = self.HOVER_COLOR\n            self.rectHeight = 30\n        else: \n            self.color = self.NORMAL_COLOR\n            self.rectHeight = 25\n\n        # Close tabs if mouse leaves tab space \n        if not (self.borderRect.collidepoint(PG.mouse.get_pos())):\n            self.SELECTED = False\n            return\n        \n        # Pass events to inner items if opened\n        if (self.SELECTED):\n            for item in self.inItems:\n                out = item.Event_handler(event)\n                if not (out == None): return out\n        \n        else: \n            # If not opened - check for tab clicks\n            if (event.type == PG.MOUSEBUTTONDOWN):\n                if (self.rect.collidepoint(event.pos)):\n                    self.SELECTED = True\n\n                    for other in others:\n                        if (other == self): continue\n                        other.SELECTED = False\n\n                    if (self.items[0] == \"About\"): return \"about\"\n\nclass MenuItem(UI):\n    def __init__(self, text, pointer=None, shortcut=\"\", scPos=PG.Vector2(), width=120):\n        super().__init__()\n        # Class for entries inside topbar tabs\n\n        # UI variables\n        self.itemPos = PG.Vector2(15,5)\n        self.items = [text] # items = main text \n\n        self.rect = None # Entry rectangle\n        self.shortcut = shortcut # Shortcut text\n        self.scPos = PG.Vector2(scPos) # Shortcut line offset\n        self.width = width # Entry width\n        self.pointer = pointer # Object/function pointer (for event handling)\n\n        # COLOR variables\n        self.NORMAL_COLOR = PG.Color(40,40,40) \n        self.HOVER_COLOR = PG.Color(50,50,50)\n\n        self.color = self.NORMAL_COLOR\n\n        # Default font\n        self.FONT = PG.font.SysFont('Arial', 20, False, False)\n\n    def Draw(self, screen, pos, index):\n        pos = PG.Vector2(pos) # for readability \n        pos = PG.Vector2(pos.x, 30+pos.y + 30*index)\n\n        # Rectangle drawing\n        self.rect = PG.Rect(pos.x, pos.y, self.width, 28)\n        PG.draw.rect(screen, self.color, self.rect)\n\n        # Possible shortcut drawing\n        if not (self.shortcut == \"\"):\n            textSurface = self.FONT.render(self.shortcut, True, PG.Color(\"gray\"))\n            screen.blit(textSurface, (pos.x+self.scPos.x, \n                                      pos.y+self.scPos.y))\n\n        super().Draw(screen, pos)\n\n    def Event_handler(self, event):\n        # Own event handler\n        # Colors\n        if not (self.pointer == None):\n            if (self.rect.collidepoint(PG.mouse.get_pos())): self.color = self.HOVER_COLOR\n            else: self.color = self.NORMAL_COLOR\n        \n        # Mouse clicks -> sends back pointer to function (has to be done in main)\n        if 
(event.type == PG.MOUSEBUTTONDOWN):\n if (self.rect.collidepoint(event.pos)):\n return self.pointer\n","sub_path":"old_PlanetSystem_Final/modules/UIObject.py","file_name":"UIObject.py","file_ext":"py","file_size_in_byte":21885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"40954765","text":"import datetime as datetime\nimport numpy as np\nimport cv2\nimport time\nimport os\nimport datetime\nimport pickle\nimport itertools\nfrom imutils.video import VideoStream\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nfrom CamshiftTracker import *\n\n\n# construct the argument parse and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-p\", \"--prototxt\", required=True,\n\thelp=\"path to Caffe 'deploy' prototxt file\")\nap.add_argument(\"-m\", \"--model\", required=True,\n\thelp=\"path to Caffe pre-trained model\")\nap.add_argument(\"-c\", \"--confidence\", type=float, default=0.5,\n\thelp=\"minimum probability to filter weak detections\")\nargs = vars(ap.parse_args())\n\n\npredict = []\nmeasure = []\nlast_measure = current_measure = np.array((2,1),np.float32)\nlast_predict = current_predict = np.zeros((2,1),np.float32)\nframe = np.ndarray\n# measure = np.array((2,1),np.float32)\n\n\n\ndef kalmanSetup():\n kalman = cv2.KalmanFilter(4,2)\n kalman.measurementMatrix = np.array([[1,0,0,0],[0,1,0,0]],np.float32)\n kalman.transitionMatrix = np.array([[1,0,1,0],[0,1,0,1],[0,0,1,0],[0,0,0,1]],np.float32)\n kalman.processNoiseCov = np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]],np.float32) * 0.03\n return kalman\n\ndef applyCamshiftFilter(x,y,w,h,termination):\n roi = frame[y:y+h,x:x+w]\n roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)\n\n roiHist = cv2.calcHist([roi],[0],None,[16],[0,180])\n roiHist = cv2.normalize(roiHist,roiHist, 0 ,255, cv2.NORM_MINMAX)\n roiBox = (x,y,x+w,y+h)\n hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n backProj = cv2.calcBackProject([hsv], [0], roiHist, [0,180], 1)\n\n (r, roiBox) = cv2.CamShift(backProj, roiBox, termination)\n pts = np.int0(cv2.cv2.boxPoints(r))\n cv2.polylines(frame, [pts], True, (0,255,0), 2)\n\n\n\n# applies the Kalman filter\n\ndef applyKalmanFilter(x, y):\n global frame,last_measure,current_measure,measure,current_predict,last_predict\n last_predict=current_predict\n last_measure=current_measure\n predict.append([int(last_predict[0]),int(last_predict[1])])\n measure.append([int(last_predict[0]),int(last_predict[1])])\n current_measure=np.array([[np.float32(x)],[np.float32(y)]])\n kalman.correct(current_measure)\n current_predict = kalman.predict()\n lmx,lmy=last_measure[0],last_measure[1]\n cmx,cmy=current_measure[0],current_measure[1]\n cpx,cpy=current_predict[0],current_predict[1]\n lpx,lpy=last_predict[0],last_predict[1]\n\n cv2.line(frame, (lmx,lmy), (cmx,cmy), (0,100,0), 2)\n cv2.line(frame, (lpx,lpy), (cpx,cpy), (0,0,200), 2)\n\n\n\ntime_count = 0\n# this is the cascade we just made. 
Call what you want\ndir_path = os.path.dirname(os.path.realpath(__file__))\nnew_face_cascade = cv2.CascadeClassifier(dir_path+'/smallData/cascade.xml')\n\nkalman = kalmanSetup()\n\n\n# load our serialized model from disk\nprint(\"[INFO] loading model...\")\nnet = cv2.dnn.readNetFromCaffe(args[\"prototxt\"], args[\"model\"])\n\n# initialize the video stream and allow the cammera sensor to warmup\nprint(\"[INFO] starting video stream...\")\ncap = VideoStream(src=0).start()\ntime.sleep(2.0)\n\ncamList = []\n\ntermination = (cv2.TERM_CRITERIA_EPS\n | cv2.TERM_CRITERIA_COUNT, 100, 1)\nstart = time.time()\nroiBox = None\n# forcc = cv2.VideoWriter_fourcc(*'MJPG')\n# out = cv2.VideoWriter('output.mp4', -1, 20.0, (640, 480))\nfirstFrame = True\nfaceFound = False\n\nwhile True:\n img = cap.read()\n # frame = cv2.resize(img, None, fx=scaling_factor, fy=scaling_factor, interpolation=cv2.INTER_AREA)\n frame = imutils.resize(img, width=600)\n\n # frame = img\n\n # grab the frame dimensions and convert it to a blob\n (h, w) = frame.shape[:2]\n blob = cv2.dnn.blobFromImage(cv2.resize(frame, (300, 300)), 1.0,\n (300, 300), (104.0, 177.0, 123.0))\n\n # pass the blob through the network and obtain the detections and\n # predictions\n net.setInput(blob)\n detections = net.forward()\n\n # loop over the detections\n for i in range(0, detections.shape[2]):\n # extract the confidence (i.e., probability) associated with the\n # prediction\n confidence = detections[0, 0, i, 2]\n\n # filter out weak detections by ensuring the `confidence` is\n # greater than the minimum confidence\n if confidence < args[\"confidence\"]:\n continue\n\n # compute the (x, y)-coordinates of the bounding box for the\n # object\n\n box = detections[0, 0, i, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n newEndX = endX\n\n # creat CmashiftTracker class for each positive detection if on first frame\n if firstFrame:\n\n # get w and h\n # get remainder = int(w * .35)\n h = endY - startY\n w = endX - startX\n remainder = int(w * .35)\n remainder_y = int(h * .35)\n new_w = int(w * .65)\n new_h = int(h * .65)\n new_x = int(startX + (remainder / 2))\n new_y = int(startY + (remainder_y / 2))\n top_left = (new_x,new_y)\n bottom_right = (new_x+new_w, new_y+new_h)\n\n tracker = CamshiftTracker()\n tracker.setCurrentRect(frame.copy(), (startX, startY, bottom_right[0], bottom_right[1]))\n camList.append(tracker)\n firstFrame = False\n\n\n # draw the bounding box of the face along with the associated\n # probability\n text = \"{:.2f}%\".format(confidence * 100)\n y = startY - 10 if startY - 10 > 10 else startY + 10\n cv2.rectangle(frame, (startX, startY), (endX, endY),\n (0, 0, 255), 2)\n cv2.putText(frame, text, (startX, y),\n cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)\n\n\n if faceFound:\n for cam in camList:\n cam.setMainImage(frame)\n (r, roiBox) = cam.trackCurrentRect()\n pts = np.int0(cv2.cv2.boxPoints(r))\n applyKalmanFilter(roiBox[0], roiBox[1])\n\n # text = \"{:.2f}%\".format(confidence * 100)\n # cv2.putText(frame, text, (roiBox[0], roiBox[1]+10),\n # cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)\n\n cv2.polylines(frame, [pts], True, (0, 255, 0), 2)\n\n if not firstFrame:\n faceFound = True\n\n\n # if not ret:\n # break\n #\n # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n #\n # # image, reject levels level weights.\n # # The scale factor and minNeighbors need to be adjusted according to lighting and background.\n # face_rects = new_face_cascade.detectMultiScale(gray, 1.9, 9)\n # if roiBox is not 
None:\n # hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n # backProj = cv2.calcBackProject([hsv],[0], roiHist, [0,180], 1)\n #\n # (r, roiBox) = cv2.CamShift(backProj, roiBox, termination)\n # pts = np.int0(cv2.cv2.boxPoints(r))\n # applyKalmanFilter(roiBox[0], roiBox[1])\n # cv2.polylines(frame, [pts], True, (0, 255, 0), 2)\n # else:\n # orig = frame.copy()\n # roi = orig[startY:endY, startX:endX]\n # roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)\n # roiHist = cv2.calcHist([roi], [0], None, [16], [0, 180])\n # roiHist = cv2.normalize(roiHist, roiHist, 0, 255, cv2.NORM_MINMAX)\n # roiBox = (startX, startY, endX, endY)\n\n\n cv2.imshow(\"Frame\", frame)\n c = cv2.waitKey(1)\n end = time.time()\n\n\n k = cv2.waitKey(30) & 0xff\n if k == 27:\n break\n\ncap.release()\ncv2.destroyAllWindows()","sub_path":"FaceVideo.py","file_name":"FaceVideo.py","file_ext":"py","file_size_in_byte":7510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"483650161","text":"from jig.commands.base import BaseCommand\nfrom jig.runner import Runner\n\ntry:\n import argparse\nexcept ImportError: # pragma: no cover\n from backports import argparse\n\n_parser = argparse.ArgumentParser(\n description='Run plugins on a revision range',\n usage='jig report [-h] [-p PLUGIN] [--rev-range REVISION_RANGE] [PATH]')\n\n_parser.add_argument(\n '--plugin', '-p',\n help='Only run this specific named plugin')\n_parser.add_argument(\n '--rev-range', dest='rev_range', default='HEAD^1..HEAD',\n help='Git revision range to run the plugins against')\n_parser.add_argument(\n 'path', nargs='?', default='.',\n help='Path to the Git repository')\n\n\nclass Command(BaseCommand):\n parser = _parser\n\n def process(self, argv):\n path = argv.path\n rev_range = argv.rev_range\n\n runner = Runner(view=self.view)\n\n runner.main(\n path,\n plugin=argv.plugin,\n rev_range=rev_range,\n interactive=False\n )\n","sub_path":"src/jig/commands/report.py","file_name":"report.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"398872641","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n#veri on isleme\n\nveri = pd.read_csv('veriler.csv')\n\nfrom sklearn.preprocessing import LabelEncoder , OneHotEncoder\nle = LabelEncoder()\nohe = OneHotEncoder(categorical_features = \"all\")\n\nulke = veri.iloc[:,0:1].values\nulke[:,0] = le.fit_transform(ulke[:,0])\nulke = ohe.fit_transform(ulke).toarray()\n\nulkeler = pd.DataFrame(data = ulke, index = range(22), columns=[\"fr\",\"tr\",\"us\"])\nx = veri.iloc[:,1:4]\nX = veri.iloc[:,1:4].values\ny = veri.iloc[:,4:]\nY = veri.iloc[:,4:].values\ns = pd.concat([ulkeler,x],axis=1)\nS = s.values\n# boy = veri.iloc[5:,1:2] 5.satırdan 5:,> 5.satırdan sonra - 1:2 veri tablosunun içinden 1.sütun için\nprint(s)\n#veri kümesinin eğitim ve test olarak bölünmesi\n #x_train > ana verinin büyük kısmı(öğrenme yapmasını istediğimiz kısım)\n #x_test > kalan gerçek kısım(karşılaştırma için)\n #y_train > büyük kısım tahmin\n #y_test > kalan kısım tahmin\n #random state 0 olmasının sebebi verilerin rastgele öğrenimi için\n #kodun açıklaması : xtraini al, ytraini tahmin etmeyi öğren, xtesti al ytesti tahmin et\n \nfrom sklearn.model_selection import train_test_split\n\nx_train, x_test,y_train,y_test = train_test_split(S,Y,test_size=0.33, 
random_state=0)\n\n\"\"\"\n#sıralama\n\nx_train=x_train.sort_index()\nx_test=x_test.sort_index()\ny_train=y_train.sort_index()\ny_test=y_test.sort_index()\n\n\"\"\"\n#öznitelik ölçekleme\n #standardscaler > veriyi standartlaştır,standarda yakınlık oranını gösterir\n \nfrom sklearn.preprocessing import StandardScaler\n\nsc = StandardScaler()\nX_train = sc.fit_transform(x_train)\nX_test = sc.transform(x_test)\n\n\"\"\"\n\n#Eğer veri object kaldıysa\n\n for column in y_train.columns:\n if y_train[column].dtype==type(object):\n y_train[column]=le.fit_transform(y_train[column])\n\nfor column in y_test.columns:\n if y_test[column].dtype==type(object):\n y_test[column]=le.fit_transform(y_test[column])\n \n\"\"\"\n\nfrom sklearn.linear_model import LogisticRegression\n\nlogr=LogisticRegression(random_state=0)\nlogr.fit(X_train,y_train)\n\ny_pred = logr.predict(X_test)\nprint(\"tahminler\")\nprint(\"----------------\")\nprint(y_pred)\n\n#confusion matrix\n #tahmin-gerçekteki ilişkisini kurar örn: tahmin k gerçek e, tahmin k gerçek k gibi.\n \nfrom sklearn.metrics import confusion_matrix\n\ncm=confusion_matrix(y_test,y_pred)\nprint(cm)\n\n#NaiveBayes\n #Gaussian , Bernoulli , Multinomial , Complement olarak çeşitleri vardır.\n #kullanılan NB yöntemleri başarıyı değiştirir.\n \nfrom sklearn.naive_bayes import GaussianNB\n\ngnb = GaussianNB()\ngnb.fit(X_train,y_train)\ny_pred = gnb.predict(X_test)\n\ncm=confusion_matrix(y_test,y_pred)\nprint(\"GNB\")\nprint(cm)\n\nfrom sklearn.naive_bayes import BernoulliNB\n\nbnb = BernoulliNB()\nbnb.fit(X_train,y_train)\ny_pred = bnb.predict(X_test)\n\ncm = confusion_matrix(y_test,y_pred)\nprint(\"BNB\")\nprint(cm)\n\n\nfrom sklearn.naive_bayes import ComplementNB\n\ncnb = ComplementNB()\ncnb.fit(X_train,y_train)\ny_pred = cnb.predict(X_test)\n\ncm = confusion_matrix(y_test,y_pred)\nprint(\"CNB\")\nprint(cm)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"5. Machine Learning/1. Dr. Sadi Evren Seker/13. 
Naive Bayes/naive_bayes.py","file_name":"naive_bayes.py","file_ext":"py","file_size_in_byte":3136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"310686462","text":"class Estudiante():\n nombre = None\n indagadores = None\n pensador = None\n comunicantes = None\n matricula = None\n cuatrimestre = None\n uniforme = None\n mentalidad = None\n reflexivo = None\n integro = None\n \n \n def __init__(self):\n print (\"Estudiante\")\n \n def Tareas(self):\n print(\"Realiza\")\n \n def Estudiar(self):\n print(\"Libros\")\n \n def Aprender(self):\n print(\"Materias\")\n \n def Respetar(self):\n print(\"A sus mayores\")\n \n def Participar(self):\n print(\"En clase\")\n\n\n\nmishel= Estudiante()\nmishel.nombre=(\"Alondra Mishel Otero Mendoza\")\nmishel.indagadores=(\"Nivel Medio\")\nmishel.pensador=(\"Nivel Alto\")\nmishel.comunicantes=(\"Influencer\")\nmishel.matricula=(\"1720110358\")\nmishel.cuatrimestre=(\"2 Cuatrimestre\")\nmishel.uniforme=(\"Completo\")\nmishel.mentalidad=(\"Madura\")\nmishel.reflexivo=(\"Si\")\nmishel.integro=(\"Si\")\n\n\n\nprint(mishel.nombre)\nprint(mishel.indagadores)\nprint(mishel.pensador)\nprint(mishel.comunicantes)\nprint(mishel.matricula)\nprint(mishel.cuatrimestre)\nprint(mishel.uniforme)\nprint(mishel.mentalidad)\nprint(mishel.reflexivo)\nprint(mishel.integro)\n","sub_path":"Semana2/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"621297912","text":"# Fibonacci Lucas sequence\r\n# 斐波那契—卢卡斯数列\r\n\r\n# Author: zhangzhp\r\n# Date: 2017-8-15 11:42:24\r\n\r\n# ---------- 函数定义 ---------- #\r\n\r\n'''\r\n用户输入\r\n必需输入正整数\r\n'''\r\ndef my_input(str):\r\n if str != '':\r\n try:\r\n # 自定义的条件\r\n int(str)\r\n return True\r\n except ValueError:\r\n pass\r\n print('输入内容有误!\\n')\r\n return False\r\n else:\r\n print(\"输入内容不能为空!\\n\")\r\n return False\r\n\r\n'''\r\n询问是否需要重新输入或继续输入询问语句自定义\r\n'''\r\ndef yorn(str = \"是否重新输入(y or n)\"):\r\n try:\r\n YorN = input(str).lower()\r\n if YorN == 'y' or YorN == 'yes':\r\n return True\r\n elif YorN == 'n' or YorN == 'no':\r\n print(\"\\n查询结束!\\n\")\r\n return False\r\n else:\r\n print(\"\\n输入有误!视为No,查询结束\\n\")\r\n return False\r\n except ValueError:\r\n print(\"\\n输入有误!视为No,查询结束\\n\")\r\n\r\n'''\r\n要查询的数列位数\r\n'''\r\ndef listLengthFunction():\r\n listLengthStr = input(\"请输入一个整数:\\n\")\r\n if my_input(listLengthStr):\r\n listLength = int(listLengthStr)\r\n if listLength > 0:\r\n return listLength\r\n else:\r\n print(\"抱歉,输入内容必须为正整数!\")\r\n # 询问是否重新输入\r\n if yorn():\r\n return listLengthFunction()\r\n else:\r\n return False\r\n else:\r\n if yorn():\r\n return listLengthFunction()\r\n else:\r\n return False\r\n\r\n'''\r\nFibonacci sequence\r\n斐波那契数列\r\nLucas sequence\r\n卢卡斯数列\r\n'''\r\ndef sequence(seq,listLength):\r\n if seq=='Lucas':\r\n sequenceList = [1,3]\r\n str_title = '卢卡斯'\r\n elif seq =='Fibonacci':\r\n sequenceList = [1,1]\r\n str_title = '斐波那契'\r\n else:\r\n return print('抱歉,找不到要查询的数列!')\r\n\r\n print(str_title+\"数列:\\n\")\r\n listLength = int(listLength)\r\n if listLength <= 2:\r\n print(sequenceList[0:listLength])\r\n print()\r\n else:\r\n for i in range(3,listLength+1):\r\n sequenceList.append(sequenceList[i-2] + sequenceList[i-3])\r\n print(sequenceList)\r\n print()\r\n\r\n'''\r\n查询方法\r\nFibonacci or Lucas\r\n'''\r\ndef check():\r\n print()\r\n str = input(\"请输入需要查询的数列(Lucas or Fibonacci)\")\r\n if (str.lower() == 'l'):\r\n print('# ---------- 卢卡斯数列 
---------- #')\r\n        checkListLength = listLengthFunction()\r\n        if checkListLength == False:return\r\n        sequence('Lucas',checkListLength)\r\n        print('# ---------- 卢卡斯数列 ---------- #')\r\n    elif str.lower() == 'f':\r\n        print('# ---------- 斐波那契数列 ---------- #')\r\n        checkListLength = listLengthFunction()\r\n        if checkListLength == False:return\r\n        sequence('Fibonacci',checkListLength)\r\n        print('# ---------- 斐波那契数列 ---------- #')\r\n    else:\r\n        print('# ---------- 斐波那契-卢卡斯数列 ---------- #')\r\n        checkListLength = listLengthFunction()\r\n        if checkListLength == False:return\r\n        sequence('Fibonacci',checkListLength)\r\n        sequence('Lucas',checkListLength)\r\n        print('# ---------- 斐波那契-卢卡斯数列 ---------- #')\r\n\r\n\r\n# ---------- 函数结束 ---------- #\r\n\r\n\r\n# ---------- 开始执行 ---------- #\r\ncheck()\r\nprint()\r\ninput(\"按回车键继续!\")\r\n","sub_path":"var/notes/python/simple-example/Fibonacci Lucas sequence.py","file_name":"Fibonacci Lucas sequence.py","file_ext":"py","file_size_in_byte":3279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"238943796","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jun 22 22:56:19 2019\n\n@author: wangchong\n\"\"\"\n# =============================================================================\n# terrible idea\n# =============================================================================\n\nimport numpy as np;\nfrom scipy.special import expit;\nfrom matplotlib import pyplot as plt;\n\nclass InfoMax:\n    def __init__(self, dim, GAMMA, BETA, G, bias, sparsity):\n        \n        self.dim = dim;\n        \n        # binary state\n        self.h = np.zeros((dim, 1));\n        \n        # membrane voltage\n        self.v = np.random.randn(dim, 1);\n        \n        # membrane time constant\n        self.tau_v = 0.1;\n        \n        # weights\n        self.w = G*np.random.randn(dim, dim)/np.sqrt(dim*sparsity);\n        mask = np.random.binomial(1, sparsity, size=(dim, dim))\n        self.w *= mask;\n        \n        # slope/temperature parameter\n        self.beta = BETA;\n        \n        # learning rate\n        self.gamma = GAMMA;\n        \n        # bias current\n        self.b = bias;\n        \n        # eligibility trace\n        self.eSpike = np.zeros((1, dim));\n        self.prevFR = np.zeros((dim, 1));\n        \n        # mean firing-rate trace and its time constant (assumed values; the\n        # original referenced self.meanFR and self.tau_r without defining them)\n        self.meanFR = np.zeros((dim, 1));\n        self.tau_r = self.tau_v;\n        \n    def trainStep(self, ext_in):\n        \n        # integrate membrane voltage\n#        h_aug = np.concatenate(([[1]], self.h));\n        h_aug = self.h;\n        \n        dvt = -self.tau_v + np.matmul(self.w, h_aug);\n        self.v += self.tau_v*dvt;\n        \n        # noise and spike\n        noise = np.random.logistic(0, 1);\n        prob_of_spike = expit(self.beta*(self.v - self.b + ext_in));\n        new_state = np.array(((self.v - self.b + ext_in + noise)>0), dtype=float);\n        \n        # update eligibility trace, keeping the previous value for the\n        # anti-Hebbian term (prev_eSpike was undefined in the original)\n        prev_eSpike = self.eSpike;\n        self.eSpike = (1-self.tau_v)*self.eSpike + self.tau_v*h_aug.T;\n        self.meanFR = (1-self.tau_r)*self.meanFR + self.tau_r*prob_of_spike;\n        \n        hebbian = np.outer(self.tau_v*dvt*prob_of_spike*(1-prob_of_spike), self.eSpike);\n        anti_hebbian = np.outer(prob_of_spike - self.prevFR, prev_eSpike);\n        \n        self.prevFR = prob_of_spike;\n        \n        # calculate final gradient\n        dw = hebbian - anti_hebbian;\n        \n        self.w += self.gamma*(dw);\n        self.h = new_state;\n        \n        return self.h.squeeze(), np.linalg.norm(dw)/self.dim**2;\n    \n","sub_path":"model/InfomaxTC.py","file_name":"InfomaxTC.py","file_ext":"py","file_size_in_byte":2348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"458451869","text":"#coding=utf-8\r\n'''\r\n@文件名:MtimeCollecter.py\r\n@创建时间:2018-03-09 15:28:38\r\n@作者:疾风\r\n@功能:从时光网爬取正在热映电影的信息\r\n'''\r\nimport re\r\nimport requests\r\n\r\ndef getMtimeScore(movies):\r\n    '''\r\n    @功能:获取时光网评分等\r\n    @入参:\r\n        @param 
movies: list 存储数据对象\r\n    @出参:\r\n        @param movies: list 存储数据对象\r\n    '''\r\n    url = \"http://service.channel.mtime.com/Search.api\"\r\n    #将豆瓣评分导入集合\r\n    for movie in movies:\r\n        print(\"\\t处理中 正在抓取时光网内容:\" + movie[\"电影名\"])\r\n        querystring = {\r\n            \"Ajax_CallBack\":\"true\",\r\n            \"Ajax_CallBackType\":\"\",\r\n            \"Ajax_CallBackMethod\":\"GetSearchResult\",\r\n            \"t\":\"0\",\r\n            \"Ajax_CallBackArgument0\":movie[\"电影名\"],\r\n            \"Ajax_CallBackArgument4\":\"1\"\r\n        }\r\n        mtime = requests.request(\"GET\", url, params=querystring)\r\n        #解析字典对象\r\n        movie = getMtimeData(mtime, movie)\r\n        if(movie.has_key(\"时光网评分\") == False or movie[\"时光网评分\"] == None):\r\n            movie[\"时光网评分\"] = \"暂无评\"\r\n        if(movie.has_key(\"电影时长\") == False or movie[\"电影时长\"] == None):\r\n            movie[\"电影时长\"] = \"未知\"\r\n        if(movie.has_key(\"电影类型\") == False or movie[\"电影类型\"] == None):\r\n            movie[\"电影类型\"] = \"未知\"\r\n    \r\n    return movies\r\n\r\ndef getMtimeData(mtime, movie):\r\n    '''\r\n    @功能:解析字符串\r\n    @入参:\r\n        @param mtime: response对象\r\n        @param movie: dict 存储字典\r\n    @出参:\r\n        @param movie: dict 存储字典\r\n    '''\r\n    for data in mtime.text.split('},{\"movieId\"'):\r\n        #判断数据是否正确\r\n        title = re.search('\"movieTitle\":\"(.*?)\\s+', data).group(1)\r\n        rating = re.search('\"movieRating\":\"(.*?)\"', data)\r\n        length = re.search('\"movieLength\":(.*?),', data)\r\n        type = re.search('\"genreTypes\":\"(.*?)\"', data)\r\n        if(movie[\"电影名\"] == title):\r\n            if(rating != None):\r\n                movie[\"时光网评分\"] = rating.group(1)\r\n            if(length != None):\r\n                movie[\"电影时长\"] = length.group(1)\r\n            if(type != None):\r\n                movie[\"电影类型\"] = type.group(1)\r\n    \r\n    return movie\r\n","sub_path":"src/MovieShow/MovieCrawler/telecommunication/MtimeCollecter.py","file_name":"MtimeCollecter.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"105465119","text":"from django import forms\n\nfrom tower import ugettext_lazy as _lazy\n\nfrom users.models import UserProfile\n\n\nclass RegistrationForm(forms.ModelForm):\n    first_name = forms.CharField(label=_lazy(u'First Name'), max_length=30,\n                                 required=False)\n    last_name = forms.CharField(label=_lazy(u'Last Name'), max_length=30,\n                                required=True)\n\n    optin = forms.BooleanField(\n        label=_lazy(u\"I'm okay with you handling this info as you \"\n                    u'explain in your privacy policy.'),\n        widget=forms.CheckboxInput(attrs={'class': 'checkbox'}),\n        required=True)\n\n    class Meta:\n        model = UserProfile\n        fields = ('bio',)\n        widgets = {\n            'bio': forms.Textarea(),\n        }\n\n    def save(self, user):\n        d = self.cleaned_data\n        user.first_name = d['first_name']\n        user.last_name = d['last_name']\n        user.save()\n        super(forms.ModelForm, self).save()\n","sub_path":"apps/users/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"47652099","text":"from scipy.constants import m_p, m_e, e as qe\n\n# To be safe, only use immutable python types as default values in the parameters_dict.\n# This most importantly excludes python lists. 
Tuples may be used in their place.\n\nparameters_dict = {\n 'superparameters': {'pi'}, # are allowed anywhere and can be repeated (for backwards compatibility)\n 'simulation_parameters': {\n 'mandatory': {\n\n # Other input file names\n 'machine_param_file',\n 'secondary_emission_parameters_file',\n 'beam_parameters_file',\n\n # Log and progress files\n 'logfile_path',\n 'progress_path',\n\n # Time sampling\n 'Dt',\n 't_end',\n\n # Neglibile beam linear density\n 'lam_th',\n\n # MP management settings\n 'N_mp_max',\n 'N_mp_regen',\n 'N_mp_regen_low',\n 't_ON_regen_low',\n 'N_mp_after_regen',\n 'fact_split',\n 'fact_clean',\n 'nel_mp_ref_0',\n 'Nx_regen', 'Ny_regen', 'Nvx_regen', 'Nvy_regen', 'Nvz_regen',\n 'regen_hist_cut',\n\n # Space charge parameters\n 'Dt_sc',\n 'Dh_sc',\n 't_sc_ON',\n\n # Saving settings\n 'Dx_hist',\n 'r_center',\n 'Dt_En_hist',\n 'Nbin_En_hist',\n 'En_hist_max',\n },\n 'optional': {\n # Secondary beams\n 'secondary_beams_file_list': (),\n\n # Additional clouds\n 'additional_clouds_file_list': (),\n\n # Name, mass and charge for default cloud\n 'cloud_name': None,\n 'cloud_mass': m_e,\n 'cloud_charge': -qe,\n\n 'N_mp_soft_regen': None,\n 'N_mp_after_soft_regen': None,\n 'N_mp_async_regen': None,\n 'N_mp_after_async_regen': None,\n 'stopfile': 'stop',\n\n # Saving settings\n 'filen_main_outp': 'Pyecltest.mat',\n 'save_only': None,\n 'flag_movie': 0,\n 'flag_sc_movie': 0,\n 'flag_cos_angle_hist': True,\n 'cos_angle_width' : 0.05,\n 'save_mp_state_time_file': -1,\n 'flag_detailed_MP_info': 0,\n 'flag_hist_impact_seg': 1,\n 'flag_En_hist_seg': False,\n 'flag_verbose_file': False,\n 'flag_verbose_stdout': False,\n 'dec_fac_secbeam_prof': 1,\n 'el_density_probes': (),\n 'save_simulation_state_time_file': -1,\n 'checkpoint_DT': None,\n 'checkpoint_folder': None,\n 'copy_main_outp_DT': None,\n 'copy_main_outp_folder': None,\n 'x_min_hist_det': None,\n 'y_min_hist_det': None,\n 'x_max_hist_det': None,\n 'y_max_hist_det': None,\n 'Dx_hist_det': None,\n 'flag_lifetime_hist': False,\n 'Nbin_lifetime_hist': None,\n 'lifetime_hist_max': None,\n 'Dt_lifetime_hist':None,\n\n 'Dh_electric_energy': None,\n 'sparse_solver': 'scipy_slu',\n 'PyPICmode' : 'FiniteDifferences_ShortleyWeller',\n 'flag_reinterp_fields_at_substeps': False,\n\n # Multigrid parameters\n 'f_telescope': None,\n 'target_grid': None,\n 'N_nodes_discard': None,\n 'N_min_Dh_main': None,\n\n 'dec_fact_out': 1,\n 'save_mat_every': 1,\n\n # Where to put this?\n 't_ion': -1,\n\n 'extract_sey': True,\n\n 'step_by_step_custom_observables': None,\n 'pass_by_pass_custom_observables': None,\n 'save_once_custom_observables': None,\n\n # Energy extraction parameters\n 'extract_ene_dist': False,\n 'ene_dist_test_E_impact_eV': None,\n 'Nbin_extract_ene': None,\n 'factor_ene_dist_max': None,\n\n 'flag_em_tracking' : False,\n\n # Cross-ionization\n 'cross_ion_definitions': None\n\n },\n },\n 'machine_parameters': {\n 'mandatory': set(),\n 'optional': {\n\n # Chamber profile\n 'chamb_type': 'ellip',\n 'x_aper': None,\n 'y_aper': None,\n 'filename_chm': None,\n 'filename_chm_photoem': None,\n\n # Tracking and magnetic field\n 'track_method': 'StrongBdip',\n 'B': 0., # Tesla (if B=-1 computed from energy and bending radius)\n 'bm_totlen': -1, # m\n 'B_map_file': None,\n 'Bz_map_file': None, # documented?\n 'fact_Bmap': 1.,\n 'B0x': None,\n 'B0y': None,\n 'B0z': None,\n 'B_zero_thrhld': None,\n 'N_sub_steps': 1,\n 'B_multip': [],\n 'B_skew': None,\n\n # Optics\n 'betafx': None,\n 'betafy': None,\n 'Dx': 0.,\n 'Dy': 0.,\n\n # Residual 
gas ionization\n 'gas_ion_flag' : 0,\n 'P_nTorr' : -1,\n 'sigma_ion_MBarn' : -1,\n 'Temp_K' : -1,\n 'unif_frac' : -1,\n 'E_init_ion' : -1,\n\n # Photoemission\n 'photoem_flag' : 0,\n 'inv_CDF_refl_photoem_file' : -1,\n 'inv_CDF_all_photoem_file' : -1,\n 'k_pe_st' : -1,\n 'refl_frac' : -1,\n 'alimit' : -1,\n 'e_pe_sigma' : -1,\n 'e_pe_max' : -1,\n 'x0_refl' : -1,\n 'y0_refl' : -1,\n 'out_radius' : -1,\n 'phem_resc_fac' : 0.9999,\n 'photoelectron_angle_distribution' : 'undefined',\n 'energy_distribution' : 'gaussian',\n 'flag_continuous_emission' : False,\n\n\n # Uniform initial distribution\n 'init_unif_flag' : 0,\n 'Nel_init_unif' : None,\n 'E_init_unif' : 0,\n 'x_max_init_unif' : None,\n 'x_min_init_unif' : None,\n 'y_max_init_unif' : None,\n 'y_min_init_unif' : None,\n 'filename_init_MP_state': None, # undocumented?\n\n # Uniform initial density\n 'init_unif_edens_flag' : 0,\n 'init_unif_edens' : None,\n 'E_init_unif_edens' : 0.,\n 'x_max_init_unif_edens' : None,\n 'x_min_init_unif_edens' : None,\n 'y_max_init_unif_edens' : None,\n 'y_min_init_unif_edens' : None,\n\n 'flag_assume_convex': True,\n },\n },\n 'beam_beam': {\n 'mandatory': {\n\n # Basic definitions\n 'energy_eV',\n\n #Transverse electric field\n 'beam_field_file',\n\n # Beam longitudinal profile\n 'b_spac',\n 'fact_beam',\n 'flag_bunched_beam',\n\n # this is mandatory!\n 't_offs',\n 'filling_pattern_file',\n },\n 'optional': {\n # Basic definitions\n 'q_part': qe,\n 'm0_part': m_p,\n 'Dp_p': 0.,\n 'nemittx': None,\n 'nemitty': None,\n 'x_beam_pos': 0.,\n 'y_beam_pos': 0.,\n 'sigmax': -1,\n 'sigmay': -1,\n\n #Transverse electric field\n 'save_beam_field_file_as': None,\n\n # if beam_field_file is given\n 'Dh_beam_field': None,\n 'Nx': None,\n 'Ny': None,\n 'nimag': None,\n\n # if compute_FDSW_multigrid\n 'Dh_beam_field': None,\n 'f_telescope_beam': None,\n 'target_grid_beam': None,\n 'N_nodes_discard_beam': None,\n 'N_min_Dh_main_beam': None,\n\n # if flag_bunched_beam == 1\n 'sigmaz' : -1,\n\n # if flag_bunched_beam == 0\n 'beam_long_prof_file': None,\n\n #????\n 'Dx': None,\n 'Dy': None,\n 'betafx': None,\n 'betafy': None,\n\n # this is optional!\n 'coast_dens': 0.\n },\n },\n 'secondary_emission_parameters': {\n 'mandatory': {\n\n # Secondray Electron Energy Spectrum\n 'E_th',\n 'sigmafit',\n 'mufit',\n\n # Other parameters\n 'scrub_en_th',\n },\n 'optional': {\n\n # Secondray Electron Yield\n 'Emax': None,\n 'del_max': None,\n 'R0': None,\n 'E0': None,\n 's_param': None,\n\n # Choice of model\n 'switch_model': 0,\n\n # Other parameters\n 'secondary_angle_distribution': 'undefined',\n 'switch_no_increase_energy': 0,\n 'thresh_low_energy': -1,\n\n # SEY from file\n 'sey_file': None,\n 'flag_costheta_Emax_shift': True,\n 'flag_costheta_delta_scale': True,\n\n # Furman-Pivi Model\n 'furman_pivi_surface': None\n\n },\n },\n 'combined_simulations_secondaryEmission_machine_parameters': {\n 'mandatory': set(),\n 'optional': {},\n },\n 'additional_cloud_parameters': {\n 'mandatory': {\n\n # Cloud particles\n 'cloud_mass',\n 'cloud_charge',\n\n # Residual gas ionization flag\n 'gas_ion_flag',\n\n # Photoemission flag\n 'photoem_flag',\n\n # Uniform initial distribution flag\n 'init_unif_flag',\n\n # Uniform initial density flag\n 'init_unif_edens_flag',\n\n # Secondary emission model\n 'switch_model',\n },\n 'optional': {\n\n 'save_only': (),\n \n # MP management settings\n 'N_mp_max': (),\n 'N_mp_regen': (),\n 'N_mp_regen_low': (),\n 't_ON_regen_low': (),\n 'N_mp_after_regen': (),\n 'fact_split': (),\n 'fact_clean': (),\n 
'nel_mp_ref_0': (),\n 'Nx_regen': (), 'Ny_regen': (), 'Nvx_regen': (), 'Nvy_regen': (), 'Nvz_regen': (),\n 'regen_hist_cut': (),\n\n 'N_mp_soft_regen': (),\n 'N_mp_after_soft_regen': (),\n 'N_mp_async_regen': (),\n 'N_mp_after_async_regen': (),\n\n # Tracking and magnetic field\n 'N_sub_steps': (),\n\n # Residual gas ionization\n 'P_nTorr': (),\n 'sigma_ion_MBarn': (),\n 'Temp_K': (),\n 'unif_frac': (),\n 'E_init_ion': (),\n\n 't_ion': (),\n\n # Photoemission\n 'inv_CDF_refl_photoem_file': (),\n 'inv_CDF_all_photoem_file': (),\n 'k_pe_st': (),\n 'refl_frac': (),\n 'alimit': (),\n 'e_pe_sigma': (),\n 'e_pe_max': (),\n 'x0_refl': (),\n 'y0_refl': (),\n 'out_radius': (),\n 'phem_resc_fac': (),\n 'photoelectron_angle_distribution': (),\n 'energy_distribution': (),\n 'flag_continuous_emission': (),\n 'filename_chm_photoem': (),\n\n # Uniform initial distribution\n 'Nel_init_unif': (),\n 'E_init_unif': (),\n 'x_max_init_unif': (),\n 'x_min_init_unif': (),\n 'y_max_init_unif': (),\n 'y_min_init_unif': (),\n 'filename_init_MP_state': (),\n\n # Uniform initial density\n 'init_unif_edens': (),\n 'E_init_unif_edens': (),\n 'x_max_init_unif_edens': (),\n 'x_min_init_unif_edens': (),\n 'y_max_init_unif_edens': (),\n 'y_min_init_unif_edens': (),\n\n # Secondary emission parameters\n 'E_th': (),\n 'sigmafit': (),\n 'mufit': (),\n 'Emax': (),\n 's_param': (),\n 'del_max': (),\n 'R0': (),\n 'E0': (),\n\n 'switch_no_increase_energy': (),\n 'thresh_low_energy': (),\n 'scrub_en_th': (),\n\n 'secondary_angle_distribution': (),\n\n 'sey_file': (),\n 'flag_costheta_Emax_shift': (),\n 'flag_costheta_delta_scale': (),\n\n # Furman-Pivi model of SEY\n 'furman_pivi_surface': (),\n\n # Saving settings\n 'Dx_hist': (),\n 'r_center': (),\n 'Dt_En_hist': (),\n 'Nbin_En_hist': (),\n 'En_hist_max': (),\n 'flag_lifetime_hist': (),\n 'Nbin_lifetime_hist': (),\n 'lifetime_hist_max': (),\n 'Dt_lifetime_hist': (),\n\n 'flag_movie': (),\n 'flag_sc_movie': (),\n 'flag_cos_angle_hist': (),\n 'cos_angle_width': (),\n 'save_mp_state_time_file': (),\n 'flag_detailed_MP_info': (),\n 'flag_hist_impact_seg': (),\n 'flag_En_hist_seg':(),\n 'flag_verbose_file': (),\n 'flag_verbose_stdout': (),\n 'dec_fac_secbeam_prof': (),\n 'el_density_probes': (),\n 'save_simulation_state_time_file': (),\n 'x_min_hist_det': (),\n 'y_min_hist_det': (),\n 'x_max_hist_det': (),\n 'y_max_hist_det': (),\n 'Dx_hist_det': (),\n\n # Log and progress files\n 'logfile_path': (),\n 'progress_path': (),\n },\n },\n}\n\nfor key in ('secondary_emission_parameters', 'machine_parameters', 'simulation_parameters'):\n parameters_dict['combined_simulations_secondaryEmission_machine_parameters']['mandatory'].update(parameters_dict[key]['mandatory'])\n parameters_dict['combined_simulations_secondaryEmission_machine_parameters']['optional'].update(parameters_dict[key]['optional'])\n","sub_path":"default_input_parameters.py","file_name":"default_input_parameters.py","file_ext":"py","file_size_in_byte":14139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"166039825","text":"import os\n\noutput_dir = r'C:\\Users\\Zach\\Desktop\\esp-idf\\project\\components\\http\\website'\nsource_dir = r'C:\\Users\\Zach\\Desktop\\esp-idf\\project\\website'\n\ndef find_css_file(line):\n cssname_start = line.find('href=')+6\n if cssname_start > 0:\n cssname_end = line.find('\\\"', cssname_start+1)\n cssname = line[cssname_start:cssname_end]\n return cssname\n else:\n return None\n\noutput_path = output_dir\nif not 
os.path.exists(output_dir):\n os.mkdir(output_dir)\n\nfor dirname, subdirnames, filenames in os.walk(source_dir):\n output_path = dirname.replace(source_dir, output_dir)\n\n for subdirname in subdirnames:\n d = os.path.join(output_path, subdirname)\n if not os.path.exists(d):\n os.mkdir(d)\n\n for filename in filenames:\n name, extension = os.path.splitext(filename)\n if extension == '.html':\n with open(os.path.join(dirname, filename), 'r') as source:\n with open(os.path.join(output_path, filename), 'w') as compressed:\n print(\"Writing to %s\" % os.path.join(output_path, filename))\n\n lines = source.readlines()\n for line in lines:\n if 'rel=\"stylesheet\"' in line:\n cssfile = find_css_file(line)\n if cssfile:\n compressed.write(\"\")\n else:\n compressed.write(line.strip(' \\t\\n\\r'))\n \n\n\n\n \n ","sub_path":"scripts/compress.py","file_name":"compress.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"310152234","text":"import language_tool_python\n\ntool = language_tool_python.LanguageTool('en-US')\n\n\nclass GrammarError:\n def __init__(self, message, short_message, replacements, offset, length, rule):\n self.replacements = replacements\n self.offset = offset\n self.length = length\n self.rule = rule\n self.short_message = short_message\n self.message = message\n\n\nclass Client(object):\n\n def check(self, sentence):\n matches = tool.check(sentence)\n return [GrammarError(message=i.context,\n replacements=i.replacements,\n offset=i.offset,\n length=i.errorLength,\n rule=i.category,\n short_message=i.message) for i in matches]\n\n\nclass LanguageChecker:\n def __init__(self):\n self.client = Client()\n\n def check(self, sentence, categories=None, excludes_ids=None):\n if not categories and not excludes_ids:\n return self.client.check(sentence)\n\n ret = []\n for error in self.client.check(sentence):\n if error.rule in categories and (\n excludes_ids is None or error.rule not in excludes_ids):\n ret.append(error)\n\n return ret\n\n def misspellings(self, sentence):\n \"\"\"\n :return: list of spelling errors\n \"\"\"\n replacements = self.check(sentence, ['TYPOS'])\n ret = set()\n # corrections = set()\n for r in replacements:\n ret.add(sentence[r.offset: r.offset + r.length])\n # if len(r.replacements) > 0:\n # corrections.add(r.replacements[0]['value'])\n\n return ret\n\n def spelling_corrector(self, sentence):\n replacements = self.check(sentence, ['TYPOS'])\n for r in replacements:\n ms = sentence[r.offset: r.offset + r.length]\n if len(r.replacements) > 0:\n sentence = sentence.replace(ms, r.replacements[0])\n\n return sentence\n\n def grammar_corrector(self, sentence, categories=['MISC', 'GRAMMAR', \"TYPOS\"]):\n\n repls = self.check(sentence, categories=categories)\n\n while len(repls) > 0:\n r = repls[0]\n if not r.replacements:\n # no suggested fix: drop this error so the loop can terminate\n repls = repls[1:]\n continue\n sentence = sentence[:r.offset] + r.replacements[0] + sentence[r.offset + r.length:]\n repls = self.check(sentence, categories=['MISC', 'GRAMMAR', 'TYPOS'])\n\n return sentence\n\n def singleWordCorrection(self, word):\n\n for p in self.check(word.replace('_', ' ')):\n if p.rule == 'TYPOS':\n for r in p.replacements:\n if word in r:\n return r.replace(' ', '_')\n if p.replacements:\n return p.replacements[0].replace(' ', '_')\n return word\n\n","sub_path":"language_tool.py","file_name":"language_tool.py","file_ext":"py","file_size_in_byte":2874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"646914115","text":"from regression_tests import *\n\nclass Test(Test):\n settings = TestSettings(\n tool='fileinfo',\n input='ca9998268d1038d448958a4430cd4641'\n )\n\n def test_fileinfo_succeeds(self):\n assert not self.fileinfo.succeeded\n assert self.fileinfo.output.contains(\n 'Error: Failed to parse the input file \\(it is probably'\n ' corrupted\\). Detected format is: Mach-O.'\n )\n","sub_path":"tools/fileinfo/bugs/macho-segfault-getNextLoadCommandInfo/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"214924531","text":"import sqlite3\nimport logging\nimport telebot\nimport const\nauth = 1 # nonzero until the user authenticates; set to 0 on success\n\n\nfrom telebot import types\n#tg_conn\nbot = telebot.TeleBot(const.token)\n\n#sql_conn\nconn = sqlite3.connect(const.adr_con)\ncursor = conn.cursor()\nconn.close()\n\n# logging setup\nlogger = logging.getLogger('log')\nlogger.setLevel(logging.INFO)\nfh = logging.FileHandler('someTestBot.log')\nfh.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\"%(asctime)s | %(levelname)-7s | %(message)s\")\nfh.setFormatter(formatter)\nlogger.addHandler(fh)\n\n#keyboard\n\n@bot.message_handler(commands=[\"auth\"])\ndef geophone(message):\n global auth\n if auth != 0:\n keyboard = types.ReplyKeyboardMarkup(row_width=1, resize_keyboard=True)\n button_phone = types.KeyboardButton(text=\"Send phone number\", request_contact=True)\n keyboard.add(button_phone)\n bot.send_message(message.chat.id,\n \"Send us your current phone number for authentication\",\n reply_markup=keyboard)\n if str(message.text) == \"89995252655\":\n bot.send_message(message.chat.id,\n \"Authentication successful\",\n )\n auth = 0\n else:\n bot.send_message(message.chat.id,\n \"Please contact the administrator\",\n )\n\n@bot.message_handler(commands=[\"start\"])\ndef text(message):\n keyboard = types.ReplyKeyboardMarkup(row_width=1, one_time_keyboard=True)\n button1 = types.KeyboardButton(text='Edit')\n button2 = types.KeyboardButton(text='Add')\n button3 = types.KeyboardButton(text='Delete')\n keyboard.add(button1, button2, button3)\n # show the action keyboard to the user\n bot.send_message(message.chat.id, 'Choose an action:', reply_markup=keyboard)\n\n\n@bot.message_handler(func=lambda item: item.text == 'Edit', content_types=['text'])\ndef change(item):\n if auth == 0:\n pass\n else:\n print(\"Please authenticate first (/auth)\")\n\n@bot.message_handler(func=lambda item: item.text == 'Add', content_types=['text'])\ndef add(item):\n if auth == 0:\n pass\n else:\n print(\"Please authenticate first (/auth)\")\n\n@bot.message_handler(func=lambda item: item.text == 'Delete', content_types=['text'])\ndef dell(item):\n if auth == 0:\n pass\n else:\n print(\"Please authenticate first (/auth)\")\n\n\n# start receiving updates\nbot.polling()\n","sub_path":"SQL/sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"85532864","text":"\"\"\"security\n\nRevision ID: ad6d7708b1a0\nRevises: 691377c94ec1\nCreate Date: 2016-01-12 16:58:43.122066\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'ad6d7708b1a0'\ndown_revision = '691377c94ec1'\nbranch_labels = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('bkr_roles',\n sa.Column('id', sa.BigInteger(), nullable=False),\n sa.Column('name', sa.Unicode(), nullable=True),\n sa.Column('description', sa.Unicode(), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.create_table('bkr_users_x_roles',\n sa.Column('users_id', sa.BigInteger(), nullable=True),\n sa.Column('roles_id', sa.BigInteger(), nullable=True),\n sa.ForeignKeyConstraint(['roles_id'], ['bkr_roles.id'], ),\n sa.ForeignKeyConstraint(['users_id'], ['bkr_users.id'], )\n )\n op.add_column('bkr_users', sa.Column('active', sa.Boolean(), nullable=True))\n op.add_column('bkr_users', sa.Column('confirmed_at', sa.DateTime(), nullable=True))\n op.add_column('bkr_users', sa.Column('current_login_at', sa.DateTime(), nullable=True))\n op.add_column('bkr_users', sa.Column('current_login_ip', sa.Unicode(), nullable=True))\n op.add_column('bkr_users', sa.Column('email', sa.Unicode(), nullable=True))\n op.add_column('bkr_users', sa.Column('last_login_at', sa.DateTime(), nullable=True))\n op.add_column('bkr_users', sa.Column('last_login_ip', sa.Unicode(), nullable=True))\n op.add_column('bkr_users', sa.Column('login_count', sa.BigInteger(), nullable=True))\n op.add_column('bkr_users', sa.Column('password', sa.Unicode(), nullable=True))\n op.create_unique_constraint(None, 'bkr_users', ['email'])\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(None, 'bkr_users', type_='unique')\n op.drop_column('bkr_users', 'password')\n op.drop_column('bkr_users', 'login_count')\n op.drop_column('bkr_users', 'last_login_ip')\n op.drop_column('bkr_users', 'last_login_at')\n op.drop_column('bkr_users', 'email')\n op.drop_column('bkr_users', 'current_login_ip')\n op.drop_column('bkr_users', 'current_login_at')\n op.drop_column('bkr_users', 'confirmed_at')\n op.drop_column('bkr_users', 'active')\n op.drop_table('bkr_users_x_roles')\n op.drop_table('bkr_roles')\n ### end Alembic commands ###\n","sub_path":"migrations/versions/ad6d7708b1a0_security.py","file_name":"ad6d7708b1a0_security.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"505430469","text":"# import the libraries\nimport cv2\nimport cvzone\nimport csv\nimport time\nfrom cvzone.HandTrackingModule import HandDetector\n\n# take the image from the camera\ncap = cv2.VideoCapture(0)\ncap.set(3, 1288)\ncap.set(4, 720)\n\n# hand detection module import in detector\ndetector = HandDetector(detectionCon=0.8)\n\n\n# 3333333333333333333333333333333333333333333333333333333333333333333333333333333333333333333\n\n\nclass MCQ:\n def __init__(self, data):\n self.Question = data[0]\n self.choice1 = data[1]\n self.choice2 = data[2]\n self.choice3 = data[3]\n self.choice4 = data[4]\n self.answer = int(data[5])\n\n self.userAns = None\n\n def update(self, cursor, bboxs):\n for x, bbox in enumerate(bboxs):\n x1, y1, x2, y2 = bbox\n if x1 < cursor[0] < x2 and y1 < cursor[1] < y2:\n self.userAns = x + 1\n cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), cv2.FILLED)\n return x + 1\n return 0\n\n\n# 22222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222222\n\n\n# import csv file data\npathCSV = \"MCQS.csv\"\nwith open(pathCSV, newline='\\n') as f:\n reader = csv.reader(f)\n dataAll = list(reader)[1:]\nprint(dataAll)\n\n# create the object of each MCQ\nmcqList = []\nfor q in dataAll:\n 
mcqList.append(MCQ(q))\n\nprint(\"Total MCQ Objects Created : \", len(mcqList))\n\nqNo = 0\nqTotal = len(dataAll)\nuserAns = 0\n\n\n\n# 33333333333333333333333333333333333333333333333333333333333333333333333333333333\n\nwhile True:\n success, img = cap.read()\n img = cv2.flip(img, 1)\n hands, img = detector.findHands(img, flipType=False)\n cv2.putText(img,\"'Virtual Reality (VR) Quiz'\", (200,50), cv2.FONT_HERSHEY_DUPLEX, 2, 255)\n \n mcq = mcqList[0]\n\n if qNo < qTotal:\n mcq = mcqList[qNo]\n\n img, bbox = cvzone.putTextRect(img, mcq.Question, [100, 150], 2, 2, colorR=(255,234,0), offset=15, border=1)\n img, bbox1 = cvzone.putTextRect(img, mcq.choice1, [150, 250], 2, 2,colorR=(255,150,90), offset=15, border=1)\n img, bbox2 = cvzone.putTextRect(img, mcq.choice2, [150, 350], 2, 2,colorR=(255,150,90), offset=15, border=1)\n img, bbox3 = cvzone.putTextRect(img, mcq.choice3, [150, 450], 2, 2, colorR=(255,150,90),offset=15, border=1)\n img, bbox4 = cvzone.putTextRect(img, mcq.choice4, [150, 550], 2, 2,colorR=(255,150,90), offset=15, border=1)\n print(qNo)\n\n if hands:\n lmList = hands[0]['lmList']\n cursor = lmList[8]\n length, info, img = detector.findDistance(lmList[8], lmList[12], img)\n if length < 60:\n if userAns == 0:\n userAns = mcq.update(cursor, [bbox1, bbox2, bbox3, bbox4])\n print(\"user\" + str(userAns))\n\n if userAns > 0:\n if userAns == 1:\n img, selectA = cvzone.putTextRect(img, \"Option 'A' is selected\", [900, 450], 2, 2, offset=15, border=1)\n if userAns == 2:\n img, selectB = cvzone.putTextRect(img, \"Option 'B' is selected\", [900, 450], 2, 2, offset=15, border=1)\n if userAns == 3:\n img, selectC = cvzone.putTextRect(img, \"Option 'C' is selected\", [900, 450], 2, 2, offset=15, border=1)\n if userAns == 4:\n img, selectD = cvzone.putTextRect(img, \"Option 'D' is selected\", [900, 450], 2, 2, offset=15, border=1)\n\n img, next1 = cvzone.putTextRect(img, \"Next\", [1000, 300], 2, 2, offset=15, border=1)\n x1, y1, x2, y2 = next1\n if x1 < cursor[0] < x2 and y1 < cursor[1] < y2:\n print(\"inside\")\n time.sleep(0.3)\n qNo = qNo + 1\n userAns = 0\n\n else:\n score = 0\n for mcq in mcqList:\n if mcq.answer == mcq.userAns:\n score += 1\n score = round((score / qTotal) * 100, 2)\n img, clan = cvzone.putTextRect(img, \"Quiz Completed\", [250, 300], 2, 2,colorR=(255,100,100), offset=15, border=2)\n img, sco = cvzone.putTextRect(img, \" Your Score : \" + str(score) + \"%\", [700, 300], 2, 2,colorR=(255,100,100), offset=15, border=2)\n img, _ = cvzone.putTextRect(img, f'{round((qNo / qTotal) * 100)}%', [603, 629], 1, 2, offset=15 ,border=1)\n if qNo == qTotal:\n img, exit =cvzone.putTextRect(img,\"EXIT \" ,[400,500],2,2,colorR=(64,64,255), offset=10,border=1)\n img, retry =cvzone.putTextRect(img,\"Retry \" ,[700,500],2,2,colorR=(80,80,255),offset=10,border=1)\n if hands:\n lmList = hands[0]['lmList']\n cursor = lmList[8]\n length1, info, img = detector.findDistance(lmList[8], lmList[12], img)\n if length1 < 60:\n x1, y1, x2, y2 = exit\n if x1 < cursor[0] < x2 and y1 < cursor[1] < y2:\n print(\"cut\")\n break\n if length1 < 60:\n x1, y1, x2, y2 = retry\n if x1 < cursor[0] < x2 and y1 < cursor[1] < y2:\n print(\"cut\")\n qNo=0\n\n barValue = 120 + (484 // qTotal) * qNo\n if barValue ==120 :\n barValue=barValue+14\n else:\n barValue = barValue\n cv2.rectangle(img, (120, 604), (barValue, 645), (50,205,50), cv2.FILLED)\n cv2.rectangle(img, (120, 604), (663, 645), (255,255,255), 2)\n # cv2.rectangle(img, (300, 200), (400, 200), (255, 0, 255), 3)\n img, _ = cvzone.putTextRect(img, f'{round((qNo / qTotal) * 100)}%', [barValue, 629], 1, 2,(255,255,255),offset=15 ,border=0)\n cv2.putText(img,f'{round((qNo / qTotal) * 100)}%', (barValue+53, 655), cv2.FONT_HERSHEY_SCRIPT_COMPLEX, 2,150)\n \n\n cv2.imshow(\"image\", img)\n if cv2.waitKey(1) & 0XFF == ord('q'):\n break\n \n \n # end of the quiz script\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5951,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"474442805","text":"import socket\nfrom decouple import config\nfrom chatterbot import ChatBot\nfrom chatterbot.trainers import ListTrainer\nfrom chatterbot.trainers import ChatterBotCorpusTrainer\nimport pandas as pd\n\nHOST = '127.0.0.1'\nPORT = int(config(\"PORT\"))\n\nbot = ChatBot(\n 'MaslaChat', \n logic_adapters = ['chatterbot.logic.BestMatch']\n)\n\ntrainer = ListTrainer(bot)\ndata = pd.read_excel('frases.xlsx')\nphrases = []\n\nfor i in data.index:\n try:\n clave = str(data['clave'][i])\n frase = str(data['frase'][i])\n print(i)\n if clave!='nan':\n phrases.append(clave)\n phrases.append(clave)\n print(clave)\n phrases.append(frase)\n phrases.append(frase)\n print(frase)\n except:\n print(\"error\")\n\n if i > 60:\n break\n\ntrainer.train(phrases)\nprint('\\n')\n\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as server:\n server.bind((HOST, PORT))\n server.listen()\n print('Listening...')\n while 1:\n conn, addr = server.accept()\n with conn:\n print('Connected by', addr)\n data = conn.recv(1024)\n request = str(data)[2:-1].strip()\n print(\"Other:\", request)\n response = bot.get_response(request)\n print('MASLACHAT: ', response, '\\n')\n # if not data:\n # print(\"Break...\")\n # break\n conn.sendall(str(response).encode('utf-8'))\n conn.close()\n","sub_path":"socket-server.py","file_name":"socket-server.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"58653634","text":"from django.shortcuts import render\nfrom django.db import connection\nfrom django.core.cache import cache\nimport numpy as np\nimport operator # for dict sorting\nfrom datetime import datetime, date\nfrom collections import OrderedDict\nfrom time import perf_counter as pc\nfrom stock.catespider_async import download_many3, cate_main\nfrom stock.cate_cn_name import cate_name\nfrom stock.stock_tools import get_criticals, time15_and_tosql\nfrom stock.models import VwmaRank\n\n\n# Create your views here\ndef index(request):\n context = {}\n return render(request, 'stock/index.html', context)\n\n\ndef run_cate_sp(request):\n _, to_sql = time15_and_tosql('Rihangqing')\n try:\n cate_new = cate_main(download_many3, to_sql=to_sql)\n if_success = 'True'\n except RuntimeError:\n if_success = 'False'\n cate_new = None\n\n context = {'to_sql': to_sql,\n 'if_success': if_success,\n 'update_time': datetime.today(),\n 'cate_new': cate_new,\n 'if_update': 'Latest quotes updated; go back to the intraday page and refresh',\n }\n return render(request, 'stock/run_cate_sp.html', context)\n\n\n# Sort a two-level nested dict by the value of a given inner key, adding a 'new_rank' field to each inner dict\ndef dicsort_by_innerkey(origin_dic, inner_key, reserve=True):\n sorted_dic = OrderedDict()\n temp_dic = {k: v[inner_key] for k, v in origin_dic.items() if type(v[inner_key]) is not str}\n sorted_tuple = sorted(temp_dic.items(), key=operator.itemgetter(1), reverse=reserve)\n for t in range(len(sorted_tuple)):\n outer_key = sorted_tuple[t][0]\n origin_dic[outer_key]['new_rank'] = t\n try:\n 
origin_dic[outer_key]['rank_change'] = origin_dic[outer_key]['last_rank'] - t\n except KeyError:\n pass\n sorted_dic[outer_key] = origin_dic[outer_key]\n return sorted_dic\n\n\ndef intrade(request):\n t0 = pc()\n cate_list = cate_name.keys()\n # fetch the latest crawled prices and volumes from redis\n all_cate_new = cache.get('all_cate_new')\n # fetch the earlier quotes from redis: open/high/low/close, sma, atr\n all_details = cache.get('all_details')\n # fetch the previous day's ranking from redis\n last_rank = cache.get('last_rank')\n\n intrade_result = {}\n # _vwma_sort = {} # only used for sorting by vwma\n for k in cate_list:\n detail = all_details[k]\n cate_new = all_cate_new[k]\n # if it is already past 15:00\n if datetime.now() > datetime(date.today().year, date.today().month, date.today().day, 15):\n vwma_close = np.array(all_details[k]['close'][-20:])\n vwma_voluma = np.array(all_details[k]['volume'][-20:])\n else:\n vwma_close = np.array(all_details[k]['close'][-19:]+[cate_new['close']])\n vwma_voluma = np.array(all_details[k]['volume'][-19:]+[cate_new['volume']/0.848])\n\n vwma = sum(vwma_close * vwma_voluma) / sum(vwma_voluma)\n dis_to_vwma = round((vwma_close[-1] / vwma - 1) / np.std(vwma_close[1:] / vwma_close[:-1]), 3)\n # intraday detail for each sector: i_info\n i_info = {\n 'vwma': vwma, # volume-weighted moving average\n 'dis_to_vwma': dis_to_vwma, # distance to the vwma\n 'c_value': detail['c_value'], # critical value for opening a position\n 'if_break': cate_new['close'] > detail['c_value'], # whether the critical value is exceeded\n 'dis_tobreak': detail['dist'], # distance to the critical value\n 'up_break_percent': round((cate_new['close'] - detail['c_value']) / detail['c_value'] * 100, 2), # percentage above the critical value\n 'cate_name': cate_name[k], # Chinese name of the sector\n 'last_rank': last_rank[k], # vwma rank at the previous day's close\n }\n intrade_result[k] = i_info\n\n sorted_intrade_result = dicsort_by_innerkey(intrade_result, 'dis_to_vwma')\n\n def get_above_and_near_cate(dic):\n above, near = {}, {}\n break_cate_list = []\n for k1, v1 in dic.items():\n if v1['if_break']:\n above[k1] = v1['up_break_percent']\n break_cate_list.append(k1)\n else:\n near[k1] = {'c_value': v1['c_value'], 'dis_tobreak': v1['dis_tobreak']}\n sorted_above_list = sorted(above.items(), key=operator.itemgetter(1), reverse=True)\n sorted_above = OrderedDict()\n for s in sorted_above_list:\n sorted_above[s[0]] = str(s[1]) + '%'\n sorted_near = dicsort_by_innerkey(near, 'dis_tobreak', reserve=False)\n return sorted_above, sorted_near, break_cate_list\n\n sorted_upcate, near_cate, break_cate = get_above_and_near_cate(intrade_result)\n\n context = {\n 'spend_time': str(round(pc()-t0, 2)) + 's',\n 'up999': sorted_upcate,\n 'current_time': datetime.now(),\n 'near_cate': near_cate, # sectors about to break out after the last trading day\n 'break_cate': break_cate, # list of breakout sectors\n 'vwma_output_dir': sorted_intrade_result, # vwma info and rank changes for all sectors, nested dict\n 'if_rank': 'Trading day %s, ranking not yet confirmed' % str(np.busday_offset(str(date.today()), 0, roll='backward')),\n }\n\n # if past 15:00 of the last trading day, push the rank to redis\n d_temp = [int(i) for i in str(np.busday_offset(str(date.today()), 0, roll='backward')).split('-')]\n trade_day = date(d_temp[0], d_temp[1], d_temp[2])\n if datetime.now() > datetime(trade_day.year, trade_day.month, trade_day.day, 15):\n vwma_rank = {k: sorted_intrade_result[k]['new_rank'] for k in cate_list}\n cache.set('last_rank', vwma_rank, timeout=None)\n context['if_rank'] = '%s ranking confirmed, written to cache' % str(np.busday_offset(str(date.today()), 0, roll='backward'))\n\n return render(request, 'stock/intrade.html', context)\n\n\ndef update_stock_data(request):\n text = get_criticals(20, 0.5)\n context = {\n 'content': text,\n }\n # time check\n _, to_sql = time15_and_tosql('VwmaRank')\n vwma_rank = cache.get('last_rank')\n # if there is no ranking in the cache, load it from the database\n rank_updatetime = 
str(VwmaRank.objects.order_by('-id')[0].vcreated_time)\n if vwma_rank is None:\n vwma_rank = {v.vcode: v.vrank for v in VwmaRank.objects.order_by('-id')[0:66]}\n cache.set('last_rank', vwma_rank, timeout=None)\n context['about_rank'] = 'No ranking in cache, loaded from database; data time: %s' % rank_updatetime\n else:\n if to_sql:\n trunk = []\n for k, v in vwma_rank.items():\n vr = VwmaRank()\n vr.vcode = k\n vr.vrank = v\n vr.vtime = date.today()\n trunk.append(vr)\n VwmaRank.objects.bulk_create(trunk)\n context['about_rank'] = '%s ranking confirmed, written to database' % str(np.busday_offset(str(date.today()), 0, roll='backward'))\n else:\n context['about_rank'] = 'Ranking data already up to date, created at %s' % rank_updatetime\n connection.close()\n\n return render(request, 'stock/update_stock_data.html', context)\n","sub_path":"stock/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"178536958","text":"import json\nfrom hashlib import sha256 as H\n\n# Serializes a list of JSON objects from a specific transaction\n# input(s): json object, term (input or output)\n# output(s): a serialization of the list of inputs or outputs\ndef serialize(tx, term):\n # load the json data\n data = json.loads(tx)\n s = []\n for t in data[term]:\n if term == \"input\":\n s.append(t[\"number\"])\n s.append(str(t[\"output\"][\"value\"]))\n s.append(t[\"output\"][\"pubkey\"])\n elif term == \"output\":\n s.append(str(t[\"value\"]))\n s.append(t[\"pubkey\"])\n return ''.join(s)\n\n\n# Generates a transaction number from a given transaction JSON object\n# input(s): a transaction JSON object\n# output(s): a serialization of the JSON elements into a number\ndef generate_number(tx):\n serials = []\n for ele in [\"input\", \"output\"]:\n res = serialize(tx, ele)\n serials.append(res)\n\n d = json.loads(tx)\n serials.append(d[\"sig\"])\n joinedSerials = \"\".join(serials)\n encodedSerials = joinedSerials.encode('utf-8')\n hashedSerials = H(encodedSerials)\n\n return hashedSerials\n\n\n# Serializes transaction, previous hash, and nonce value\n# input(s): transaction, prev hash, and nonce\n# output(s): string concatenation\ndef serialize_pre_block(tx, prev, nonce):\n serials = []\n for t in [\"number\", \"input\", \"output\", \"sig\"]:\n res = serialize(tx.jsonify(), t)\n serials.append(res)\n serials.append(prev)\n serials.append(str(nonce))\n joinedSerials = \"\".join(serials)\n\n return joinedSerials\n\n\n# creates a serialization of a Block object\n# input(s): b which is a Block object\n# output(s): string serialization of the Block attributes\ndef serialize_block(b):\n s = []\n s.append(b.tx.serialize_self())\n s.append(str(b.prev))\n s.append(str(b.nonce))\n s.append(str(b.pow))\n\n return ''.join(s)\n\n\n# Creates a block list in JSON\n# input(s): treenode block with highest height\n# output(s): list of dict blocks\ndef blocklist(tnode):\n currNode = tnode\n blockchain = []\n # with given block with highest height, iterate backwards to genesis\n while (currNode is not None):\n # create JSON from current block\n db = dictBlock(currNode)\n blockchain = [db] + blockchain\n currNode = currNode.prevBlock\n return blockchain\n\n\n# Helper function for test code for serializing list\n# input(s): list, term\n# output(s): serialized list\ndef serialize_list(l, term):\n s = []\n for ele in l:\n if term == \"input\":\n s.append(str(ele.number))\n s.append(str(ele.output.value))\n s.append(str(ele.output.pubkey))\n elif term == \"output\":\n s.append(str(ele.value))\n 
s.append(str(ele.pubkey))\n return ''.join(s)\n\n\n# Creates a JSON file for a block\n# input(s): a TreeNode (which contains a block)\n# output(s): a JSON representation of a block (from a TreeNode)\ndef JsonBlock(tnode):\n jsonBlock = {}\n # load json into dict\n data = json.loads(tnode.block.tx.jsonify())\n jsonBlock[\"tx\"] = data\n jsonBlock[\"prev\"] = tnode.block.prev\n jsonBlock[\"nonce\"] = str(tnode.block.nonce)\n jsonBlock[\"pow\"] = str(tnode.block.pow)\n return json.dumps(jsonBlock, indent=4)\n\n\n# Creates a dictionary representation of a block\n# input(s): a treenode\n# output(s): a dictionary represntation of a block\ndef dictBlock(tnode):\n dBlock = {}\n # load json into dictionary\n data = json.loads(tnode.block.tx.jsonify())\n dBlock[\"tx\"] = data\n dBlock[\"prev\"] = tnode.block.prev\n dBlock[\"nonce\"] = str(tnode.block.nonce)\n dBlock[\"pow\"] = str(tnode.block.pow)\n return dBlock\n","sub_path":"formatting.py","file_name":"formatting.py","file_ext":"py","file_size_in_byte":3659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"28131086","text":"import numpy as np\nfrom scipy.spatial import cKDTree\n\nclass SpatialDatabase(object):\n\n \"\"\"\n Spatial database object.\n This class is a spatial index of a collection of points in a (sheared) Domain (see Domain.py)\n Its purpose is to search for neighboring points efficiently. \n Children: cKDSpatial (uses ckD trees to find neighbors), LinkedListSpatial (uses linked list which\n rn are written in native python). \n \"\"\"\n \n def __init__(self,pts,Dom):\n \"\"\"\n Constructor. Inputs are the set of points and the Domain \n which are used to initialize the ptsprime variable\n \"\"\"\n self._Npts,_ = pts.shape;\n self.updateSpatialStructures(pts,Dom);\n\n def updateSpatialStructures(self,pts,Dom):\n \"\"\"\n Update all relevant spatial structures. In the general case,\n this is just updating the deformed coordinates of the points\n and the pointer to the domain object.\n Inputs: pts is an N x 3 array of pints in unprimed coordinates, \n Dom is the Domain object that we'll be doing the computation on\n \"\"\"\n self._ptsprime = Dom.primecoords(pts);\n self._Dom = Dom;\n \n def selfNeighborList(self,rcut):\n \"\"\"\n Compute a list of neighbors. 
Compute pairs\n of pts (i,j) that are a distance r w/safety factor apart in \n the primed (deformed) norm.\n General method is a quadratic loop\n \"\"\"\n rwsafety=rcut*self._Dom.safetyfactor(); # add the safety factor\n # Quadratic loop\n neighbors = [];\n for iPt in range(self._Npts):\n for jPt in range(iPt+1,self._Npts):\n rvecprime = self._ptsprime[iPt,:]-self._ptsprime[jPt,:];\n # Shift rvec so it's on [-L/2, L/2]^3\n rvecprime = self._Dom.MinPrimeShiftInPrimeCoords(rvecprime);\n if (np.linalg.norm(rvecprime) < rwsafety):\n neighbors.append([iPt,jPt]);\n return np.array(neighbors);\n \n def otherNeighborsList(self,other,rcut):\n \"\"\"\n Compute a list of neighbors between 2 different point sets.\n Pairs of pts (i,j) that are a distance r w/safety factor apart in\n the primed norm, where i is in self._pts and j is in other._pts\n General method is a quadratic loop\n \"\"\"\n rwsafety=rcut*self._Dom.safetyfactor(); # add the safety factor\n # Quadratic loop\n neighbors = [];\n for iPt in range(self._Npts):\n iNeighbors=[];\n for jPt in range(other._Npts):\n rvecprime = self._ptsprime[iPt,:]-other._ptsprime[jPt,:];\n # Shift rvec so it's on [-L/2, L/2]^3\n rvecprime = self._Dom.MinPrimeShiftInPrimeCoords(rvecprime);\n if (np.linalg.norm(rvecprime) < rwsafety):\n iNeighbors.append(jPt);\n neighbors.append(iNeighbors);\n return neighbors; \n\nclass ckDSpatial(SpatialDatabase):\n\n \"\"\"\n Child of SpatialDatabase that uses ckD trees to compute \n neighbors efficiently.\n \"\"\"\n \n def __init__(self,pts,Dom):\n \"\"\"\n Constructor. Initialize the kD tree.\n \"\"\"\n super().__init__(pts,Dom);\n # The super constructor will then call THIS child method to\n # updateSpatialStructures\n \n def updateSpatialStructures(self,pts,Dom):\n \"\"\"\n Update the kD tree using the set of points pts (an N x 3)\n array, and Domain object Dom.\n \"\"\"\n ptsprime = Dom.primecoords(pts);\n # Mod the points so they are on the right bounds [0,Lx] x [0,Ly] x [0,Lz]\n # (needed for the call to kD tree)\n ptsprime = Dom.ZeroLShiftInPrimeCoords(ptsprime);\n # The domain can be periodic or free space. If periodic, pass\n # that information to the kD tree.\n # Update the KD tree\n self._Dom = Dom;\n self._myKDTree = cKDTree(ptsprime,boxsize=Dom.getPeriodicLens());\n\n def selfNeighborList(self,rcut):\n \"\"\"\n Get the neighbors within an Eulerian distance rcut (same \n as rcut*safety factor in the deformed norm) within the\n ckD tree pointTree. \n Inputs: distance rcut\n Output: pairs of neighbors as an nPairs x 2 array\n \"\"\"\n rwsafety=rcut*self._Dom.safetyfactor(); # add the safety factor\n neighbors = self._myKDTree.query_pairs(rwsafety,output_type='ndarray');\n return neighbors;\n\n def otherNeighborsList(self,other,rcut):\n \"\"\"\n Return a list of neighbors on another ckDtree that are\n within a distance rcut from the points on self._mkDTree.\n Inputs: other ckDSpatial object with its kD tree, Eulerian \n distance rcut\n Outputs: nPoints list of neighbors for each point\n \"\"\"\n rwsafety=rcut*self._Dom.safetyfactor(); # add the safety factor\n return self._myKDTree.query_ball_tree(other._myKDTree,rwsafety);\n\nclass LinkedListSpatial(SpatialDatabase):\n\n \"\"\"\n Child of SpatialDatabase that uses linked lists to do neighbor \n searches. \n \"\"\"\n \n def __init__(self,pts,Dom):\n super().__init__(pts,Dom);\n \n def selfNeighborList(self,rcut):\n \"\"\"\n Get the neighbors within an Eulerian distance rcut using\n the linked list construction. 
\n Inputs: rcut\n Output: pairs of neighbors as an nPairs x 2 array\n \"\"\"\n rwsafety=rcut*self._Dom.safetyfactor(); # add the safety factor\n neighbors = [];\n nbins = self.calcnBins(rwsafety);\n ptbins = self.binsbyP(nbins);\n #print('Number of bins (%d, %d, %d)' %(nbins[0], nbins[1], nbins[2]));\n # First and next linked lists\n bfirst, pnext = self.binPoints(nbins);\n # Loop over points. For each point, check neighboring bins\n # for other points.\n for iPt in range(self._Npts): # loop over points\n tbin=[ptbins[iPt,:]];\n neighBins = LinkedListSpatial.neighborBins(tbin,nbins);\n for iSn in range(len(neighBins)): # loop over neighboring bins\n jPt = bfirst[neighBins[iSn]];\n while (jPt !=-1):\n rvecprime = self._ptsprime[iPt,:]-self._ptsprime[jPt,:];\n # Shift rvec so it's on [-L/2, L/2]^3\n rvecprime = self._Dom.MinPrimeShiftInPrimeCoords(rvecprime);\n # Only include points that are close enough (and include\n # each pair only once)\n if (jPt > iPt and np.linalg.norm(rvecprime) < rwsafety):\n neighbors.append([iPt,jPt]);\n jPt = pnext[jPt];\n neighbors = np.array(neighbors); \n return neighbors;\n\n def otherNeighborsList(self,other,rcut):\n \"\"\"\n Get the neighbors of self within the object other\n for an Eulerian distance rcut. Using linked lists, \n we iterate\n Inputs: other set of points, rcut\n Output: Npts list of neighbors for each point\n \"\"\"\n rwsafety=rcut*self._Dom.safetyfactor(); # add the safety factor\n neighbors = [];\n nbins = self.calcnBins(rwsafety);\n # Bin the self points\n ptbins = self.binsbyP(nbins);\n print('Number of bins (%d, %d, %d)' %(nbins[0], nbins[1], nbins[2]));\n # First and next linked lists for the OTHER points\n bfirst, pnext = other.binPoints(nbins);\n # Loop over points. For each point, check neighboring bins\n # for points in other.\n for iPt in range(self._Npts): # loop over points\n iNeighbors=[];\n tbin=[ptbins[iPt,:]];\n neighBins = LinkedListSpatial.neighborBins(tbin,nbins);\n for iSn in range(len(neighBins)): # loop over neighboring bins\n jPt = bfirst[neighBins[iSn]]; # point in OTHER\n while (jPt !=-1):\n rvecprime = self._ptsprime[iPt,:]-other._ptsprime[jPt,:];\n # Shift rvec so it's on [-L/2, L/2]^3\n rvecprime = self._Dom.MinPrimeShiftInPrimeCoords(rvecprime);\n # Only include points that are close enough\n if (np.linalg.norm(rvecprime) < rwsafety):\n iNeighbors.append(jPt);\n jPt = pnext[jPt];\n neighbors.append(iNeighbors);\n return neighbors;\n\n def calcnBins(self,binlen):\n \"\"\"\n Calculate the number of bins in each direction for \n a given bin edge length rcut.\n \"\"\"\n nbins = np.int_(self._Dom.getLens()/binlen);\n return nbins;\n \n def binsbyP(self,nbins):\n \"\"\"\n Get the bins for each point. \n Inputs: nbins (3 array with number of bins in each direction)\n Return: the bin number for each point as an array as an N x 3 \n array where the columns are the bin numbers in each direction.\n \"\"\"\n # Shift all coordinates so they are on [0,L]^3 (assuming this\n # will only be necessary for a periodic domain\n coords = self._Dom.ZeroLShiftInPrimeCoords(self._ptsprime);\n # Get the bin for each pt\n dperbin = self._Dom.getLens()/nbins;\n bins = np.int_(coords/dperbin);\n bins = np.mod(bins,nbins); # takes care of any rounding issues\n return bins; \n\n def binPoints(self,nbins):\n \"\"\" \n Bin the points, i.e. 
create the linked lists first and next.\n Inputs = nbins (3 array with number of bins in each direction), \n Returns the 2 linked lists first (for the first point in each bin) and\n next (for the next point in the bin).\n \"\"\"\n bins = self.binsbyP(nbins);\n sbins = bins[:,0]+nbins[0]*bins[:,1]+nbins[0]*nbins[1]*bins[:,2];\n # Form the linked lists\n bfirst = -np.ones(nbins[0]*nbins[1]*nbins[2],dtype=np.int);\n pnext = -np.ones(self._Npts,dtype=np.int);\n for iPt in range(self._Npts):\n if (bfirst[sbins[iPt]] == -1):\n bfirst[sbins[iPt]] = iPt;\n else:\n jPt = bfirst[sbins[iPt]];\n while (jPt !=-1):\n jPtprev = jPt;\n jPt = pnext[jPt];\n pnext[jPtprev] = iPt;\n return bfirst, pnext;\n \n @staticmethod\n def neighborBins(tbin,nbins):\n \"\"\"\n Neighbor bins for each bin\n Input: the bin as a 3 array (iBin,jBin,kBin)\n Output: list of at most 27 (possibly less if there are less\n than 3 bins in a direction) neighbor bins as a 27 array. \n Note: it is ok to have periodic neighbors for a general free\n space domain since those points will just get dropped anyway. \n \"\"\"\n neighbors = tbin+np.int_([[0,0,0],[1,0,0],[-1,0,0],[0,1,0],\\\n [1,1,0],[-1,1,0],[0,-1,0],[1,-1,0],[-1,-1,0],[0,0,1],\\\n [1,0,1],[-1,0,1],[0,1,1],[1,1,1],[-1,1,1],[0,-1,1],\\\n [1,-1,1],[-1,-1,1],[0,0,-1],[1,0,-1],[-1,0,-1],\\\n [0,1,-1,],[1,1,-1],[-1,1,-1],[0,-1,-1],[1,-1,-1],\\\n [-1,-1,-1]]);\n neighbors = np.mod(neighbors,nbins);\n _, idx = np.unique(neighbors,axis=0,return_index=True);\n neighbors = neighbors[np.sort(idx)];\n sN = neighbors[:,0]+nbins[0]*neighbors[:,1]+nbins[0]*nbins[1]*neighbors[:,2];\n return sN;\n \n\n\n\n \n","sub_path":"Python/SpatialDatabase.py","file_name":"SpatialDatabase.py","file_ext":"py","file_size_in_byte":11414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"216529719","text":"import os\nimport pygame as pg\n\nimage_width = 0\nimage_height = 0\n\n# 0: Dead, 1: Idle, 2: Jump, 3: Run, 4: Walk\nimage_stages = [\"Dead\", \"Fall\", \"Hurt\", \"Idle\", \"Jump\", \"Run\", \"Slide\", \"Walk\"]\nsprite_stages = {}\n\n\ndef load_images(path, name_of_sprite):\n global image_width\n image_width = 200\n global image_height\n image_height = 150\n name_of_file = path + name_of_sprite\n\n # list that contains sprite images\n images_sprite_dead = read_pictures(name_of_file, 'Dead')\n images_sprite_fall = read_pictures(name_of_file, 'Fall')\n images_sprite_hurt = read_pictures(name_of_file, 'Hurt')\n images_sprite_idle = read_pictures(name_of_file, 'Idle')\n images_sprite_jump = read_pictures(name_of_file, 'Jump')\n images_sprite_run = read_pictures(name_of_file, 'Run')\n images_sprite_slide = read_pictures(name_of_file, 'Slide')\n images_sprite_walk = read_pictures(name_of_file, 'Walk')\n\n # dictionary of sprite movements\n global sprite_stages\n sprite_stages = {0: images_sprite_dead, 1: images_sprite_fall, 2: images_sprite_hurt,\n 3: images_sprite_idle, 4: images_sprite_jump, 5: images_sprite_run,\n 6: images_sprite_slide, 7: images_sprite_walk}\n\n\ndef read_pictures(base_path, picture_name):\n images = []\n # list all files in directory\n for file_name in os.listdir(base_path):\n if os.path.isfile(os.path.join(base_path, file_name)):\n if picture_name in file_name:\n image = pg.image.load(os.path.join(base_path, file_name))\n image = pg.transform.scale(image, (image_width, image_height))\n images.append(image)\n\n return images\n\n\ndef get_index_sprite_by_name(sprite_name):\n return 
image_stages.index(sprite_name)\n","sub_path":"v11/StageAnimWithText/load_sprites.py","file_name":"load_sprites.py","file_ext":"py","file_size_in_byte":1763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"602025148","text":"import logging\nimport os\nimport uuid\n\nfrom pypedream.job import Job, repeat, required, optional, conditional, stripsuffix\n\n\nclass QDNASeq(Job):\n def __init__(self, input_bam, output_segments, background=None):\n Job.__init__(self)\n self.input = input_bam\n self.output = output_segments\n self.background = background\n self.jobname = \"qdnaseq\"\n\n def command(self):\n qdnaseq_cmd = \"qdnaseq.R \" + \\\n required(\"--bam \", self.input) + \\\n required(\"--output \", self.output) + \\\n optional(\"--background \", self.background)\n\n return qdnaseq_cmd\n\n\nclass QDNASeq2Bed(Job):\n def __init__(self, input_segments, output_bed, genes_gtf):\n Job.__init__(self)\n self.input_segments = input_segments\n self.output_bed = output_bed\n self.genes_gtf = genes_gtf\n\n def command(self):\n qdnaseq2bed_cmd = \"qdnaseq2bed.py -n segments \" + \\\n required(\"-i \", self.input_segments) + \\\n \"| sort -k1,1 -k2,2n \" + \\\n \"| bedtools median -c 5 -o mean \" + \\\n required(\"-a \", self.genes_gtf) + \" -b - \" + \\\n \"| cnvgtf2bed.py -i /dev/stdin -n gene_id \" + \\\n required(\"> \", self.output_bed)\n return qdnaseq2bed_cmd\n\n\nclass AlasccaCNAPlot(Job):\n def __init__(self):\n Job.__init__(self)\n self.input_cnr = None\n self.input_cns = None\n self.input_germline_vcf = None\n self.input_somatic_vcf = None\n self.chrsizes = None\n self.output_png = None\n self.output_json = None\n self.jobname = \"alascca-cna\"\n\n def command(self):\n return \"alasccaCNA.R \" + \\\n required(\"--cnr \", self.input_cnr) + \\\n required(\"--cns \", self.input_cns) + \\\n required(\"--germlinevcf \", self.input_germline_vcf) + \\\n required(\"--somaticvcf \", self.input_somatic_vcf) + \\\n required(\"--chrsizes \", self.chrsizes) + \\\n required(\"--png \", self.output_png) + \\\n required(\"--json \", self.output_json)\n\n\nclass CNVkit(Job):\n \"\"\"Runs CNVkit. 
Either reference or targets_bed must be supplied\"\"\"\n\n def __init__(self, input_bam, output_cns, output_cnr, reference=None, targets_bed=None, scratch=\"/tmp\"):\n self.input_bam = input_bam\n self.reference = reference\n self.output_cnr = output_cnr\n self.output_cns = output_cns\n self.targets_bed = targets_bed\n self.scratch = scratch\n\n def command(self):\n if not self.reference and not self.targets_bed:\n raise ValueError(\"Either reference or targets_bed must be supplied\")\n if self.reference and self.targets_bed:\n raise ValueError(\"Supply either reference OR targets_bed\")\n\n tmpdir = \"{}/cnvkit-{}\".format(self.scratch, uuid.uuid4())\n sample_prefix = stripsuffix(os.path.basename(self.input_bam), \".bam\")\n cnvkit_cmd = \"cnvkit.py batch \" + required(\"\", self.input_bam) + \\\n optional(\"-r \", self.reference) + \\\n conditional(self.targets_bed, \"-n\") + \\\n optional(\"-t \", self.targets_bed) + \\\n required(\"-d \", tmpdir)\n copy_cns_cmd = \"cp {}/{}.cns \".format(tmpdir, sample_prefix) + required(\" \", self.output_cns)\n copy_cnr_cmd = \"cp {}/{}.cnr \".format(tmpdir, sample_prefix) + required(\" \", self.output_cnr)\n rm_cmd = \"rm -r {}\".format(tmpdir)\n return \" && \".join([cnvkit_cmd, copy_cns_cmd, copy_cnr_cmd, rm_cmd])\n","sub_path":"autoseq/tools/cnvcalling.py","file_name":"cnvcalling.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"558697681","text":"values = []\nwith open('input.txt', 'r') as f:\n line = f.readline()\n while line:\n values.append(int(line))\n line = f.readline()\n\nfor i in range(len(values)):\n for j in range(i, len(values)):\n for k in range(j, len(values)):\n if (values[i] + values[j] + values[k]) == 2020:\n print(values[i] * values[j] * values[k])\n exit(0)\nprint(\"Nothing found :(\")\n","sub_path":"day01/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"221699867","text":"'''Uses particle-in-cell algorithms from PyPIC for\nspace charge modelling in transverse 2.5D and 3D.\n\nPyPIC can be found under\nhttps://github.com/PyCOMPLETE/PyPIC .\nNB: the (feature/redesign) branch is required for this!\n\n@authors: Adrian Oeftiger\n@date: 18.01.2016\n'''\n\n\n\nimport numpy as np\nfrom scipy.constants import c\n\nfrom . import Element\n\nfrom ..general import pmath as pm\n\n\ndef align_particles(beam, mesh_3d):\n '''Sort all particles by their mesh node IDs.'''\n ids = mesh_3d.get_node_ids(beam.x, beam.y, beam.z_beamframe)\n permutation = pm.argsort(ids)\n beam.reorder(permutation)\n # node id array has changed by now!\n\ndef get_bounds(beam, mesh_3d):\n '''Determine indices of sorted particles for each cell, i.e.\n lower and upper index bounds.\n '''\n seq = pm.seq(mesh_3d.n_nodes)\n ids = mesh_3d.get_node_ids(beam.x, beam.y, beam.z_beamframe)\n lower_bounds = pm.searchsortedleft(ids, seq)\n upper_bounds = pm.searchsortedright(ids, seq)\n return lower_bounds, upper_bounds\n\nclass SpaceChargePIC(Element):\n '''Transverse slice-by-slice (2.5D) or full (3D) space charge using\n a particle-in-cell algorithm via PyPIC. Uses a fixed 3D mesh\n with respect to beam.z_beamframe , i.e. 
the mesh does not adapt\n and remains constant.\n '''\n\n def __init__(self, length, pypic_algorithm, sort_particles=False,\n *args, **kwargs):\n '''Arguments:\n - length: interaction length over which the space charge\n force is integrated.\n - pypic_algorithm: PyPIC.pypic.PyPIC(_GPU) instance which\n has the particle-in-cell algorithm encoded. This has to\n be set up by the user beforehand (i.e. the mesh,\n poisson solver, particle-to-mesh deposition method etc.).\n - sort_particles: determines whether to sort the particles\n by their mesh ID. This may speed up the PyPIC\n particle-to-mesh and mesh-to-particles methods\n due to coalesced memory access, especially on the GPU\n (test the timing for your parameters though!).\n\n (NB: sort_particles=True is necessarily required for the\n PyPIC_GPU.sorted_particles_to_mesh method.)\n '''\n self.length = length\n self.pypic = pypic_algorithm\n self.sort_particles = sort_particles\n self.is_25D = getattr(self.pypic.poissonsolver, 'is_25D', False)\n if self.pypic.mesh.dimension != 3:\n raise RuntimeError(\n '2.5D / 3D space charge requires a three-dimensional mesh!')\n\n def track(self, beam, pypic_state=None):\n mesh = self.pypic.mesh\n\n solve_kwargs = {\n 'charge': beam.charge_per_mp,\n 'state': pypic_state,\n }\n if self.is_25D:\n # 2.5D: macro-particle charge becomes line density in beam frame\n # (in 3D this is implicit via rho=mesh_charges/mesh_3d.volume_elem)\n solve_kwargs['charge'] /= mesh.dz\n\n if self.sort_particles:\n align_particles(beam, mesh)\n\n solve_kwargs['lower_bounds'], solve_kwargs['upper_bounds'] = \\\n get_bounds(beam, mesh)\n\n # electric fields for each particle in beam frame [V/m]\n force_fields = self.pypic.pic_solve(\n beam.x, beam.y, beam.z_beamframe, **solve_kwargs)\n\n # we want force F = q * (1 - beta^2) E_r where E_r is in lab frame\n # --> Lorentz boost E_r from beam frame to lab frame (*gamma)\n # --> include magnetic fields (1 - beta^2) = 1/gamma^2\n # ==> overall factor 1/gamma\n force_fields[0] /= beam.gamma\n force_fields[1] /= beam.gamma\n\n # integrate over dt and apply the force to each charged particle,\n # p0 comes from kicking xp=p_x/p0 instead of p_x\n kick_factor = (self.length / (beam.beta*c) * beam.charge / beam.p0)\n\n beam.xp += force_fields[0] * kick_factor\n beam.yp += force_fields[1] * kick_factor\n if not self.is_25D:\n # need 1/gamma^2: one gamma factor is contained in d/dz_beamframe\n # gradient in PyPIC, another gamma factor included here:\n beam.dp += force_fields[2] * kick_factor/beam.gamma\n\n","sub_path":"PyHEADTAIL/spacecharge/pypic_spacecharge.py","file_name":"pypic_spacecharge.py","file_ext":"py","file_size_in_byte":4238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"269449929","text":"import jax\nimport jax.numpy as jnp\nfrom jax import random\nfrom jax.experimental import optimizers\n\nimport os, sys\nsys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))\n\nfrom jaxmeta.loss import *\n\n# name of job\nNAME = \"1\"\n\n# random key\nkey = random.PRNGKey(1)\n\n# network config\ndirect_layers = [2] + [32]*4 + [2] \ndirect_c0 = 1.0\ndirect_w0 = jnp.array([[1.0, 1.0]]).T\n\ninverse_layers = [1] + [8]*4 + [1]\ninverse_c0 = 1.0\ninverse_w0 = jnp.array([[1.0]]).T\n\n# network training\nmetaloss = mae\noptimizer = optimizers.adam\nlr = 1e-2\nweights = {\n\t\"c1\": 1.0,\n\t\"c2\": 1.0,\n\t\"d1\": 1.0,\n\t\"d2\": 1.0,\n\t\"l1\": 1e-8,\n\t\"l2\": 1e-8,\n}\nbatch_size = {\n\t\"dirichlet\": 
300,\n\t\"collocation\": 20100,\n}\niterations = 200000\nprint_every = 1000\nsave_every = 10000\nloss_names = [\"Loss\", \"c1\", \"c2\", \"d1\", \"d2\", \"l1_reg\", \"l2_reg\"]\nlog_file = None\n\n\n# data\nn_data = {\n\t\"i\": 100,\n\t\"b\": 100,\n\t\"cx\": 201, \n\t\"ct\": 100,\n}","sub_path":"examples/Goldstein_Taylor_inverse/KL_one_term_coefficient/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"107957788","text":"import pandas as pd\nimport pyodbc, os\nimport datetime\n\ndef mod_cols_name(df):\n cols = df.columns.to_list()\n sqlkey = ['ADD','ALTER','ALL','AND','ANY',\n 'AS','ASC','BETWEEN','CASE','CHECK','COLUMN','CONSTRAINT',\n 'CREATE','DATABASE','DEFAULT','DELETE','DESC','DISTINCT','DROP','EXEC','EXISTS','FROM',\n 'HAVING','IN','INDEX','JOIN','LIKE','LIMIT','NOT','OR','PROCEDURE',\n 'ROWNUM','SELECT','SET','TABLE','TOP','UNION','UNIQUE','UPDATE','VALUES','VIEW','WHERE']\n for i in range(len(cols)):\n st = cols[i]\n stmod = st.replace(' ','_')\n for n in sqlkey:\n if stmod == n:\n xx = '_' + stmod\n stmod = xx\n if st != stmod:\n df = df.rename(columns = {st:stmod})\n return df\n\ndef sql_lstyp(d_type):\n addID = \"NULL\"\n if d_type == 'Int64':\n return \"INT \" + addID\n elif d_type == 'datetime64[ns]':\n return \"DATETIME \" + addID\n elif d_type == 'Float64':\n return \"FLOAT \" + addID\n else:\n return \"TEXT \" + addID\n\ndef CT_MSSQL(conn, tablename, list_col, list_type = []):\n st = \"\"\n finalstr = ''\n x = \"\"\n cur = conn.cursor()\n try:\n cur.execute('select 1 from ' + tablename)\n print('table already exists')\n return\n except:\n for i in range(len(list_col)):\n x = ''\n col = list_col[i]\n if len(list_type) != 0:\n lsty = list_type[i]\n x = '\"' + col.replace(\" \",\"_\") + '\" ' + str(lsty)\n else:\n x = '\"' + col.replace(\" \",\"_\") + '\" TEXT NULL'\n if st == \"\":\n addsl = \" SL INT PRIMARY KEY IDENTITY (1, 1), \"\n st = 'CREATE TABLE \"' + tablename + '\" (' + str(x)\n else:\n st = st + ',' + str(x)\n else:\n finalstr = st + ')'\n try:\n cur.execute(finalstr)\n conn.commit()\n cur.close()\n print('table created successfully with cmd', finalstr)\n except:\n cur.close()\n print('table creation failed', finalstr)\n\ndef df_dtype_conv(df):\n ndf = df.convert_dtypes()\n cols = ndf.columns.to_list()\n for i in range(len(cols)):\n col = cols[i]\n if ndf[col].dtypes == 'string':\n try:\n ndf[col] = ndf.apply(lambda x : pd.to_datetime(x[col]).strftime(\"%Y-%m-%d %H:%M:%S\"), axis = 1)\n ndf[col] = pd.to_datetime(ndf[col])\n except:\n pass\n return ndf\n\ndef is_table_exist(tbl, conn):\n qry = \"SELECT 1 FROM \" + tbl\n try:\n cr = conn.cursor()\n rs = cr.execute(qry)\n print('table already exists')\n except:\n print('table does not exist')\n\ndef CreateTable_MSSQL(df, tablename, conn):\n dfx = mod_cols_name(df)\n ndf = df_dtype_conv(dfx)\n lscol = ndf.columns.to_list()\n lstype = []\n q = 0\n for col in range(len(lscol)):\n q = q + 1\n try:\n cl = lscol[col]\n dtyp = ndf[cl].dtypes\n lstype.append(sql_lstyp(dtyp))\n except:\n print('error for ', q, ' ', ndf[cl].dtypes)\n CT_MSSQL(conn, tablename, lscol, lstype)\n return 1\n \ndef MsSql(user, password, host, db):\n #socdb = \"Driver={SQL Server};SERVER=192.168.88.121;DATABASE=SOC_Roster;UID=sa;PWD=Robi456&\"\n cstr = \"Driver={SQL Server};SERVER=\" + host + \";DATABASE=\" + db + \";UID=\" + user + \";PWD=\" + password\n conn = pyodbc.connect(cstr)\n return conn\n \n\n\n#lser = df_to_sql(ndf, 
'om1', 'TAXW3', conn, oncolumn = 'ALL', bycolumn = ['CustomAttr15'])\n","sub_path":"AOmPy_o_/sql_o_/create_table/tbl_mssql.py","file_name":"tbl_mssql.py","file_ext":"py","file_size_in_byte":3670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"191582366","text":"#!/usr/bin/env python\r\n#-*- coding: utf-8 -*-\r\nimport wx\r\nimport wx.html\r\n\r\nclass SketchAbout(wx.Dialog):\r\n text = '''\r\n \r\n \r\n
<html>\r\n <body bgcolor=\"#ACAA60\">\r\n <center><table bgcolor=\"#455481\" width=\"100%\" cellspacing=\"0\"\r\n cellpadding=\"0\" border=\"1\">\r\n <tr>\r\n <td align=\"center\"><h1>Sketch!</h1></td>\r\n </tr>\r\n </table>\r\n </center>\r\n <p><b>Sketch</b> is a demonstration program for <b>wxPython In Action</b>\r\n Chapter 7. It is based on the SuperDoodle demo included with wxPython,\r\n available at <a href=\"http://www.wxpython.org/\">http://www.wxpython.org/</a>\r\n </p>\r\n\r\n <p><b>SuperDoodle</b> and <b>wxPython</b> are brought to you by\r\n <b>Robin Dunn</b> and <b>Total Control Software</b>, Copyright\r\n &copy; 1997-2006.</p>\r\n </body>\r\n </html>
\r\n \r\n '''\r\n\r\n def __init__(self, parent):\r\n wx.Dialog.__init__(self, parent, -1, 'About Sketch', size=(440, 400))\r\n html = wx.html.HtmlWindow(self) # create an HtmlWindow instance\r\n html.SetPage(self.text)\r\n button = wx.Button(self, wx.ID_OK, 'Okay')\r\n\r\n sizer = wx.BoxSizer(wx.VERTICAL)\r\n # add the html window, expanding horizontally with a margin on all sides\r\n sizer.Add(html, 1, wx.EXPAND|wx.ALL, 5)\r\n # add the Okay button, centered in the box\r\n sizer.Add(button, 0, wx.ALIGN_CENTER|wx.ALL, 5)\r\n\r\n self.SetSizer(sizer)\r\n self.Layout() # force the dialog to recompute the size and position of its controls\r\n\r\n\r\nif __name__ == '__main__':\r\n app = wx.PySimpleApp()\r\n dlg = SketchAbout(None)\r\n # any button inside the dialog seems to close it and return the button id\r\n dlg.ShowModal()\r\n# print dlg.ShowModal()\r\n# print wx.ID_OK\r\n app.MainLoop()\r\n","sub_path":"tech/python/wxPython/wxPython入门/6_基本的建造部件/old/6.4_给程序一个好看的外观/建造一个关于(about)框.py","file_name":"建造一个关于(about)框.py","file_ext":"py","file_size_in_byte":1715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"582357843","text":"def is_prime(n):\n \"\"\"Exercise 5.1\"\"\"\n if n < 2: # 0 and 1 are not prime\n return False\n x = 1\n while x <= n:\n if n%x == 0:\n if x != n and x != 1:\n return False\n x += 1\n return True\n\ndef prime_factorization(n):\n \"\"\"Exercise 5.2\"\"\"\n _ = []\n x = 2\n while x <= n:\n if n%x == 0:\n n = n//x\n _.append(x)\n else:\n x += 1\n return _\n\ndef algorytm_euklidesa(a, b):\n \"\"\"Exercise 5.3\"\"\"\n while b > 0:\n r = b\n b = a%b\n a = r\n return a \n\nis_prime(2)\n","sub_path":"programowanie/zestaw5.py","file_name":"zestaw5.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"167764747","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Event',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(help_text=b'Name of the event', unique=True, max_length=30, verbose_name=b'Event name')),\n ('booking_starts_at', models.DateTimeField(help_text=b'Booking will be enabled at this time', verbose_name=b'Booking start')),\n ('booking_ends_at', models.DateTimeField(help_text=b'Booking will be disabled at this time', verbose_name=b'Booking ends')),\n ('starts_at', models.DateTimeField(help_text=b'Is not displayed in public, but used for selections in admin tools. 
If not given, end of booking is used', null=True, verbose_name=b'Starts at', blank=True)),\n ],\n ),\n ]\n","sub_path":"events/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"396294698","text":"import requests\nfrom JavPy.embed.BaseEmbed import BaseEmbed\nfrom JavPy.embed.fvs_io import fvs_io\nimport json\nfrom JavPy.utils.config import proxy\n\n\nclass playfinder_xyz(BaseEmbed):\n @staticmethod\n def decode(url):\n video_id = url.split(\"#\")[0].split(\"/v/\").pop().split(\"/\")[0]\n rsp = requests.post(\n \"https://playfinder.xyz/api/source/%s\" % video_id,\n r'{r: \"\", d: \"smartshare.tv\"}',\n proxies=proxy\n )\n obj = json.loads(rsp.text)\n url = fvs_io.decode(obj[\"data\"][-1][\"file\"])\n return url\n\n @staticmethod\n def pattern(url):\n if \"playfinder.xyz\" in url:\n return True\n return False\n\n\nif __name__ == '__main__':\n print(playfinder_xyz.decode(\"https://playfinder.xyz/v/7q970kx-wog#poster=https://findercdn.me/cdn/movie/s1no-1style-ssni-351-yoshitaka-nene-the-adviser-of-the-bad-female-teacher-was-a-humiliation-tennis-club-with-only-devil-pupils_1542526435.png\"))","sub_path":"JavPy/embed/playfinder_xyz.py","file_name":"playfinder_xyz.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"52424915","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.4 (3310)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.macosx-10.10-x86_64/egg/boto/route53/hostedzone.py\n# Compiled at: 2015-11-24 05:02:18\n# Size of source mod 2**32: 1961 bytes\n\n\nclass HostedZone(object):\n\n def __init__(self, id=None, name=None, owner=None, version=None, caller_reference=None):\n self.id = id\n self.name = name\n self.owner = owner\n self.version = version\n self.caller_reference = caller_reference\n\n def startElement(self, name, attrs, connection):\n pass\n\n def endElement(self, name, value, connection):\n if name == 'Id':\n self.id = value\n else:\n if name == 'Name':\n self.name = value\n else:\n if name == 'Owner':\n self.owner = value\n else:\n if name == 'Version':\n self.version = value\n else:\n if name == 'CallerReference':\n self.caller_reference = value\n else:\n setattr(self, name, value)","sub_path":"pycfiles/boto_rsync-0.8.1.tar/hostedzone.cpython-34.py","file_name":"hostedzone.cpython-34.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"21052480","text":"#================================================================\n# Rerun PID Reco + Remake ProtoParticles\n#================================================================\nfrom Configurables import ( DaVinci, RecSysConf, GaudiSequencer,\n ProcessPhase, PhysConf )\n\nfrom STTools import STOfflineConf\nSTOfflineConf.DefaultConf().configureTools()\n\n# Create the top level Conf object and set some general options from DV\nrConf = RecSysConf(\"RecSysConf\")\nDaVinci().setOtherProps(rConf,[\"Simulation\",\"DataType\"])\n\n# Only run PID + Protoparticles\nrConf.RecoSequence = [\"CALO\",\"PROTO\"]\nrConf.SkipTracking = True\nPhysConf().CaloReProcessing = True\n\n# list of algs to prepend to DV\npalgs = [ ]\n\n# Create the Reco process phase\nreco = ProcessPhase(\"Reco\")\npalgs += [reco]\n\n# 
Re-pack the new CALO output\nfrom Configurables import CaloDstPackConf\ncaloPackSeq = GaudiSequencer(\"CaloPacking\")\ncaloPack = CaloDstPackConf ( Enable = True )\ncaloPack.Sequence = caloPackSeq\ncaloPack.AlwaysCreate = True\ncaloPack.EnableChecks = False\ncaloPack.ClearRegistry = False\npalgs += [caloPackSeq]\n\n# Pack the new ProtoParticles\nfrom Configurables import PackProtoParticle\npackChargedPs = PackProtoParticle( name = \"PackChargedProtos\",\n AlwaysCreateOutput = True,\n ClearRegistry = False,\n InputName = \"/Event/Rec/ProtoP/Charged\",\n OutputName = \"/Event/pRec/ProtoP/Charged\",\n EnableCheck = False )\npackNeutralPs = PackProtoParticle( name = \"PackNeutralProtos\",\n AlwaysCreateOutput = True,\n ClearRegistry = False,\n InputName = \"/Event/Rec/ProtoP/Neutrals\",\n OutputName = \"/Event/pRec/ProtoP/Neutrals\",\n EnableCheck = False )\npalgs += [packChargedPs,packNeutralPs]\n\n# Add to the start of the DV main sequence\n#DaVinci().prependToMainSequence(palgs)\nfrom Configurables import GaudiSequencer\ninit = GaudiSequencer(\"PhysInitSeq\")\ninit.Members+= palgs\n\n# Disable on-demand unpacking of locations created by the above\nfrom Gaudi.Configuration import appendPostConfigAction\ndef removeUnpacking():\n from Configurables import DataOnDemandSvc, CaloProcessor\n DataOnDemandSvc().AlgMap.pop( \"/Event/Rec/ProtoP/Neutrals\", None )\n DataOnDemandSvc().AlgMap.pop( \"/Event/Rec/ProtoP/Charged\", None )\n DataOnDemandSvc().AlgMap.pop( \"/Event/Rec/Calo/Electrons\", None )\n DataOnDemandSvc().AlgMap.pop( \"/Event/Rec/Calo/Photons\", None )\n DataOnDemandSvc().AlgMap.pop( \"/Event/Rec/Calo/MergedPi0s\", None )\n DataOnDemandSvc().AlgMap.pop( \"/Event/Rec/Calo/SplitPhotons\", None )\nappendPostConfigAction( removeUnpacking )\n","sub_path":"DaVinciDev_v39r1/Phys/StrippingSelections/tests/users/DV-RedoCaloPID-Stripping21.py","file_name":"DV-RedoCaloPID-Stripping21.py","file_ext":"py","file_size_in_byte":2944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"134424424","text":"import cv2, time, itertools\nfrom numpy import *\nfrom math import *\n\nfrom mcvgui.core.controllers.OTPBase import *\n\n\nclass Blob(object): \n\n\n def draw(self, frame): cv2.polylines(frame, array( [self._contour] ), True, (255,255,0), 2)\n\n def distanceTo(self, blob):\n p0 = self._centroid\n p1 = blob._centroid\n return math.sqrt((p0[0] - p1[0])**2 + (p0[1] - p1[1])**2)\n\n def angleBetween(self, previous_blob, next_blob):\n if isinstance( previous_blob, tuple ) and isinstance( next_blob, tuple ):\n pt0, pt1, pt2 = previous_blob, self._centroid, next_blob\n else:\n pt0, pt1, pt2 = previous_blob._centroid, self._centroid, next_blob._centroid\n dx1 = pt0[0] - pt1[0]\n dy1 = pt0[1] - pt1[1]\n dx2 = pt2[0] - pt1[0]\n dy2 = pt2[1] - pt1[1]\n nom = dx1*dx2 + dy1*dy2\n denom = math.sqrt( (dx1*dx1 + dy1*dy1) * (dx2*dx2 + dy2*dy2) + 1e-10 )\n ang = nom / denom\n return math.degrees( math.acos(ang) )\n\n\n\n\nclass OTPFindBlobs(OTPBase):\n\n _param_min_area = 0\n _param_max_area = 100000000\n \n def __init__(self, **kwargs):\n super(OTPFindBlobs, self).__init__(**kwargs)\n self._param_min_area = 0\n self._param_max_area = 100000000\n\n def compute(self, frame):\n contours, hierarchy = cv2.findContours(frame.copy(), cv2.cv.CV_RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n \n objectsFound = []\n for cnt in contours:\n m = cv2.moments(cnt); m00 = m['m00']\n \n if m00 > self._param_min_area and m00 < self._param_max_area:\n if m00!=0: centroid = ( 
int(round(m['m10']/m00) ), int(round(m['m01']/m00)) )\n else: centroid = (0,0)\n\n box = cv2.boundingRect(cnt)\n p1, p2 = (box[0], box[1]), (box[0]+box[2], box[1]+box[3])\n \n obj = Blob()\n obj._contour = cnt\n obj._bounding = (p1, p2)\n obj._area = m00\n obj._centroid = centroid\n objectsFound.append( obj )\n\n \n return objectsFound\n\n def process(self, frame):\n frame = super(OTPFindBlobs, self).process(frame)\n if len(frame)>2: frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n return OTPFindBlobs.compute(self,frame)","sub_path":"mcvgui/core/plugins_old/Blobs/OTModuleFindBlobs/OTPFindBlobs.py","file_name":"OTPFindBlobs.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"32454725","text":"import pytest\nfrom ethereum.tools import tester\nfrom ethereum.tests.utils import new_db\nfrom ethereum.db import EphemDB\nfrom ethereum.hybrid_casper import casper_utils\nfrom ethereum.slogging import get_logger\nfrom ethereum.tests.hybrid_casper.testing_lang import TestLangHybrid\nlog = get_logger('test.chain')\nlogger = get_logger()\n\n_db = new_db()\n\n# from ethereum.slogging import configure_logging\n# config_string = ':info,eth.chain:debug,test.chain:info'\n# configure_logging(config_string=config_string)\n\nEPOCH_LENGTH = 25\nSLASH_DELAY = 864\nALLOC = {a: {'balance': 500*10**19} for a in tester.accounts[:10]}\nk0, k1, k2, k3, k4, k5, k6, k7, k8, k9 = tester.keys[:10]\na0, a1, a2, a3, a4, a5, a6, a7, a8, a9 = tester.accounts[:10]\n\n\n@pytest.fixture(scope='function')\ndef db():\n return EphemDB()\nalt_db = db\n\ndef init_chain_and_casper():\n genesis = casper_utils.make_casper_genesis(ALLOC, EPOCH_LENGTH, 100, 0.02, 0.002)\n t = tester.Chain(genesis=genesis)\n casper = tester.ABIContract(t, casper_utils.casper_abi, t.chain.config['CASPER_ADDRESS'])\n return t, casper\n\ndef init_multi_validator_chain_and_casper(validator_keys):\n t, casper = init_chain_and_casper()\n mine_epochs(t, 1)\n for k in validator_keys:\n casper_utils.induct_validator(t, casper, k, 200 * 10**18)\n t.mine()\n mine_epochs(t, 2)\n assert casper.get_dynasty() == 3\n return t, casper\n\n# Mines blocks required for number_of_epochs epoch changes, plus an offset of 2 blocks\ndef mine_epochs(t, number_of_epochs):\n distance_to_next_epoch = (EPOCH_LENGTH - t.head_state.block_number) % EPOCH_LENGTH\n number_of_blocks = distance_to_next_epoch + EPOCH_LENGTH*(number_of_epochs-1) + 2\n return t.mine(number_of_blocks=number_of_blocks)\n\ndef test_mining(db):\n t, casper = init_chain_and_casper()\n assert t.chain.state.block_number == 0\n assert t.chain.state.block_difficulty == 1\n for i in range(2):\n t.mine()\n assert t.chain.state.block_number == i + 1\n\ndef test_mining_block_rewards(db):\n t, casper = init_chain_and_casper()\n genesis = t.mine(coinbase=a1)\n blk2 = t.mine(coinbase=a1)\n blk3 = t.mine(coinbase=a1)\n blk4 = t.mine(coinbase=a1)\n t.mine(coinbase=a1)\n assert t.chain.state.get_balance(a1) == t.chain.env.config['BLOCK_REWARD'] + t.chain.mk_poststate_of_blockhash(blk4.hash).get_balance(a1)\n assert t.chain.state.get_balance(a1) == t.chain.env.config['BLOCK_REWARD'] * 2 + t.chain.mk_poststate_of_blockhash(blk3.hash).get_balance(a1)\n assert t.chain.state.get_balance(a1) == t.chain.env.config['BLOCK_REWARD'] * 3 + t.chain.mk_poststate_of_blockhash(blk2.hash).get_balance(a1)\n assert t.chain.state.get_balance(a1) == t.chain.env.config['BLOCK_REWARD'] * 4 + t.chain.mk_poststate_of_blockhash(genesis.hash).get_balance(a1)\n 
assert blk2.prevhash == genesis.hash\n\ndef test_simple_chain(db):\n t, casper = init_chain_and_casper()\n t.tx(k0, a1, 20, gasprice=0)\n blk2 = t.mine()\n blk3 = t.mine()\n assert blk2.hash in t.chain\n assert blk3.hash in t.chain\n assert t.chain.has_block(blk2.hash)\n assert t.chain.has_block(blk3.hash)\n assert t.chain.get_block(blk2.hash) == blk2\n assert t.chain.get_block(blk3.hash) == blk3\n assert t.chain.head == blk3\n assert t.chain.get_children(blk2) == [blk3]\n assert t.chain.get_chain() == [blk2, blk3]\n assert t.chain.get_block_by_number(1) == blk2\n assert t.chain.get_block_by_number(2) == blk3\n assert not t.chain.get_block_by_number(3)\n\ndef test_head_change_for_longer_pow_chain(db):\n \"\"\"\" [L & R are blocks]\n Local: L0, L1\n add\n Remote: R0, R1, R2\n \"\"\"\n t, casper = init_chain_and_casper()\n t.mine()\n root_hash = t.chain.head_hash\n L = t.mine(2)\n assert t.chain.head_hash == L.hash\n t.change_head(root_hash)\n R = t.mine(2)\n # Test that we just need one more block before the head switches\n assert t.chain.head_hash == L.hash\n R = t.mine(1)\n assert t.chain.head_hash == R.hash\n\ndef test_head_change_for_more_commits(db):\n \"\"\" This tests that the chain does not change head unless there are more commits on the alternative fork \"\"\"\n test_string = 'B J0 J1 J2 J3 B B S0 B P0 P1 P2 P3 B1 C0 C1 B1 S1 R0 B B H1 P0 P1 P2 P3 B1 C0 C1 B1 H1 C2 B1 S2 H2'\n test = TestLangHybrid(test_string, 15, 100, 0.02, 0.002)\n test.parse()\n\ndef test_double_prepare_slash(db):\n \"\"\" This tests that the chain does not change head unless there are more commits on the alternative fork \"\"\"\n test_string = 'B J0 B B S0 B P0 B1 C0 B1 R0 B P0 B1 C0 B1 X0 B J1 J2 B B P1 P2 B1 C1 C2 B P1 P2 B1 C1 C2 B1'\n test = TestLangHybrid(test_string, 15, 100, 0.02, 0.002)\n test.parse()\n\ndef test_commit_consistency_slash_with_violating_prepare(db):\n \"\"\" This tests that the chain does not change head unless there are more commits on the alternative fork \"\"\"\n test_string = 'B J0 B B S0 B P0 B1 C0 B1 R0 B B B P0 B1 C0 B1 X0 B1'\n test = TestLangHybrid(test_string, 15, 100, 0.02, 0.002)\n test.parse()\n\ndef test_commit_consistency_slash_with_violating_commit(db):\n \"\"\" This tests that the chain does not change head unless there are more commits on the alternative fork \"\"\"\n test_string = 'B J0 B B S0 B B B P0 B1 R0 B P0 B1 C0 B1 X0 B1'\n test = TestLangHybrid(test_string, 15, 100, 0.02, 0.002)\n test.parse()\n\ndef test_long_range_forks(db):\n \"\"\" This tests that the chain is efficient even when forks are very long range \"\"\"\n test_string = 'B J0 J1 J2 B B S0 B B B B B B B B P0 P1 P2 B1 C0 B1 R0 B B B B B B B B B B P0 P1 P2 B1 C0 C1 C2 B P0 P1 P2 B1 C0 C1 B1 S1 H1'\n test = TestLangHybrid(test_string, 15, 100, 0.02, 0.002)\n test.parse()\n","sub_path":"ethereum/tests/hybrid_casper/test_chain.py","file_name":"test_chain.py","file_ext":"py","file_size_in_byte":5663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"417310434","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport time\nimport serialaero\n\nprint ( \"Start serial aero-logger service.\" )\n\nlog_path = os.path.dirname(os.path.abspath(__file__))\nlog_folder = log_path+'/logs_serial/'\nlog_file = log_folder+time.strftime(\"%Y%m%d_%H%M%S_SerialLog.csv\",time.localtime())\nioSerial = serialaero.SERIALaero()\n\ntry:\n if os.path.isdir( log_path ) == False:\n os.makedirs ( log_path )\n file_out = open(log_file, \"w\", 0)\n 
file_out.write ( 'FCC serial log\\n' )\n\n while ( 1 ):\n if ( ioSerial.get_frame() ):\n #file_out.write ( ioSerial.line+\"\\n\" )\n\n file_out.write ( ioSerial.line_1+\"\\n\" )\n file_out.write ( ioSerial.line_2+\"\\n\" )\n\n #print ( \"success.\" )\n\nexcept IOError:\n msg = (\"Unable to create file on disk.\")\n file_out.close()\nfinally:\n ioSerial.ser.close()\n print ( \"bye.\" )\n","sub_path":"serial_listen.py","file_name":"serial_listen.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"35653513","text":"from django.conf.urls.defaults import patterns, url\nfrom eisula.proyecto.views import (ThesisArchiveIndexView,\n ThesisYearArchiveView,\n ThesisMonthArchiveView, ThesisDayArchiveView,\n ThesisDateDetailView, CourseView,\n CourseDetailView, ProfessorView,\n ProfessorDetailView, AuthorView,\n AuthorDetailView, LibraryView,\n LibraryDetailView)\n\nurlpatterns = patterns('',\n url(r'^$', ThesisArchiveIndexView.as_view(), name='eisula_index'),\n url(r'^proyectos/(?P\\d{4})/$', ThesisYearArchiveView.as_view(),\n name='thesis_by_year'),\n url(r'^proyectos/(?P\\d{4})/(?P\\w{3})/$',\n ThesisMonthArchiveView.as_view(), name='thesis_by_month'),\n url(r'^proyectos/(?P\\d{4})/(?P\\w{3})/(?P\\d{2})/$',\n ThesisDayArchiveView.as_view(), name='thesis_by_day'),\n url(r'^proyectos/(?P\\d{4})/(?P\\w{3})/(?P\\d{2})/(?P[-\\w]+)/$',\n ThesisDateDetailView.as_view(), name='thesis_detail'),\n url(r'^proyectos/$', ThesisArchiveIndexView.as_view(), name='thesis_index'),\n url(r'^materias/(?P[-\\w]+)/$', CourseDetailView.as_view(),\n name='course_detail'),\n url(r'^materias/$', CourseView.as_view(), name='course_index'),\n url(r'^profesores/(?P[-\\w]+)/(?P\\d+)/$', ProfessorDetailView.as_view(),\n name='professor_detail'),\n url(r'^profesores/$', ProfessorView.as_view(), name='professor_index'),\n url(r'^autores/(?P[-\\w]+)/(?P\\d+)/$', AuthorDetailView.as_view(),\n name='author_detail'),\n url(r'^autores/$', AuthorView.as_view(), name='author_index'),\n url(r'^bibliotecas/(?P[-\\w]+)/$', LibraryDetailView.as_view(),\n name='library_detail'),\n url(r'^bibliotecas/$', LibraryView.as_view(), name='library_index'),\n)\n","sub_path":"eisula/proyecto/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"408572852","text":"from sklearn.preprocessing import LabelEncoder\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\nimport json\nfrom pandas.io.json import json_normalize\nfrom datetime import datetime\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import train_test_split\nimport lightgbm as lgb\n\nimport os\n\n# display all columns when viewing obj_df.head()\npd.set_option('display.max_columns', None)\n\n# load in test & train data\ntrain_df = pd.read_csv('C:/Users/Jeffrey/Documents/home_projects/houses_all/train.csv')\ntest_df = pd.read_csv('C:/Users/Jeffrey/Documents/home_projects/houses_all/test.csv')\n\n# set target variable from train_df\ntarget = train_df['SalePrice']\n\n# verify columns in train and test sets are the same\ntrain_cols = train_df.columns\ntest_cols = test_df.columns\ndiff_cols = []\nfor col in train_cols:\n for col_test in test_cols:\n if col_test != col:\n if col_test == test_cols[-1]:\n diff_cols.append(col)\n break\n continue\n else:\n break\nprint('Different columns: ', diff_cols)\n\n# Create a merged DF containing all features from both test and train, appart from SalePrice.\nmerged_df = pd.concat([train_df.loc[:, train_df.columns != 'SalePrice'], test_df])\nmerged_division = len(train_df)\n\n# Divide data in to categorical and quantitative columns ** ACTUALLY COLUMNS ONLY DIVIDED IN TO NUMERICAL AND NON NUMERICAL **\ncat_features = []\nquant_features = []\nfor col in merged_df:\n if merged_df[col].dtype == object:\n cat_features.append(col)\n # print(col, ': ', train_dfbis[col].unique())\n elif merged_df[col].dtype == bool:\n merged_df.loc[:, col] = merged_df[col].astype(np.int64)\n quant_features.append(col)\n else:\n quant_features.append(col)\n\n# Format quant_features\nmerged_df[quant_features] = merged_df[quant_features].fillna(0)\n\n# label encode non numerical data\nlb_make = LabelEncoder()\nohe_cols = []\nfor col in cat_features:\n if col == 'fullVisitorId':\n continue\n if merged_df[col].nunique() < 50:\n ohe_cols.append(col)\n continue\n merged_df[col] = lb_make.fit_transform(merged_df[col].astype(str))\nmerged_df = pd.get_dummies(merged_df, columns=ohe_cols)\n\n# Reconstruct train and test data frames from modified merged data set\ntrain_df_mod = merged_df[:merged_division]\ntest_df_mod = merged_df[merged_division:]\n\n# validate performance on training set only\ny = target # supposing that column indexing has not been modified\nX = train_df_mod\n\n# Use train test split\ntrain_X, val_X, train_y, val_y = train_test_split(X, y, test_size=0.3, random_state=1)\n\n# Random forrest model\nrf_model = RandomForestRegressor(random_state=1)\nrf_model.fit(train_X, train_y)\nrf_val_predictions = rf_model.predict(val_X)\nrf_val_mae = mean_absolute_error(val_y, rf_val_predictions)\nprint(\"Validation MAE for Random Forest Model: \", rf_val_mae)\n\nmodel_lgb = lgb.LGBMRegressor(objective='regression', num_leaves=5, learning_rate=0.05, n_estimators=720, max_bin=55, bagging_fraction=0.8, bagging_freq=5,\n feature_fraction=0.2319, feature_fraction_seed=9, bagging_seed=9, min_data_in_leaf=6, min_sum_hessian_in_leaf=11)\n\n\n# Predictions on test set\nrf_model.fit(X, y)\nX_new = test_df_mod\nrf_val_predictions = rf_model.predict(X_new)\nsale_price_predict = pd.Series(rf_val_predictions, name='SalePrice', index=X_new.index)\nid = X_new['Id']\nsubmission_df = pd.concat([id, sale_price_predict], axis=1)\nsubmission_df.head()\n\nsubmission_df.to_csv('submission.csv', 
index=False)\n","sub_path":"housing_prices.py","file_name":"housing_prices.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"594622800","text":"import datetime\nfrom unittest import TestCase\n\nfrom . import CSVTstMixin as _CSVTstMixin\nfrom iatilib.test import factories as fac\n\nfrom iatilib.frontend import serialize\nfrom iatilib import codelists as cl\n\n\nclass CSVTstMixin(_CSVTstMixin):\n def serialize(self, data):\n return serialize.transaction_csv(data)\n\n\ndef example():\n activity = fac.ActivityFactory.build(\n iati_identifier=\"GB-1-123\",\n title=\"Project 123\",\n description=\"Desc project 123\",\n recipient_country_percentages=[\n fac.CountryPercentageFactory.build(\n country=cl.Country.kenya,\n percentage=80,\n ),\n fac.CountryPercentageFactory.build(\n country=cl.Country.uganda,\n percentage=20,\n )\n ],\n sector_percentages=[\n fac.SectorPercentageFactory.build(\n sector=cl.Sector.teacher_training,\n percentage=60\n ),\n fac.SectorPercentageFactory.build(\n sector=cl.Sector.primary_education,\n percentage=40\n ),\n\n ]\n )\n\n transactions = [\n fac.TransactionFactory.build(\n type=cl.TransactionType.disbursement,\n date=datetime.datetime(2012, 6, 30),\n value_amount=10000,\n ),\n fac.TransactionFactory.build(\n type=cl.TransactionType.disbursement,\n date=datetime.datetime(2012, 9, 30),\n value_amount=90000,\n ),\n fac.TransactionFactory.build(\n type=cl.TransactionType.disbursement,\n date=datetime.datetime(2012, 1, 31),\n value_amount=30000,\n ),\n ]\n for trans in transactions:\n trans.activity = activity\n activity.transactions = transactions\n return activity\n\n\nclass TestCSVTransactionExample(TestCase, CSVTstMixin):\n # See example here: https://docs.google.com/a/okfn.org/spreadsheet/ccc?key=0AqR8dXc6Ji4JdHJIWDJtaXhBV0IwOG56N0p1TE04V2c&usp=sharing#gid=5\n def test_transaction_type(self):\n data = self.process([\n fac.TransactionFactory.build(type=cl.TransactionType.disbursement)\n ])\n self.assertField({\"transaction-type\": \"D\"}, data[0])\n\n def test_transaction_type2(self):\n data = self.process([\n fac.TransactionFactory.build(type=cl.TransactionType.commitment)\n ])\n self.assertField({\"transaction-type\": \"C\"}, data[0])\n\n def test_transaction_date(self):\n data = self.process([\n fac.TransactionFactory.build(date=datetime.date(2012, 6, 30))\n ])\n self.assertField({\"transaction-date\": \"2012-06-30\"}, data[0])\n\n def test_default_currency(self):\n data = self.process([\n fac.TransactionFactory.build(\n type=cl.TransactionType.disbursement,\n activity__default_currency=cl.Currency.us_dollar)\n ])\n self.assertField({\"default-currency\": \"USD\"}, data[0])\n\n def test_currency(self):\n # I'm assuming they want the actual currency\n data = self.process([\n fac.TransactionFactory.build(\n type=cl.TransactionType.disbursement,\n value_currency=cl.Currency.australian_dollar)\n ])\n self.assertField({\"default-currency\": \"AUD\"}, data[0])\n\n def test_transaction_value(self):\n data = self.process([\n fac.TransactionFactory.build(value_amount=1000)\n ])\n self.assertField({\"transaction-value\": \"1000\"}, data[0])\n\n def test_iati_id(self):\n data = self.process([\n fac.TransactionFactory.build(\n activity__iati_identifier=\"GB-1-123\")\n ])\n self.assertField({\"iati-identifier\": \"GB-1-123\"}, data[0])\n\n def test_title(self):\n data = self.process([\n fac.TransactionFactory.build(\n activity__title=\"test title\")\n ])\n 
self.assertField({\"title\": \"test title\"}, data[0])\n\n def test_description(self):\n data = self.process([\n fac.TransactionFactory.build(\n activity__description=\"test desc\")\n ])\n self.assertField({\"description\": \"test desc\"}, data[0])\n\n\n def test_recipient_country_code(self):\n data = self.process([\n fac.TransactionFactory.build(\n activity=fac.ActivityFactory.build(\n recipient_country_percentages=[\n fac.CountryPercentageFactory.build(\n country=cl.Country.zambia)\n ])\n )\n ])\n self.assertField({\"recipient-country-code\": \"ZM\"}, data[0])\n\n def test_recipient_country(self):\n data = self.process([\n fac.TransactionFactory.build(\n activity=fac.ActivityFactory.build(\n recipient_country_percentages=[\n fac.CountryPercentageFactory.build(\n country=cl.Country.zambia),\n fac.CountryPercentageFactory.build(\n country=cl.Country.australia)\n ])\n )\n ])\n self.assertField({\"recipient-country\": \"Zambia;Australia\"}, data[0])\n\n def test_recipient_country_percentage(self):\n data = self.process([\n fac.TransactionFactory.build(\n activity=fac.ActivityFactory.build(\n recipient_country_percentages=[\n fac.CountryPercentageFactory.build(\n country=cl.Country.zambia,\n percentage=20),\n fac.CountryPercentageFactory.build(\n country=cl.Country.australia,\n percentage=80)\n ])\n )\n ])\n self.assertField({\"recipient-country-percentage\": \"20;80\"}, data[0])\n\n def test_recipient_country_percentage_blank(self):\n data = self.process([\n fac.TransactionFactory.build(\n activity=fac.ActivityFactory.build(\n recipient_country_percentages=[\n fac.CountryPercentageFactory.build(\n country=cl.Country.zambia)\n ])\n )\n ])\n self.assertField({\"recipient-country-percentage\": \"\"}, data[0])\n\n def test_sector_code(self):\n data = self.process([\n fac.TransactionFactory.build(\n activity=fac.ActivityFactory.build(\n sector_percentages=[\n fac.SectorPercentageFactory.build(\n sector=cl.Sector.teacher_training),\n fac.SectorPercentageFactory.build(\n sector=cl.Sector.primary_education)\n ])\n )\n ])\n self.assertField({\"sector-code\": \"11130;11220\"}, data[0])\n\n def test_sector(self):\n data = self.process([\n fac.TransactionFactory.build(\n activity=fac.ActivityFactory.build(\n sector_percentages=[\n fac.SectorPercentageFactory.build(\n sector=cl.Sector.teacher_training),\n fac.SectorPercentageFactory.build(\n sector=cl.Sector.primary_education)\n ])\n )\n ])\n self.assertField(\n {\"sector\": \"Teacher training;Primary education\"},\n data[0])\n\n def test_sector_blank(self):\n data = self.process([\n fac.TransactionFactory.build(\n activity=fac.ActivityFactory.build(\n sector_percentages=[\n fac.SectorPercentageFactory.build(sector=None),\n ])\n )\n ])\n self.assertField({\"sector\": \"\"}, data[0])\n\n def test_sector_percentage(self):\n data = self.process([\n fac.TransactionFactory.build(\n activity=fac.ActivityFactory.build(\n sector_percentages=[\n fac.SectorPercentageFactory.build(\n sector=cl.Sector.teacher_training,\n percentage=60),\n fac.SectorPercentageFactory.build(\n sector=cl.Sector.primary_education,\n percentage=40)\n ])\n )\n ])\n self.assertField({\"sector-percentage\": \"60;40\"}, data[0])\n\n def test_sector_percentage_blank(self):\n data = self.process([\n fac.TransactionFactory.build(\n activity=fac.ActivityFactory.build(\n sector_percentages=[\n fac.SectorPercentageFactory.build(\n sector=cl.Sector.teacher_training,\n percentage=None),\n ])\n )\n ])\n self.assertField({\"sector-percentage\": \"\"}, data[0])\n\n\nclass 
TestTransactionByCountry(TestCase, CSVTstMixin):\n def serialize(self, data):\n return serialize.csv_transaction_by_country(data)\n\n def example(self):\n ret = []\n act = example()\n for transaction in act.transactions:\n for country in act.recipient_country_percentages:\n ret.append((transaction, country))\n return ret\n\n def test_rec_country_code_0(self):\n data = self.process(self.example())\n self.assertField({\"recipient-country-code\": \"KE\"}, data[0])\n\n def test_rec_country_code_1(self):\n data = self.process(self.example())\n self.assertField({\"recipient-country-code\": \"UG\"}, data[1])\n\n def test_trans_date_0(self):\n data = self.process(self.example())\n self.assertField({\"transaction-date\": \"2012-06-30\"}, data[1])\n\n def test_trans_date_2(self):\n data = self.process(self.example())\n self.assertField({\"transaction-date\": \"2012-09-30\"}, data[2])\n\n def test_identifier(self):\n data = self.process(self.example())\n self.assertField({\"iati-identifier\": \"GB-1-123\"}, data[2])\n\n\nclass TestTransactionBySector(TestCase, CSVTstMixin):\n def serialize(self, data):\n return serialize.csv_transaction_by_sector(data)\n\n def example(self):\n ret = []\n act = example()\n for transaction in act.transactions:\n for sector in act.sector_percentages:\n ret.append((transaction, sector))\n return ret\n\n def test_sector_code_0(self):\n data = self.process(self.example())\n self.assertField({\"sector-code\": \"11130\"}, data[0])\n\n def test_sector_code_1(self):\n data = self.process(self.example())\n self.assertField({\"sector-code\": \"11220\"}, data[1])\n\n def test_trans_date_0(self):\n data = self.process(self.example())\n self.assertField({\"transaction-date\": \"2012-06-30\"}, data[1])\n\n def test_trans_date_2(self):\n data = self.process(self.example())\n self.assertField({\"transaction-date\": \"2012-09-30\"}, data[2])\n\n def test_identifier(self):\n data = self.process(self.example())\n self.assertField({\"iati-identifier\": \"GB-1-123\"}, data[2])\n\n def test_recepient_country_code(self):\n data = self.process(self.example())\n self.assertField({\"recipient-country-code\": \"KE;UG\"}, data[0])\n\n","sub_path":"iati_datastore/iatilib/test/test_serializers/test_csv_transactions.py","file_name":"test_csv_transactions.py","file_ext":"py","file_size_in_byte":11431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"575139394","text":"from .decagon.utility import preprocessing\nfrom ...Utils.Config import Config\nfrom ...Dtos.AdjacencyMatrices import AdjacencyMatrices\nfrom ...Dtos.NodeFeatures import NodeFeatures\nfrom ...Dtos.DataSet import DataSet\nfrom ...Dtos.TypeShortcuts import PlaceholdersDict\nfrom collections import defaultdict\nfrom tensorflow.python.platform import flags as tfFlags\nfrom typing import Dict, List, Type, Iterable, Tuple\nimport tensorflow as tf\nimport scipy.sparse as sp\nimport numpy as np\n\nInteractionSubGraphType = int\nEdgeType = tuple\nStrDecoderSpecifier = str\n\nEdgeTypeMatrixDimensionsDict = Dict[EdgeType, List[tuple]]\nEdgeTypeAdjacencyMatrixDict = Dict[EdgeType, List[sp.coo_matrix]]\nEdgeTypeDecoderDict = Dict[EdgeType, StrDecoderSpecifier]\nEdgeTypeNumMatricesDict = Dict[EdgeType, int]\nFeaturesTuple = Tuple[np.ndarray, sp.coo_matrix, Tuple[int, int]]\nFeaturesDict = Dict[InteractionSubGraphType, FeaturesTuple]\nDegreesDict = Dict[InteractionSubGraphType, List[int]]\nPlaceholdersDict = Dict[str, tf.placeholder]\nFlags = tfFlags._FlagValuesWrapper\n\nclass DecagonDataSet:\n 
PPI_GRAPH_IDX = 0\n DRUG_DRUG_GRAPH_IDX = 1\n\n PPI_GRAPH_EDGE_TYPE = (PPI_GRAPH_IDX, PPI_GRAPH_IDX)\n DRUG_DRUG_EDGE_TYPE = (DRUG_DRUG_GRAPH_IDX, DRUG_DRUG_GRAPH_IDX)\n PPI_TO_DRUG_EDGE_TYPE = (PPI_GRAPH_IDX, DRUG_DRUG_GRAPH_IDX)\n DRUG_TO_PPI_EDGE_TYPE = (DRUG_DRUG_GRAPH_IDX, PPI_GRAPH_IDX)\n\n HaveDefinedFlags = False\n\n '''\n This class contains all the necessary information to correctly\n instantiate a decagon batch iterator, optimizer, and model. It\n also contains methods to build a DecagonDataSet from a base\n DataSet object.\n '''\n def __init__(\n self,\n adjacencyMatrixDict: EdgeTypeAdjacencyMatrixDict,\n edgeTypeMatrixDimDict: EdgeTypeMatrixDimensionsDict,\n edgeTypeNumMatricesDict: EdgeTypeNumMatricesDict,\n featuresDict: FeaturesDict,\n degreesDict: DegreesDict,\n config: Config\n ) -> None:\n self.adjacencyMatrixDict: EdgeTypeAdjacencyMatrixDict = adjacencyMatrixDict\n self.edgeTypeMatrixDimDict: EdgeTypeMatrixDimensionsDict = edgeTypeMatrixDimDict\n self.edgeTypeNumMatricesDict: EdgeTypeNumMatricesDict = edgeTypeNumMatricesDict\n self.featuresDict: FeaturesDict = featuresDict\n self.degreesDict: DegreesDict = degreesDict\n\n self.edgeTypeDecoderDict: EdgeTypeDecoderDict = self._getEdgeTypeDecoderDict(config)\n self.flags: Flags = self._getFlags(config)\n self.placeholdersDict: PlaceholdersDict = self._getPlaceholdersDict(\n edgeTypeNumMatricesDict\n )\n\n def _getEdgeTypeDecoderDict(self, config: Config) -> EdgeTypeDecoderDict:\n validDecoders = set(['innerproduct', 'distmult', 'bilinear', 'dedicom'])\n\n result: EdgeTypeDecoderDict = {}\n\n result[DecagonDataSet.PPI_GRAPH_EDGE_TYPE] = \\\n config.getSetting('PPIEdgeDecoder')\n result[DecagonDataSet.PPI_TO_DRUG_EDGE_TYPE] = \\\n config.getSetting('ProteinToDrugEdgeDecoder')\n result[DecagonDataSet.DRUG_DRUG_EDGE_TYPE] = \\\n config.getSetting('DrugDrugEdgeDecoder')\n\n if DecagonDataSet._shouldTranspose(config):\n result[DecagonDataSet.DRUG_TO_PPI_EDGE_TYPE] = \\\n config.getSetting('DrugProteinEdgeDecoder')\n\n return result\n\n def _getPlaceholdersDict(\n self,\n edgeTypeNumMatricesDict: EdgeTypeNumMatricesDict\n ) -> PlaceholdersDict:\n result: PlaceholdersDict = {}\n\n result['batch'] = tf.placeholder(tf.int32, name='batch')\n result['degrees'] = tf.placeholder(tf.int32)\n result['dropout'] = tf.placeholder_with_default(0., shape=())\n\n result['batch_edge_type_idx'] = tf.placeholder(\n tf.int32,\n shape=(),\n name='batch_edge_type_idx'\n )\n\n result['batch_row_edge_type'] = tf.placeholder(\n tf.int32,\n shape=(),\n name='batch_row_edge_type'\n )\n\n result['batch_col_edge_type'] = tf.placeholder(\n tf.int32,\n shape=(),\n name='batch_col_edge_type'\n )\n\n for edgeType, numMtxsForEdgeType in edgeTypeNumMatricesDict.items():\n for i in range(numMtxsForEdgeType):\n key = 'adj_mats_%d,%d,%d' % (edgeType[0], edgeType[1], i)\n result[key] = tf.sparse_placeholder(tf.float32)\n\n for x in [DecagonDataSet.PPI_GRAPH_IDX, DecagonDataSet.DRUG_DRUG_GRAPH_IDX]:\n result['feat_%d' % x] = tf.sparse_placeholder(tf.float32)\n\n return result\n\n def _getFlags(self, config: Config) -> Flags:\n flags = tf.app.flags\n\n def defVal(key: str, desc: str, typeToDef: type) -> None:\n defFxn = None\n if typeToDef == int:\n defFxn = flags.DEFINE_integer\n elif typeToDef == float:\n defFxn = flags.DEFINE_float\n elif typeToDef == bool:\n defFxn = flags.DEFINE_boolean\n elif typeToDef == str:\n defFxn = flags.DEFINE_string\n else:\n raise TypeError('Invalid type')\n\n defFxn(key, typeToDef(config.getSetting(key)), desc)\n\n if not 
DecagonDataSet.HaveDefinedFlags:\n defVal('neg_sample_size', 'Negative sample size.', float)\n defVal('learning_rate', 'Initial learning rate.', float)\n defVal('epochs', 'Number of epochs to train.', int)\n defVal('hidden1', 'Number of units in hidden layer 1.', int)\n defVal('hidden2', 'Number of units in hidden layer 2.', int)\n defVal('weight_decay', 'Weight for L2 loss on embedding matrix.', float)\n defVal('dropout', 'Dropout rate (1 - keep probability).', float)\n defVal('max_margin', 'Max margin parameter in hinge loss', float)\n defVal('batch_size', 'Minibatch size.', int)\n defVal('bias', 'Bias term.', bool)\n\n # For compatibility with ray\n flags.DEFINE_string('node-ip-address', '', 'RayCompat')\n flags.DEFINE_string('node-manager-port', '', 'RayCompat')\n flags.DEFINE_string('object-store-name', '', 'RayCompat')\n flags.DEFINE_string('raylet-name', '', 'RayCompat')\n flags.DEFINE_string('redis-address', '', 'RayCompat')\n flags.DEFINE_string('config-list', '', 'RayCompat')\n flags.DEFINE_string('temp-dir', '', 'RayCompat')\n flags.DEFINE_string('redis-password', '', 'RayCompat')\n flags.DEFINE_string('use-pickle', '', 'RayCompat')\n\n DecagonDataSet.HaveDefinedFlags = True\n\n return flags.FLAGS\n\n @staticmethod\n def fromDataSet(dataSet: DataSet, config: Config) -> Type['DecagonDataSet']:\n adjMtxDict = DecagonDataSet._getAdjMtxDict(\n dataSet.adjacencyMatrices,\n config\n )\n\n featuresDict = DecagonDataSet._getFeaturesDict(dataSet.nodeFeatures)\n\n edgeTypeMatrixDimDict = DecagonDataSet._getEdgeTypeMtxDimDict(adjMtxDict)\n edgeTypeNumMatricesDict = DecagonDataSet._getEdgeTypeNumMatricesDict(adjMtxDict)\n degreesDict = DecagonDataSet._getDegreesDict(adjMtxDict)\n\n return DecagonDataSet(\n adjMtxDict,\n edgeTypeMatrixDimDict,\n edgeTypeNumMatricesDict,\n featuresDict,\n degreesDict,\n config,\n )\n\n @staticmethod\n def _getAdjMtxDict(\n adjMtxs: AdjacencyMatrices,\n config: Config\n ) -> EdgeTypeAdjacencyMatrixDict:\n result: EdgeTypeAdjacencyMatrixDict = defaultdict(list)\n\n result[DecagonDataSet.PPI_GRAPH_EDGE_TYPE] = [adjMtxs.proteinProteinRelationMtx]\n result[DecagonDataSet.PPI_TO_DRUG_EDGE_TYPE] = [adjMtxs.drugProteinRelationMtx]\n result[DecagonDataSet.DRUG_DRUG_EDGE_TYPE] = list(adjMtxs.drugDrugRelationMtxs.values())\n\n # Decagon's original code uses transposed matrices to train as well\n # as original matrices. 
Here we provide the option to do so too.\n if DecagonDataSet._shouldTranspose(config):\n DecagonDataSet._augmentAdjMtxDictWithTranspose(result)\n\n return result\n\n @staticmethod\n def _shouldTranspose(config: Config) -> bool:\n strVal = config.getSetting('TrainWithTransposedAdjacencyMatrices')\n return bool(strVal)\n\n @staticmethod\n def _augmentAdjMtxDictWithTranspose(\n adjMtxDict: EdgeTypeAdjacencyMatrixDict\n ) -> None:\n tmp: EdgeTypeAdjacencyMatrixDict = {}\n for edgeType, mtxs in adjMtxDict.items():\n mtxs = DecagonDataSet._extractMtxs(adjMtxDict[edgeType])\n tMtxs = [mtx.transpose(copy=True, setId=True) for mtx in mtxs]\n\n if edgeType == DecagonDataSet.PPI_TO_DRUG_EDGE_TYPE:\n tmp[edgeType] = mtxs\n tmp[DecagonDataSet.DRUG_TO_PPI_EDGE_TYPE] = tMtxs\n\n else:\n tmp[edgeType] = mtxs + tMtxs\n\n for edgeType, mtxs in tmp.items():\n adjMtxDict[edgeType] = mtxs\n\n return\n\n # Do not type annotate the argument as it is either a dict or list\n @staticmethod\n def _extractMtxs(mtxContainer) -> List[sp.coo_matrix]:\n if isinstance(mtxContainer, list):\n return mtxContainer\n elif isinstance(mtxContainer, dict):\n return list(mtxContainer.values())\n else:\n raise TypeError('mtxContainer must be of type list or dict')\n\n @staticmethod\n def _getFeaturesDict(nodeFeatures: NodeFeatures) -> FeaturesDict:\n processedProteinFeatures = preprocessing.sparse_to_tuple(\n nodeFeatures.proteinNodeFeatures\n )\n\n processedDrugFeatures = preprocessing.sparse_to_tuple(\n nodeFeatures.drugNodeFeatures\n )\n\n return {\n DecagonDataSet.PPI_GRAPH_IDX: processedProteinFeatures,\n DecagonDataSet.DRUG_DRUG_GRAPH_IDX: processedDrugFeatures,\n }\n\n @staticmethod\n def _getEdgeTypeMtxDimDict(\n adjMtxDict: EdgeTypeAdjacencyMatrixDict\n ) -> EdgeTypeMatrixDimensionsDict:\n return {\n edgeType: [\n mtx.shape\n for mtx in DecagonDataSet._extractMtxs(adjMtxDict[edgeType])\n ]\n for edgeType, mtxs in adjMtxDict.items()\n }\n\n @staticmethod\n def _getEdgeTypeNumMatricesDict(\n adjMtxDict: EdgeTypeAdjacencyMatrixDict\n ) -> EdgeTypeNumMatricesDict:\n return { edgeType: len(mtxs) for edgeType, mtxs in adjMtxDict.items() }\n\n @staticmethod\n def _getDegreesDict(\n adjMtxDict: EdgeTypeAdjacencyMatrixDict\n ) -> DegreesDict:\n def getDegrees(mtx: sp.coo_matrix) -> int:\n return np.array(mtx.sum(axis=0)).squeeze()\n\n def getDegreesList(mtxs: Iterable[sp.coo_matrix]) -> List[int]:\n return [getDegrees(mtx) for mtx in mtxs]\n\n ppiMtxs = adjMtxDict[DecagonDataSet.PPI_GRAPH_EDGE_TYPE]\n drugDrugMtxsDict = adjMtxDict[DecagonDataSet.DRUG_DRUG_EDGE_TYPE]\n\n return {\n DecagonDataSet.PPI_GRAPH_IDX: getDegreesList(ppiMtxs),\n DecagonDataSet.DRUG_DRUG_GRAPH_IDX: getDegreesList(drugDrugMtxsDict),\n }\n\n","sub_path":"main/Trainable/Decagon/DecagonDataSet.py","file_name":"DecagonDataSet.py","file_ext":"py","file_size_in_byte":11296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"394417391","text":"from Module.CTC_Reconfiguration import CTC\nfrom Loader.IEMOCAP_Transaction_Loader import CTC_Loader\nfrom Loader.IEMOCAP_CTC_Loader import IEMOCAP_CTC_Loader\nfrom time import strftime\nimport os\n\nif __name__ == '__main__':\n savepath = 'D:\\\\ProjectData\\\\Results-Labels\\\\Bands30-Again\\\\'\n os.makedirs(savepath)\n train_inputs, train_targets, train_seq_len = IEMOCAP_CTC_Loader(\n datafold='D:\\\\ProjectData\\\\IEMOCAP-Normalized\\\\Bands30\\\\',\n labelfold='D:\\\\ProjectData\\\\IEMOCAP-Label-Words\\\\')\n print(len(train_inputs))\n # exit()\n classifier = 
CTC(trainData=train_inputs, trainLabel=train_targets, trainSeqLength=train_seq_len,\n featureShape=30, numClass=6, learningRate=0.5e-5, rnnLayers=1, hiddenNodules=1024)\n print(classifier.information)\n for episode in range(1000):\n name = str(episode)\n while len(name) < 5:\n name = '0' + name\n\n print('Episode', episode, ':', classifier.Train(), strftime(\"%Y/%m/%d %H:%M:%S\"))\n classifier.PredictOutput(testData=train_inputs, testSequenceLength=train_seq_len,\n filename=savepath + name + '.txt')\n if episode != 0 and episode % 10 == 0:\n classifier.Save(savepath + name)\n","sub_path":"Train/StartTest.py","file_name":"StartTest.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"612115011","text":"from all_hands_to_scores import score_to_pc\nfrom dataframe_builder import build_dataframe\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport random\n\nrandom.seed(0)\n_, _, df_scores = build_dataframe(\n nb_hands=1000,\n nb_players=5\n)\n\ndf_scores.columns = df_scores.columns.get_level_values(1)\n\ndf_opponents = pd.concat(\n [\n df_scores[[0]],\n df_scores[[1]].min(axis=1),\n df_scores[[1, 2]].min(axis=1),\n df_scores[[1, 2, 3]].min(axis=1),\n df_scores[[1, 2, 3, 4]].min(axis=1)\n ],\n axis = 1,\n keys = range(5)\n)\ndf_opponents.columns = df_opponents.columns.get_level_values(0)\n\n\nif __name__ == '__main__':\n ax = pd.scatter_matrix(df_scores, s=2)\n for i, row in enumerate(ax):\n for j, item in enumerate(row):\n if i != j:\n item.set_xlim(0, 8000)\n item.set_ylim(0, 8000)\n plt.savefig('scatter_matrix.png')\n\n ax = pd.scatter_matrix(df_opponents, s=2)\n for i, row in enumerate(ax):\n for j, item in enumerate(row):\n if i != j:\n item.set_xlim(0, 8000)\n item.set_ylim(0, 8000)\n plt.savefig('scatter_matrix_opponents.png')\n\n df_pc = df_scores.applymap(score_to_pc)\n ax = pd.scatter_matrix(df_pc, s=2)\n for i, row in enumerate(ax):\n for j, item in enumerate(row):\n if i != j:\n item.set_xlim(0, 1.0)\n item.set_ylim(0, 1.0)\n plt.savefig('scatter_matrix_of_pc.png')\n\n df_pc = df_scores.applymap(score_to_pc)\n ax = pd.scatter_matrix(df_pc, s=2)\n for i, row in enumerate(ax):\n for j, item in enumerate(row):\n if i != j:\n item.set_xlim(0, 0.15)\n item.set_ylim(0, 0.15)\n plt.savefig('scatter_matrix_of_pc_zoom.png')\n","sub_path":"scatter_matrix.py","file_name":"scatter_matrix.py","file_ext":"py","file_size_in_byte":1825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"144122121","text":"import cv2\nimport numpy as np\n\nfrom pipeline import Processor\n\n\ndef draw_fitted_lanes_warped(image, l_func, r_func, search_margin, left_color=(0, 255, 0), right_color=(0, 255, 0)):\n \"\"\"\n Draws fitted lanes with search margin, overlayed on a warped image.\n Returns a new image.\n \"\"\"\n out_img = np.dstack((image, image, image)) * 255\n window_img = np.zeros_like(out_img)\n\n ploty = np.linspace(0, image.shape[0] - 1, image.shape[0])\n\n if l_func.loaded:\n left_line_pts = get_lane_search_points(l_func, ploty, search_margin)\n cv2.fillPoly(window_img, np.int_([left_line_pts]), left_color)\n\n if r_func.loaded:\n right_line_pts = get_lane_search_points(r_func, ploty, search_margin)\n cv2.fillPoly(window_img, np.int_([right_line_pts]), right_color)\n\n result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)\n return result\n\n\ndef get_lane_search_points(func, ploty, search_margin):\n 
\"\"\"\n Returns points to display fitted lane search margin.\n \"\"\"\n fitx = func.apply(ploty)\n line_window1 = np.array([np.transpose(np.vstack([fitx - search_margin, ploty]))])\n line_window2 = np.array([np.flipud(np.transpose(np.vstack([fitx + search_margin, ploty])))])\n pts = np.hstack((line_window1, line_window2))\n return pts\n\n\nclass DisplayLaneSearchFittedUnwarped(Processor):\n \"\"\"\n This really displays the final image. It takes input image of the pipeline (undistorted) as a side channel\n in `image_source`, and then displays `items`, which are lane functions and lane parameters, on the image.\n \"\"\"\n def __init__(self, image_source, src, dst):\n super().__init__()\n self._image_source = image_source\n self._minv = cv2.getPerspectiveTransform(dst, src)\n\n def apply(self, items):\n l_func, r_func, curv, car_shift_m = items\n\n image = self._image_source.output\n ploty = np.linspace(0, image.shape[0] - 1, image.shape[0])\n\n # Draw lane line on a warped image, then unwarp it and overlay on the input image.\n warp = np.zeros_like(image).astype(np.uint8)\n\n if not l_func.loaded or not r_func.loaded:\n error = 'No lane found'\n else:\n l_fitx = l_func.apply(ploty)\n r_fitx = r_func.apply(ploty)\n pts_left = np.array([np.transpose(np.vstack([l_fitx, ploty]))])\n pts_right = np.array([np.flipud(np.transpose(np.vstack([r_fitx, ploty])))])\n pts = np.hstack((pts_left, pts_right))\n cv2.fillPoly(warp, np.int_([pts]), (0, 255, 0))\n error = ''\n\n unwarped = cv2.warpPerspective(warp, self._minv, (image.shape[1], image.shape[0]))\n result = cv2.addWeighted(image.copy(), 1, unwarped, 0.3, 0)\n\n # Display measurements.\n cv2.rectangle(result, (0, 0), (image.shape[1], 120), (0, 0, 0), -1)\n\n if not error:\n text = f'Curvative radius: {curv:.1f}m'\n cv2.putText(result, text, (10, 55), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n\n direction = 'left' if car_shift_m < 0 else 'right'\n text = f'Shift from center: {abs(car_shift_m):.1f}m (to the {direction})'\n cv2.putText(result, text, (10, 90), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n else:\n cv2.putText(result, error, (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2, cv2.LINE_AA)\n\n return result\n\n def dump_input_frame(self, items):\n return self._image_source.output\n\n def dump_output_frame(self, items):\n return self.output\n\n\nclass DisplayLaneSearchFitted(Processor):\n def __init__(self, image_source_warped, search_margin):\n super().__init__()\n self._image_source = image_source_warped\n self._search_margin = search_margin\n\n def apply(self, items):\n l_func, r_func, curv, car_shift_m = items\n image = self._image_source.output\n return draw_fitted_lanes_warped(image, l_func[0], r_func[1], self._search_margin)\n\n def dump_input_frame(self, centroids):\n image = self._image_source.output\n return image\n\n def dump_output_frame(self, fits):\n return self.apply(fits)\n","sub_path":"lanes_display.py","file_name":"lanes_display.py","file_ext":"py","file_size_in_byte":4186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"384127616","text":"from practice import *\nfrom sklearn import metrics\nimport scikitplot as skplt\nif __name__ == '__main__':\n\n val_whole_y = []\n val_whole_scores = []\n PATH = [\n '/home/hyunwoo/PycharmProjects/pytorch/Apr17_09-31-06_ubuntu/1fold/epoch40 valAcc0.9530864197530864.tar',\n '/home/hyunwoo/PycharmProjects/pytorch/Apr17_09-31-06_ubuntu/2fold/epoch40 valAcc0.9645799011532126.tar',\n 
'/home/hyunwoo/PycharmProjects/pytorch/Apr17_09-31-06_ubuntu/3fold/epoch40 valAcc0.9522240527182867.tar'\n ]\n for i in [0,1,2]:\n #Dataloader_train = data_loaders[0][0]\n Dataloader_val = data_loaders[i][1]\n #\n\n net = DenseNet()\n net = nn.DataParallel(net)\n net.to(device)\n\n\n PATH_ = PATH[i]\n checkpoint = torch.load(PATH_)\n\n net.load_state_dict(checkpoint['model_state_dict'])\n\n\n confusion_matrixx = torch.zeros(2, 2)\n val_acc_one_epoch = 0\n\n\n\n for i, (images, targets) in enumerate(Dataloader_val):\n net.eval()\n\n val_whole_y.append(targets.numpy())\n #print('val_while_y is :',val_whole_y)\n images = images.type('torch.FloatTensor')\n targets = torch.tensor(targets)\n images, targets = images.to(device), targets.to(device)\n\n\n\n with torch.set_grad_enabled(False):\n scores = net(images)\n #print('scores : ',scores)\n k = scores.cpu().numpy()\n #print('k : ', k)\n k = k[[0,1,2,3,4,5],[1,1,1,1,1,1]]\n #print('after k :',k)\n val_whole_scores.append(k)\n #print('val_whole+scores :',val_whole_scores)\n predicts = torch.argmax(scores, 1)\n print('predicts :', predicts)\n print('targets :', targets)\n true1_false0 = predicts == targets\n val_acc_one_epoch += true1_false0.sum().item()\n\n print('val mode | epoch : [%d/40] ' % (checkpoint['epoch'] ))\n\n # confusion_matrix\n for t, p in zip(targets.view(-1), predicts.view(-1)):\n confusion_matrixx[t.long(), p.long()] += 1\n\n val_acc_one_epoch /= len(Dataloader_val.dataset)\n print('val_acc_one_epoch', val_acc_one_epoch)\n print('confusion_matrix :', confusion_matrixx)\n\n val_whole_y = np.asarray(val_whole_y).flatten()\n val_whole_scores = np.asarray(val_whole_scores).flatten()\n fpr, tpr, thresholds = metrics.roc_curve(val_whole_y, val_whole_scores, pos_label = 1)\n #skplt.metrics.plot_roc_curve(val_whole_y, val_whole_scores)\n print('fpr :', fpr)\n print('tpr :', tpr)\n print('thresholds :', thresholds)\n plt.plot(fpr, tpr)\n plt.show()\n auc_score = metrics.roc_auc_score(val_whole_y, val_whole_scores,)\n print('auc_score ',auc_score)\n #print('fpr :',fpr)\n #print('tpr :',tpr)\n #print('thresholds :',thresholds)\n","sub_path":"load_model.py","file_name":"load_model.py","file_ext":"py","file_size_in_byte":2934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"584446463","text":"from magma.logging import root_logger\n\n\n_logger = root_logger()\n\n\nclass WiringLog:\n def __init__(self, tpl, *bits):\n self.tpl = tpl\n self.bits = bits\n\n def get_debug_name(self, bit):\n if isinstance(bit, int):\n return bit\n return bit.debug_name\n\n def __str__(self):\n bits = [self.get_debug_name(bit) for bit in self.bits]\n return self.tpl.format(*bits)\n\n\nclass Wire:\n \"\"\"\n Wire implements wiring.\n\n Each wire is represented by a bit.\n \"\"\"\n def __init__(self, bit):\n self._bit = bit\n self._driving = []\n self._driver = None\n\n def __repr__(self):\n return repr(self._bit)\n\n def __str__(self):\n return str(self._bit)\n\n def anon(self):\n return self._bit.anon()\n\n def unwire(self, other):\n other._driving.remove(self)\n self._driver = None\n\n def connect(self, other, debug_info):\n \"\"\"\n Connect two wires, self should be an input and other should be an\n output, or both should be inouts\n \"\"\"\n if self._driver is not None:\n _logger.warning(\n WiringLog(\n (\"Wiring multiple outputs to same wire, using last \"\n \"connection. 
Input: {}, Old Output: {}, New Output: {}\"),\n self._bit, self._driver._bit, other._bit),\n debug_info=debug_info\n )\n if self._bit.is_output():\n _logger.error(\n WiringLog(\"Using `{}` (an output) as an input\", self._bit),\n debug_info=debug_info\n )\n return\n if other._bit.is_input():\n _logger.error(\n WiringLog(\"Using `{}` (an input) as an output\", other._bit),\n debug_info=debug_info\n )\n return\n if self._bit.is_inout() and not other._bit.is_inout():\n _logger.error(\n WiringLog(\"Using `{}` (not inout) as an inout\", other._bit),\n debug_info=debug_info\n )\n return\n if not self._bit.is_inout() and other._bit.is_inout():\n _logger.error(\n WiringLog(\"Using `{}` (not inout) as an inout\", self._bit),\n debug_info=debug_info\n )\n return\n\n self._driver = other\n other._driving.append(self)\n\n def trace(self, skip_self=True):\n \"\"\"\n If a value is an input or an intermediate (undirected), trace it until\n there is an input or inout (this is the source)\n\n Upon the first invocation (from a user), we skip the current bit (so\n we don't trace to ourselves)\n \"\"\"\n if self._driver is not None:\n return self._driver.trace(skip_self=False)\n if not skip_self and (self._bit.is_output() or self._bit.is_inout()):\n return self._bit\n return None\n\n def value(self):\n \"\"\"\n Return the bit connected to this bit. Specifically, return the bit this\n bit is driving if it exists. Else if, there is a unique \"drivee\" bit,\n return that bit. Otherwise, return None.\n \"\"\"\n if self._driver is not None:\n return self._driver._bit\n if len(self._driving) == 1:\n return self._driving[0]._bit\n return None\n\n def driven(self):\n return self._driver is not None\n\n def wired(self):\n return self._driver or self._driving\n\n def driving(self):\n \"\"\"\n Return a (possibly empty) list of all bits this bit is driving.\n \"\"\"\n return [driving._bit for driving in self._driving]\n\n @property\n def driver(self):\n return self._driver\n\n @property\n def bit(self):\n return self._bit\n","sub_path":"magma/wire_container.py","file_name":"wire_container.py","file_ext":"py","file_size_in_byte":3765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"496845101","text":"'''\nAuthor : Amar Naik\n\nProgram Description : This is Random Forest Classifier program. 
It reads data from 'train_data.json' and does the training\n Post the training the predictions and probabilities are applied on 'test_data.json'\n Finally a submission file 'Submission_Final.csv\" in the correct format is created \n \n\n\n'''\n# Load scikit's random forest classifier library\nfrom sklearn.ensemble import RandomForestClassifier\n\n# Load pandas\nimport pandas as pd\n\n# Load numpy\n#import numpy as np\nimport json\nimport csv\n\nfile_name_1 = \"E:/Problem2/train_data.json\"\n\nwith open(file_name_1, 'r') as jsonfile1:\n data_dict_1 = json.load(jsonfile1)\n \nfile_name_2 = \"E:/Problem2/test_data.json\"\nwith open(file_name_2, 'r') as jsonfile2:\n data_dict_2 = json.load(jsonfile2)\n\ndftrain = pd.DataFrame.from_dict(data_dict_1, orient='index')\n#train.reset_index(level=0, inplace=True)\ndftrain.rename(columns = {'index':'ID'},inplace=True)\ndftrain['segment'] = dftrain['segment'].map({'pos': 0, 'neg': 1})\nprint(dftrain.shape)\nprint(dftrain.columns[3])\n#print(dftrain[\"segment\"])\n\ndftest = pd.DataFrame.from_dict(data_dict_2, orient='index')\n#test.reset_index(level=0, inplace=True)\ndftest.rename(columns = {'index':'ID'},inplace=True)\nprint(dftest.shape)\nprint(dftest.columns)\n#print(test.head)\nfrom sklearn import preprocessing\nle = preprocessing.LabelEncoder()\ndftrain['titles'] = le.fit_transform(dftrain['titles'])\ndftest['titles'] = le.fit_transform(dftest['titles'])\ndftrain['genres'] = le.fit_transform(dftrain['genres'])\ndftest['genres'] = le.fit_transform(dftest['genres'])\ndftrain['cities'] = le.fit_transform(dftrain['cities'])\ndftest['cities'] = le.fit_transform(dftest['cities'])\ndftrain['tod'] = le.fit_transform(dftrain['tod'])\ndftest['tod'] = le.fit_transform(dftest['tod'])\ndftrain['dow'] = le.fit_transform(dftrain['dow'])\ndftest['dow'] = le.fit_transform(dftest['dow'])\n\nprint('Number of observations in the training data:', len(dftrain))\nprint('Number of observations in the test data:', len(dftest))\n\n# Create a list of the column's names without segment column\nSEGtarget = dftrain.columns[:3]\ny = pd.factorize(dftrain['segment'])[0]\n#print(y)\n\n#SignFacing = dftrain.columns[:0]\nprint(SEGtarget)\n# Create a random forest classifier. 
By convention, clf means 'classifier'\nclf = RandomForestClassifier(n_estimators=100, max_depth=None, min_samples_split=3, random_state=0)\nn = len(dftest)\n# Train the classifier to take the training features and learn how they relate\n# to the values in column 'segment'\nprint (\"Train started\")\n\nclf.fit(dftrain[SEGtarget], y)\n# Capture the prediction of each row\nprint (\"Prediction started\")\noutput1=clf.predict(dftest[SEGtarget])\n# Capture the predicted probabilities of each row \nprint (\"probability\")\noutput2=clf.predict_proba(dftest[SEGtarget])\n\n#Open the file to write \n#open_file_object = csv.writer(open(\"E:/Problem2/testRA3.csv\", \"w\"))\nheaders=[\"segment1\",\"segment\", \"ID\"]\n#open_file_object.writerow(headers) \nz = 0\nprint (\"starting writing to output file\")\n\nwith open('E:/Problem2/Submission_Intermed.csv', 'w') as f:\n #f.write(headers) \n w = csv.writer(f, quoting=csv.QUOTE_ALL)\n w.writerow(headers)\n for row in data_dict_2:\n m3=float(output2[z][1])\n m4=float(output2[z][0])\n istrr=[]\n istrr.append(m3)\n istrr.append(m4)\n istrr.append(row)\n wr = csv.writer(f, quoting=csv.QUOTE_ALL)\n wr.writerow(istrr)\n z += 1\n if n == z :\n break\n#open_file_object.writerow(row)\n\nprint (\"Total Records written :\",z)\n\n#CREATE FINAL FILE\n \nprint(\"starting creation of final file\")\ndf = pd.read_csv('E:/Problem2/Submission_Intermed.csv', sep=',')\n#df.columns=[\"segment1\",\"segment\", \"ID\"]\n# select desired columns\ndf = df[['ID', 'segment']]\ndf.to_csv('E:/Problem2/Submission_Final.csv', sep=',', index=False,header=True)\nprint (\"END:Total Records written :\",z)\nexit\n\n\n#-----------------END---------------#\n","sub_path":"Problem2_Classifier_Random_forest.py","file_name":"Problem2_Classifier_Random_forest.py","file_ext":"py","file_size_in_byte":4059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"340190523","text":"import pilas\n# Permite que este ejemplo funcion incluso si no has instalado pilas.\nimport sys\nsys.path.insert(0, \"..\")\n\n\npilas.iniciar()\n\n\npuntaje = pilas.actores.Puntaje()\n\n\ntexto = pilas.actores.Texto('Puntaje:')\ntexto.x = -80\ntexto.y = -2\n\ndef sumar_5_al_clickear(evento):\n puntaje.aumentar(5)\n \npilas.escena_actual().click_de_mouse.conectar(sumar_5_al_clickear)\npilas.avisar('Clickea la pantalla y agregaras 5 puntos al puntaje')\n\npilas.ejecutar()\n","sub_path":"Tutoriales-Ejemplos/Ejemplos/pilas-engine/pilas/ejemplos/ejemplos/interfaz/puntaje.py","file_name":"puntaje.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"362897594","text":"# 优美图库2--->翻页效果\nimport requests\nimport parsel\n\nfor page in range (1,4):\n print('==============================正在爬取第{}页数据========================='.format(page))\n base_url='https://www.umei.cc/meinvtupian/meinvxiezhen/{}.htm'.format(str(page))\n headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3756.400 QQBrowser/10.5.4043.400'}\n\n response= requests.get(base_url,headers=headers)\n response.encoding = response.apparent_encoding\n html= response.text\n # print(html)\n parse = parsel.Selector(html)\n # print(parse)\n href_list = parse.xpath('//div[@class= \"TypeList\"]/ul/li/a/@href').extract()\n # print(href_list)\n for href in href_list:\n # print(href)\n href_data=requests.get(href,headers=headers).text\n img = 
parsel.Selector(href_data)\n img_src = img.xpath('//div[@class=\"ImageBody\"]/p/a/img/@src').extract_first()\n # print(img_src)\n img_data = requests.get(img_src,headers=headers).content\n file_name = img_src.split('/')[-1]\n # print(file_name)\n\n with open (r'D:\\\\电脑桌面\\\\编程\\\\img2\\\\'+file_name,'wb') as f:\n print('下载完成:', file_name)\n f.write(img_data)\nprint('\\n'+'爬虫运行完毕!!!')","sub_path":"优美图库.py","file_name":"优美图库.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"428432047","text":"from neuralnetwork import NeuralNetwork\nimport numpy as np\n\n#constructing the XOR dataset\nX=np.array([[0,0],[0,1],[1,0],[1,1]])\ny=np.array([[1,0],[0,1],[0,1],[1,0]])\n\n#defining and traing our neural network\nnn=NeuralNetwork([2,2], alpha=0.5)\n\nnn.fit(X,y, epochs=20000)\n\nfor (x,target) in zip(X,y):\n\n pred=nn.predict(x)[0][0]\n step=1 if pred>0.5 else 0\n print(\"[INFO] data={},ground-truth={}, pred={:.4f}, step={}\".format(x,\n target[0], pred, step))\n'''\n\nputting no hidden layer we get the following\n[INFO] data=[0 0],ground-truth=0, pred=0.5161, step=1\n[INFO] data=[0 1],ground-truth=1, pred=0.5000, step=1\n[INFO] data=[1 0],ground-truth=1, pred=0.4839, step=0\n[INFO] data=[1 1],ground-truth=0, pred=0.4678, step=0\n\n'''\n","sub_path":"nn/nn_xor.py","file_name":"nn_xor.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"74465581","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io as sio\n\n\ndef build_dataset():\n\n\n data = sio.loadmat('amp_data.mat')\n data_x = data['amp_data']\n #plt.plot(data_x,'bx')\n np.random.seed(30)\n indices = np.array([1,2,3,4,5,6])\n discarded_data=np.delete(data_x,indices)\n final_data = discarded_data.reshape(1605394,21)\n final_data=np.random.permutation(final_data)\n #split the data set in training, validation and test sets\n training_size = int(final_data.shape[0]*0.7)\n validation_size = int(final_data.shape[0]*0.15)\n test_size = int(final_data.shape[0]*0.15)\n training_set,validation_set,test_set = final_data[:training_size,:], final_data[training_size:training_size+validation_size,:],final_data[validation_size+training_size:,:]\n\n X_shuffle_train = training_set[:,:20]\n y_shuffle_train = training_set[:,20]\n X_shuffle_val = validation_set[:,:20]\n y_shuffle_val = validation_set[:,20]\n X_shuffle_test = test_set[:,:20]\n y_shuffle_test = test_set[:,20]\n\n return [X_shuffle_train,X_shuffle_val,X_shuffle_test,y_shuffle_train,y_shuffle_val,y_shuffle_test]\n\ndef time_bias(time):\n\n ones = np.ones((time.shape))\n time_bias = np.vstack((ones,time)).T\n return time_bias\n\ndef build_feature_matrix(C,K):\n\n time=np.arange(0,1,1/20.0)\n t_bias = time_bias(time)\n f = t_bias[20-C:]\n for i in range(2,K+1):\n t = time[20-C:]**i\n f = np.vstack([f.T,t.T]).T\n return f\n\ndef main():\n\n X_train,X_val,X_test,y_train,y_val,y_test=build_dataset()\n time=np.arange(0,1,1/20.0)\n feature = build_feature_matrix(20,4)\n\n w_fit = np.linalg.lstsq(feature,(X_train[0,:].T))[0]\n plt.plot(time,X_train[0,:],'bx')\n plt.plot(time,feature.dot(w_fit),'r-')\n plt.show()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"assignment1/ex1.py","file_name":"ex1.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"399821972","text":"# Copyright 2018 Google 
Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS-IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Tests for backend.api.shelf_api.\"\"\"\n\nimport mock\n\nfrom protorpc import message_types\n\nimport endpoints\n\nfrom loaner.web_app.backend.api import root_api # pylint: disable=unused-import\nfrom loaner.web_app.backend.api import shelf_api\nfrom loaner.web_app.backend.api.messages import shelf_message\nfrom loaner.web_app.backend.models import device_model\nfrom loaner.web_app.backend.models import shelf_model # pylint: disable=unused-import\nfrom loaner.web_app.backend.testing import loanertest\n\n\nclass ShelfApiTest(loanertest.EndpointsTestCase):\n \"\"\"Test for the Shelf API.\"\"\"\n\n def setUp(self):\n super(ShelfApiTest, self).setUp()\n self.patcher_directory = mock.patch(\n '__main__.device_model.directory.DirectoryApiClient')\n self.mock_directoryclass = self.patcher_directory.start()\n self.addCleanup(self.patcher_directory.stop)\n self.service = shelf_api.ShelfApi()\n self.login_admin_endpoints_user()\n self.patcher_xsrf = mock.patch(\n '__main__.shelf_api.root_api.Service.check_xsrf_token')\n self.shelf = shelf_model.Shelf.enroll(\n user_email=loanertest.USER_EMAIL, location='NYC', capacity=10,\n friendly_name='GnG', latitude=40.6892534, longitude=-74.0466891,\n altitude=1.0)\n shelf1 = shelf_model.Shelf.enroll(\n user_email=loanertest.USER_EMAIL, location='MTV', capacity=20)\n shelf2 = shelf_model.Shelf.enroll(\n user_email=loanertest.USER_EMAIL, location='SAO', capacity=10)\n self.disabled_shelf = shelf_model.Shelf.enroll(\n user_email=loanertest.USER_EMAIL, location='SVL', capacity=10,\n friendly_name='Bay')\n self.disabled_shelf.disable(loanertest.USER_EMAIL)\n self.shelf_locations = [\n self.shelf.location, shelf1.location, shelf2.location,\n self.disabled_shelf.location]\n\n self.device1 = device_model.Device(\n serial_number='12345',\n enrolled=True,\n device_model='HP Chromebook 13 G1',\n current_ou='/',\n chrome_device_id='unique_id_1',\n damaged=False,\n ).put()\n self.device2 = device_model.Device(\n serial_number='54321',\n enrolled=True,\n device_model='HP Chromebook 13 G1',\n current_ou='/',\n chrome_device_id='unique_id_2',\n damaged=False,\n ).put()\n self.device3 = device_model.Device(\n serial_number='67890',\n enrolled=True,\n shelf=self.shelf.key,\n device_model='HP Chromebook 13 G1',\n current_ou='/',\n chrome_device_id='unique_id_3',\n damaged=False,\n ).put()\n self.device4 = device_model.Device(\n serial_number='ABC123',\n enrolled=True,\n shelf=self.shelf.key,\n device_model='HP Chromebook 13 G1',\n current_ou='/',\n chrome_device_id='unique_id_4',\n damaged=False,\n ).put()\n self.device_identifiers = [\n self.device1.get().serial_number, self.device2.get().serial_number,\n self.device3.get().serial_number]\n\n def tearDown(self):\n super(ShelfApiTest, self).tearDown()\n self.service = None\n\n @mock.patch('__main__.root_api.Service.check_xsrf_token')\n @mock.patch('__main__.shelf_model.Shelf.enroll')\n def test_enroll(self, mock_enroll, mock_xsrf_token):\n 
\"\"\"Test Enroll with mock methods.\"\"\"\n request = shelf_message.EnrollShelfRequest(\n location='nyc', capacity=100, friendly_name='test', latitude=12.5,\n longitude=12.5, altitude=2.0, responsible_for_audit='precise')\n response = self.service.enroll(request)\n mock_xsrf_token.assert_called_once()\n self.assertIsInstance(response, message_types.VoidMessage)\n\n def test_enroll_bad_request(self):\n request = shelf_message.EnrollShelfRequest(capacity=10)\n with self.assertRaisesRegexp(\n shelf_api.endpoints.BadRequestException,\n 'Entity has uninitialized properties'):\n self.service.enroll(request)\n request = shelf_message.EnrollShelfRequest(\n location='nyc', capacity=10, latitude=12.5)\n with self.assertRaisesRegexp(\n shelf_api.endpoints.BadRequestException,\n shelf_model._LAT_LONG_MSG):\n self.service.enroll(request)\n\n def test_get(self):\n shelf = shelf_model.Shelf.get(location='MTV')\n self.assertEqual(shelf.capacity, 20)\n\n @mock.patch('__main__.root_api.Service.check_xsrf_token')\n def test_get_by_location(self, mock_xsrf_token):\n request = shelf_message.GetShelfRequest(location='NYC')\n response = self.service.get(request)\n mock_xsrf_token.assert_called_once()\n self.assertEqual(self.shelf.location, response.location)\n self.assertEqual(self.shelf.friendly_name, response.friendly_name)\n\n def test_disable_by_location(self):\n request = shelf_message.GetShelfRequest(location='NYC')\n self.assertTrue(self.shelf.enabled)\n response = self.service.disable(request)\n self.assertFalse(self.shelf.enabled)\n self.assertIsInstance(response, message_types.VoidMessage)\n\n @mock.patch('__main__.root_api.Service.check_xsrf_token')\n def test_enable_by_location(self, mock_xsrf_token):\n request = shelf_message.GetShelfRequest(location='SVL')\n self.assertFalse(self.disabled_shelf.enabled)\n response = self.service.enable(request)\n mock_xsrf_token.assert_called_once()\n self.assertTrue(self.disabled_shelf.enabled)\n self.assertIsInstance(response, message_types.VoidMessage)\n\n @mock.patch('__main__.root_api.Service.check_xsrf_token')\n def test_update_using_location(self, mock_xsrf_token):\n request = shelf_message.UpdateShelfRequest(\n current_location='NYC', location='NYC-9th')\n response = self.service.update(request)\n mock_xsrf_token.assert_called_once()\n self.assertEqual(self.shelf.location, 'NYC-9th')\n shelf = shelf_model.Shelf.get(friendly_name='GnG')\n self.assertEqual(shelf.location, 'NYC-9th')\n self.assertIsInstance(response, message_types.VoidMessage)\n\n @mock.patch('__main__.root_api.Service.check_xsrf_token')\n def test_list_shelves(self, mock_xsrf_token):\n request = shelf_message.Shelf(enabled=True)\n response = self.service.list_shelves(request)\n mock_xsrf_token.assert_called_once()\n self.assertEqual(3, len(response.shelves))\n\n def test_list_shelves_with_page_token(self):\n request = shelf_message.Shelf(enabled=True, page_size=1)\n response = self.service.list_shelves(request)\n response_shelves = []\n while response.page_token or response.additional_results:\n for shelf in response.shelves:\n self.assertTrue(shelf.location in self.shelf_locations)\n response_shelves.append(shelf)\n request = shelf_message.Shelf(\n enabled=True, page_size=1, page_token=response.page_token)\n response = self.service.list_shelves(request)\n self.assertEqual(len(response_shelves), 3)\n\n @mock.patch('__main__.root_api.Service.check_xsrf_token')\n @mock.patch('__main__.shelf_api.logging.info')\n def test_audit_using_shelf_location(self, mock_logging, mock_xsrf_token):\n request 
= shelf_message.ShelfAuditRequest(\n location='NYC', device_identifiers=self.device_identifiers)\n response = self.service.audit(request)\n mock_xsrf_token.assert_called_once()\n mock_logging.assert_called()\n for identifier in self.device_identifiers:\n datastore_device = device_model.Device.get(serial_number=identifier)\n self.assertEqual(datastore_device.shelf.get().location, 'NYC')\n self.assertFalse(self.shelf.audit_requested)\n self.assertEqual(self.shelf.last_audit_by, loanertest.SUPER_ADMIN_EMAIL)\n self.assertIsInstance(response, message_types.VoidMessage)\n\n def test_audit_invalid_device(self):\n request = shelf_message.ShelfAuditRequest(\n location='NYC', device_identifiers=['Invalid'])\n with self.assertRaisesRegexp(\n endpoints.NotFoundException,\n shelf_api._DEVICE_DOES_NOT_EXIST_MSG % 'Invalid'):\n self.service.audit(request)\n\n def test_audit_unable_to_move_to_shelf(self):\n self.shelf.capacity = len(device_model.Device.list_devices(\n shelf=self.shelf.key))\n request = shelf_message.ShelfAuditRequest(\n location=self.shelf.location,\n device_identifiers=self.device_identifiers)\n with self.assertRaises(endpoints.BadRequestException):\n self.service.audit(request)\n\n @mock.patch('__main__.device_model.Device.list_devices')\n @mock.patch('__main__.shelf_api.get_shelf')\n def test_audit_remove_devices(\n self, mock_get_shelf, mock_model_list_devices):\n device = self.device2.get()\n device.shelf = self.shelf.key\n device.put()\n mock_model_list_devices.return_value = (\n [self.device2.get().key, self.device3.get().key,\n self.device4.get().key], None, False)\n mock_get_shelf.return_value = self.shelf\n request = shelf_message.ShelfAuditRequest(\n location=self.shelf.location,\n device_identifiers=[self.device3.get().serial_number])\n self.service.audit(request)\n self.assertEqual(self.device3.get().shelf, self.shelf.key)\n self.assertEqual(self.device2.get().shelf, None)\n self.assertEqual(self.device4.get().shelf, None)\n\n def test_get_shelf_using_location(self):\n location = self.shelf.location\n shelf = shelf_api.get_shelf(location=location)\n self.assertEqual(shelf, self.shelf)\n\n def test_get_shelf_using_location_error(self):\n location = 'Not_Valid'\n with self.assertRaisesRegexp(\n endpoints.NotFoundException,\n shelf_api._SHELF_DOES_NOT_EXIST_MSG % location):\n shelf_api.get_shelf(location=location)\n\n\nif __name__ == '__main__':\n loanertest.main()\n","sub_path":"loaner/web_app/backend/api/shelf_api_test.py","file_name":"shelf_api_test.py","file_ext":"py","file_size_in_byte":10090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"358984683","text":"from django.conf.urls import url\nfrom . import views\n\n
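# The detail route's named group (?P<pk>\\d+) supplies the pk kwarg that Django's DetailView expects\n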
urlpatterns = [\n\n\turl(r'^$', \n\t\tviews.FacturasListView.as_view(), \n\t\tname='list'), \n\n\turl(r'^add/$', \n\t\tviews.FacturaCreateView.as_view(), \n\t\tname='add'), \n\n\turl(r'^(?P<pk>\\d+)/detail/$', \n\t\tviews.FacturaDetailView.as_view(), \n\t\tname='detail'), \n\t\n\n]","sub_path":"vgex_erp/facturas/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"513592202","text":"from keras.callbacks import BaseLogger\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport json\nimport os\n\nclass TrainingMonitor(BaseLogger):\n def __init__(self, figPath, jsonPath=None, startAt=0):\n # Store the output path for the figure, the path to the JSON-serialized file,\n # and the starting epoch\n \"\"\"\n\n :param figPath: The path to the output that we can use to visualize loss and accuracy over time\n :param jsonPath: An optional path used to serialize the loss and accuracy values as a JSON file\n :param startAt: The starting epoch that training is resumed at when using ctrl+c training\n \"\"\"\n super(TrainingMonitor, self).__init__()\n self.figPath = figPath\n self.jsonPath = jsonPath\n self.startAt = startAt\n\n\n def on_train_begin(self, logs={}):\n # Initialize the history dictionary for the losses\n self.history = {}\n\n # If the JSON history path exists, load the training history\n if self.jsonPath is not None:\n if os.path.exists(self.jsonPath):\n self.history = json.loads(open(self.jsonPath).read())\n\n # Check to see if a starting epoch was supplied\n if self.startAt > 0:\n # Loop over the entries in the history log and\n # trim any entries that are past the starting epoch\n for key in self.history.keys():\n self.history[key] = self.history[key][:self.startAt]\n\n\n def on_epoch_end(self, epoch, logs={}):\n # Loop over the logs and update the loss, accuracy, etc.\n # for the entire training process\n for (key, value) in logs.items():\n log = self.history.get(key, [])\n log.append(value)\n self.history[key] = log\n\n # Check to see if the training history should be serialized to file\n if self.jsonPath is not None:\n file = open(self.jsonPath, \"w\")\n file.write(json.dumps(self.history))\n file.close()\n\n # Ensure at least 2 epochs have passed before plotting\n if len(self.history[\"loss\"]) > 1:\n # Plot the training loss and accuracy\n N = np.arange(0, len(self.history[\"loss\"]))\n plt.style.use(\"ggplot\")\n plt.figure()\n plt.plot(N, self.history[\"loss\"], label=\"train_loss\")\n plt.plot(N, self.history[\"val_loss\"], label=\"val_loss\")\n plt.plot(N, self.history[\"acc\"], label=\"train_acc\")\n plt.plot(N, self.history[\"val_acc\"], label=\"val_acc\")\n plt.title(\"Training Loss and Accuracy [Epoch {}]\".format(len(self.history[\"loss\"])))\n plt.xlabel(\"Epoch #\")\n plt.ylabel(\"Loss/Accuracy\")\n plt.legend()\n\n # Save the figure\n plt.savefig(self.figPath)\n plt.close()\n","sub_path":"dogs_vs_cats/trainingmonitor.py","file_name":"trainingmonitor.py","file_ext":"py","file_size_in_byte":2879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"450761505","text":"import csv\r\n\r\n# Open the transmitter data file.\r\nfilename = 'transmitter_info.csv'\r\n# Create empty lists for the latitudes and longitudes.\r\nlats, lons, labels, tunlon, tunlat = [], [], [], [], []\r\n\r\n# Read through the entire file, skip the first line,\r\n# and pull out just the lats and lons.\r\nwith open(filename) as f:\r\n
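 # Column layout used below: transmitter name in col 0, lat/lon in cols 2-3, link endpoint lon/lat in cols 5-6.\r\n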
 # Create a csv reader object.\r\n reader = csv.reader(f)\r\n \r\n # Ignore the header row.\r\n next(reader)\r\n \r\n # Store the latitudes and longitudes in the appropriate lists.\r\n for row in reader:\r\n labels.append(row[0])\r\n lats.append(float(row[2]))\r\n lons.append(float(row[3]))\r\n tunlon.append(float(row[5]))\r\n tunlat.append(float(row[6]))\r\n#Tunis coordinates\r\ntunis_lat=36.5\r\ntunis_lon=10.08\r\n# --- Build Map ---\r\nfrom mpl_toolkits.basemap import Basemap\r\nimport matplotlib.pyplot as plt\r\n#from matplotlib.widgets import Cursor\r\nimport numpy as np\r\nfrom datetime import datetime\r\nfig = plt.figure(figsize=[10,7]) \r\nmap = Basemap(projection='mill', area_thresh = 1000.0,\r\n lat_0=0, lon_0=10, resolution = 'l', suppress_ticks=True) #resolution: 'l' for low, 'h' for high, 'c' for crude\r\n\r\n\r\nmap.drawcoastlines(color='#A3A3A3')\r\nmap.drawcountries(color='#A3A3A3')\r\nmap.drawmapboundary(fill_color='lightblue')\r\nmap.fillcontinents(color='#FFFFFF',lake_color='lightblue')\r\nmap.drawparallels(np.arange(-90,100,30),labels=[1,0,0,0],color='#A1A866', linewidth=.2)\r\nmap.drawmeridians(np.arange(map.lonmin,map.lonmax+30,30),labels=[0,0,0,1],color='#A1A866', linewidth=.2)\r\n \r\nx,y = map(lons, lats)\r\nv,w = map(tunis_lon, tunis_lat)\r\nmap.plot(x, y, 'ro', markersize=10, label='VLF Transmitter')\r\nmap.plot(v, w, 'bo', markersize=10, label='VLF Receiver')\r\nfor label, xpt, ypt in zip(labels, x, y):\r\n plt.text(xpt+10000, ypt+5000, label, color='k',fontsize=12, fontweight='bold')\r\n\r\nfor wpt, vpt, xpt, ypt in zip(tunlon, tunlat, lons, lats):\r\n # draw great circle route between tunisia and vlf trx\r\n \r\n map.drawgreatcircle(wpt,vpt,xpt,ypt,linewidth=2, linestyle='--',color='k')\r\n \r\n# shade the night areas, with alpha transparency so the\r\n# map shows through. 
Use current time in UTC.\r\n#date = datetime.today()\r\ndate=datetime(2016,6, 21,12)\r\nCS=map.nightshade(date)\r\n#plt.title('Day/Night Map for %s (UT)' % date.strftime(\"%d %b %Y %H:%M:%S\"))\r\n\r\n#show the long/lat coordinates under the cursor\r\nax = plt.gca()\r\ndef format_coord(x, y):\r\n return 'Longitude=%.4f, Latitude=%.4f'%(map(x, y, inverse = True))\r\nax.format_coord = format_coord\r\n\r\n#cursor\r\n## set useblit = True on gtkagg for enhanced performance\r\n#cursor = Cursor(ax, useblit=True, color='yellow', linewidth=1 )\r\n\r\n#make legend\r\nlegend = plt.legend(loc='lower left',fontsize=8,frameon=True,title='Legend',markerscale=1,prop={'size':10})\r\nlegend.get_title().set_fontsize('20')\r\nplt.savefig(\"carte_vlf_dn.pdf\", dpi=50)\r\nplt.show()\r\n","sub_path":"maps-basemap/DayNighTransmitters.py","file_name":"DayNighTransmitters.py","file_ext":"py","file_size_in_byte":2882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"256640050","text":"def main(result):\r\n print(\"Articles\")\r\n print(\"1:Add | 2:Edit | 3:Delete | 4: Back\")\r\n buttonpressed = input()\r\n select_opt_manage_articles(buttonpressed,result)\r\n\r\n\r\ndef select_opt_manage_articles(buttonpressed,result):\r\n\r\n if buttonpressed == '1':\r\n #print(\"Add\")\r\n from admin_pages import add_article\r\n add_article.main(result)\r\n\r\n elif buttonpressed == '2':\r\n #print(\"Edit\")\r\n from admin_pages import edit_article\r\n edit_article.main(result)\r\n\r\n elif buttonpressed == '3':\r\n #print(\"Delete\")\r\n from admin_pages import delete_article\r\n delete_article.main(result)\r\n\r\n elif buttonpressed == '4':\r\n print(\"Going back\")\r\n from admin_pages import admin_homepage\r\n admin_homepage.main(result)\r\n\r\n else:\r\n print(\"Invalid option.\")\r\n # re-prompt for a valid option\r\n main(result)\r\n\r\n","sub_path":"admin_pages/manage_articles.py","file_name":"manage_articles.py","file_ext":"py","file_size_in_byte":913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"78841973","text":"#Store a few football clubs, grouped by home town, in a dictionary. Choose an appropriate variable name.\r\n\r\n
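# Values are either a single club (a string) or a list of clubs, so lookups need to handle both shapes.\r\n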
football_clubs = {\"Newport\": \"Newport County\",\r\n \"Leicester\": \"Leicester City\",\r\n \"Manchester\": [\"Manchester City\", \"Manchester United\"],\r\n \"London\": [\"Brentford\", \"Queens Park Rangers\", \"Chelsea\", \"Fulham\", \"Arsenal\", \"West Ham\", \"Millwall\"],\r\n \"Grimsby\": \"Grimsby Town\",\r\n \"Fareham\": \"Fareham Town\"} \r\n\r\n# Return the dictionary\r\n\r\nfootball_clubs","sub_path":"solutions/chapter_2/object_type_dictionary.py","file_name":"object_type_dictionary.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"196941761","text":"f = open(\"files/cities.csv\", \"r\")\ncontent = f.read() # read the file and store its contents\nf.close()\n\n# content2 = f.read() # not possible, the file has been closed\n\nrows = content.splitlines()\ncityNameIndex = 8\nn = 0 # counts the cities matching the search criteria\n\nfor r in rows:\n cols = r.split(\",\") # comma as the column separator\n cityName = cols[cityNameIndex].strip().strip(\"\\\"\") # strip leading/trailing whitespace, then leading/trailing double quotes\n #print(cityName, \"=>\", len(cityName))\n if cityName.startswith(\"San\"):\n n += 1\n\nprint(\"Number of cities found: %d\" % n)\n\n# r = \"toto-tata-titi\".split(\"-\")\n# print(r)\n\n# s = \" \\\"Yakima\\\"\".strip().strip('\"')\n# print(len(s))\n\n \n\n\n\n","sub_path":"python/csvDemo.py","file_name":"csvDemo.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"150311228","text":"import json\nfrom SPARQLWrapper import SPARQLWrapper\nimport urllib.parse\nimport requests\nimport yaml\n\nendpoint = \"https://jpsearch.go.jp/rdf/sparql\"\n\nsparql = SPARQLWrapper(endpoint=endpoint, returnFormat='json')\n\n# assumed namespaces: jps = the JPSearch property terms, schema = schema.org\nq = (\"\"\"\n PREFIX jps: <https://jpsearch.go.jp/term/property#>\n PREFIX schema: <http://schema.org/>\n SELECT ?provider (count(?cho) as ?count) WHERE {\n ?cho jps:sourceInfo/schema:provider ?provider .\n } GROUP BY ?provider ORDER BY ?count\n\"\"\")\nsparql.setQuery(q)\n\nurl = endpoint+\"?query=\"+urllib.parse.quote(q)+\"&format=json&output=json&results=json\"\n\nresults = requests.get(url).json()\n\nsettings = {}\n\nfor i in range(len(results[\"results\"][\"bindings\"])):\n obj = results[\"results\"][\"bindings\"][i]\n provider = obj[\"provider\"][\"value\"]\n count = obj[\"count\"][\"value\"]\n\n q = (\"\"\"\n PREFIX jps: <https://jpsearch.go.jp/term/property#>\n PREFIX schema: <http://schema.org/>\n SELECT DISTINCT ?cho ?label ?image WHERE {\n ?cho rdfs:label ?label ; jps:sourceInfo/schema:provider <\"\"\"+provider+\"\"\"> .\n OPTIONAL {?cho schema:image ?image}\n } LIMIT 1\n \"\"\")\n\n sparql.setQuery(q)\n\n url = endpoint+\"?query=\"+urllib.parse.quote(q)+\"&format=json&output=json&results=json\"\n\n items = requests.get(url).json()\n\n item = items[\"results\"][\"bindings\"][0]\n uri = item[\"cho\"][\"value\"]\n prefix = uri.split(\"https://jpsearch.go.jp/data/\")[1].split(\"-\")[0]\n\n provider_local = provider.split(\"/\")[-1]\n \n print(i+1, len(results[\"results\"][\"bindings\"]), prefix, provider_local, count)\n settings[prefix] = provider_local\n\nwith open(\"settings.yml\", \"w\") as yf:\n yaml.dump(settings, yf, encoding='utf-8', allow_unicode=True, sort_keys=False)","sub_path":"301_update_settings.py","file_name":"301_update_settings.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
+{"seq_id":"233554363","text":"def find_match(p, s):\n # Let i and j be loop variables\n m = len(p) # length of pattern string\n n = len(s) # length of actual string\n for i in range(n - m + 1):\n j = 0\n while j < m and s[i + j] == p[j]:\n j += 1\n if j == m:\n return i\n\n\nprint(\"Enter the string \\n\")\ns = input()\nprint(\"Enter the pattern string \\n\")\np = input()\npos = find_match(p, s)\nprint(\"Matching pattern found at position\", pos)\n","sub_path":"PatternMatching.py","file_name":"PatternMatching.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"351962284","text":"import turtle\nimport os\nimport random\nimport math\nimport sys\n\n# Create a screen object\nwn = turtle.Screen()\nwn.setup(700, 700)\nwn.bgcolor('black')\nwn.title('I love you Mama!')\n\n# Create Boarder\nboarder_pen = turtle.Turtle()\nboarder_pen.color('cyan')\nboarder_pen.pensize(5)\nboarder_pen.penup()\nboarder_pen.speed(0)\nboarder_pen.setposition(-300, -300)\nboarder_pen.pendown()\nfor i in range(4):\n boarder_pen.fd(600)\n boarder_pen.lt(90)\nboarder_pen.penup()\nboarder_pen.hideturtle()\n\n# Create Player\nplayer = turtle.Turtle()\nplayer.speed(0)\nplayer.color('yellow')\nplayer.turtlesize(1)\nplayer.showturtle()\nplayer.shape('triangle')\nplayer.setheading(90)\nplayer.penup()\nplayer.setposition(0, -250)\nplayer_move_pixels = 15\n\n# Create enemies:\nenemies = []\nfor i in range(5):\n e = turtle.Turtle()\n e.speed(0)\n e.color('magenta')\n e.turtlesize(1.5)\n e.showturtle()\n e.shape('circle')\n e.penup()\n e.setposition(-250 + i * random.randint(40, 60), 250)\n enemies.append(e)\n\nenemy_move_pixels = 2\n\n# Create Bullet\nbullet = turtle.Turtle()\nbullet.speed(0)\nbullet.color('orange')\nbullet.turtlesize(0.3)\nbullet.hideturtle()\nbullet.shape('triangle')\nbullet.setheading(90)\nbullet.penup()\nbullet.setposition(0, -400)\nbullet_move_pixels = 30\nbullet_status = 'ready'\n\ndef move_left():\n x = player.xcor()\n x -= player_move_pixels\n if x < -280:\n x = -280\n player.setx(x)\n\ndef move_right():\n x = player.xcor()\n x += player_move_pixels\n if x > 280:\n x = 280\n player.setx(x)\n\n\ndef fire():\n global bullet_status\n if bullet_status == 'ready':\n x = player.xcor()\n bullet.setposition(x, -245)\n bullet.showturtle()\n bullet_status = 'on_fire'\n pass\n\n\ndef is_collision(a, b): # a = (x1, y1), b = (x2, y2)\n x1 = a[0]\n y1 = a[1]\n x2 = b[0]\n y2 = b[1]\n dis = math.sqrt((x1 - x2) * (x1 - x2) + (y1 - y2) * (y1 - y2))\n#dis = math.sqrt(math.pow((a[0] - b[0]), 2) + math.pow((a[1] - b[1]), 2))\n if dis <= 15:\n return True\n else:\n return False\n\nwn.listen()\nwn.onkey(move_left, 'Left')\nwn.onkey(move_right, 'Right')\nwn.onkey(fire, 'space')\n\nwhile True:\n for enemy in enemies:\n x = enemy.xcor()\n x += enemy_move_pixels\n enemy.setx(x)\n if x > 280 or x < -280:\n enemy_move_pixels *= -1\n\n if bullet_status == 'on_fire':\n y = bullet.ycor()\n y += bullet_move_pixels\n bullet.sety(y)\n\n if bullet.ycor() >= 350:\n bullet_status = 'ready'\n bullet.hideturtle()\n bullet.sety(-400)\n\n for enemy in enemies:\n a = (enemy.xcor(), enemy.ycor())\n b = (bullet.xcor(), bullet.ycor())\n if is_collision(a, b):\n sys.exit()\n\n\n\n wn.update()\n\n\n\n\n\n\n\n","sub_path":"HB_ADV_L11.py","file_name":"HB_ADV_L11.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"35641271","text":"import webbrowser\n\nbrowsers = 
[\"firefox\", \"chromium\", \"firefox-esr\", \"chromium-browser\", 'mozilla', 'netscape', 'safari', 'google-chrome', 'chrome', 'galeon', 'epiphany', 'skipstone', 'kfmclient', 'konqueror', 'kfm', 'mosaic', 'opera', 'grail', 'links', 'elinks', 'lynx', 'w3m', 'windows-default', 'macosx', \"chromium\"]\ni = 0\n\nwhile i < len(browsers):\n browser = browsers[i]\n try:\n installed = webbrowser.get(browser)\n installed.open(\"http://127.0.0.1:8000/\")\n break\n except:\n i += 1\n continue\n\nif i == len(browsers):\n print(\"\\n\\nPython didn't find your browser. You can access the website manually by entering the following URL in wour web browser: http://localhost/\\n\\n\")","sub_path":"app/website-opening.py","file_name":"website-opening.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"146029495","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/land/copernicus/content/userdataschema.py\n# Compiled at: 2017-12-07 03:50:15\nfrom zope.component import getUtility\nfrom plone.app.users.userdataschema import IUserDataSchema\nfrom plone.app.users.userdataschema import IUserDataSchemaProvider\nfrom plone.app.users.browser.register import RegistrationForm\nfrom plone.app.users.browser.personalpreferences import UserDataPanel\nfrom plone.app.users.browser.register import CantChoosePasswordWidget\nfrom Products.CMFCore.interfaces import ISiteRoot\nfrom plone.app.controlpanel.widgets import MultiCheckBoxVocabularyWidget\nfrom zope import schema\nfrom zope.schema.vocabulary import SimpleVocabulary, SimpleTerm\nfrom zope.interface import implements\nfrom zope.browserpage import ViewPageTemplateFile\nfrom zope.formlib.boolwidgets import CheckBoxWidget\nfrom land.copernicus.content.config import EEAMessageFactory as _\nimport re\nprofessional_thematic_domain_options = SimpleVocabulary([\n SimpleTerm(value='Agriculture', title=_('Agriculture')),\n SimpleTerm(value='Architectural and Landscape Design', title=_('Architectural and Landscape Design')),\n SimpleTerm(value='Atmosphere', title=_('Atmosphere')),\n SimpleTerm(value='Climate Change', title=_('Climate Change')),\n SimpleTerm(value='Demography', title=_('Demography')),\n SimpleTerm(value='Ecology and Environment', title=_('Ecology and Environment')),\n SimpleTerm(value='Emergency Management', title=_('Emergency Management')),\n SimpleTerm(value='Energy, Utilities and Industrial Infrastructure', title=_('Energy, Utilities and Industrial Infrastructure')),\n SimpleTerm(value='Forestry', title=_('Forestry')),\n SimpleTerm(value='Health', title=_('Health')),\n SimpleTerm(value='Hydrography', title=_('Hydrography')),\n SimpleTerm(value='Mapping', title=_('Mapping')),\n SimpleTerm(value='Security', title=_('Security')),\n SimpleTerm(value='Snow and Ice', title=_('Snow and Ice')),\n SimpleTerm(value='Soils and Geology', title=_('Soils and Geology')),\n SimpleTerm(value='Tourism and Recreation', title=_('Tourism and Recreation')),\n SimpleTerm(value='Transport and Routing', title=_('Transport and Routing')),\n SimpleTerm(value='Urban and Spatial Planning', title=_('Urban and Spatial Planning'))])\ninstitutional_domain_options = SimpleVocabulary([\n SimpleTerm(value='Citizen', title=_('Citizen')),\n SimpleTerm(value='Commercial', title=_('Commercial')),\n SimpleTerm(value='Education', title=_('Education')),\n 
SimpleTerm(value='NGO', title=_('NGO')),\n SimpleTerm(value='Public Authority', title=_('Public Authority')),\n SimpleTerm(value='Research and development', title=_('Research and development'))])\n\ndef validateAccept(value):\n if value is not True:\n return False\n return True\n\n\ndef validate_phone(value):\n phone_re = re.compile('(\\\\d{3})\\\\D*(\\\\d{3})\\\\D*(\\\\d{4})\\\\D*(\\\\d*)$', re.VERBOSE)\n if phone_re.match(value):\n return True\n return False\n\n\nclass DisclaimerWidget(CheckBoxWidget):\n \"\"\" Widget for accept terms of use in user registration \"\"\"\n template = ViewPageTemplateFile('browser/templates/disclaimer-widget.pt')\n\n def __call__(self):\n val = super(DisclaimerWidget, self).__call__()\n self.val = val\n return self.template()\n\n\nclass CopernicusRegistrationForm(RegistrationForm):\n\n @property\n def form_fields(self):\n if not self.showForm:\n return []\n portal = getUtility(ISiteRoot)\n defaultFields = super(RegistrationForm, self).form_fields\n if portal.getProperty('validate_email', True):\n defaultFields = defaultFields.omit('password', 'password_ctl')\n defaultFields['mail_me'].custom_widget = CantChoosePasswordWidget\n defaultFields['disclaimer'].custom_widget = DisclaimerWidget\n else:\n defaultFields = defaultFields.omit('mail_me')\n defaultFields = defaultFields.omit('fullname')\n thematic_domain = defaultFields['thematic_domain']\n institutional_domain = defaultFields['institutional_domain']\n thematic_domain.custom_widget = MultiCheckBoxVocabularyWidget\n institutional_domain.custom_widget = MultiCheckBoxVocabularyWidget\n return defaultFields\n\n\nclass CustomizedUserDataPanel(UserDataPanel):\n\n def __init__(self, context, request):\n super(CustomizedUserDataPanel, self).__init__(context, request)\n self.form_fields = self.form_fields.omit('email', 'first_name', 'last_name', 'description', 'disclaimer', 'fax', 'fullname', 'home_page', 'job_title', 'location', 'mobile', 'postal_address', 'portrait', 'pdelete', 'organisation', 'reason', 'telephone')\n thematic_domain = self.form_fields['thematic_domain']\n thematic_domain.custom_widget = MultiCheckBoxVocabularyWidget\n institutional_domain = self.form_fields['institutional_domain']\n institutional_domain.custom_widget = MultiCheckBoxVocabularyWidget\n\n def validate(self, action, data):\n errors = super(UserDataPanel, self).validate(action, data)\n return errors\n\n\nclass UserDataSchemaProvider(object):\n implements(IUserDataSchemaProvider)\n\n def getSchema(self):\n \"\"\"\n \"\"\"\n return IEnhancedUserDataSchema\n\n\nclass IEnhancedUserDataSchema(IUserDataSchema):\n \"\"\" Use all the fields from the default user data schema, and add various\n extra fields.\n \"\"\"\n first_name = schema.TextLine(title=_('label_first_name', default='First Name'), description=_('help_first_name', default='Enter your first name.'), required=True)\n last_name = schema.TextLine(title=_('label_last_name', default='Last Name'), description=_('help_last_name', default='Enter your last name.'), required=True)\n thematic_domain = schema.List(title=_('label_thematic_domain', default='Professional thematic domain'), value_type=schema.Choice(vocabulary=professional_thematic_domain_options))\n institutional_domain = schema.List(title=_('label_institutional_domain', default='Institutional domain'), value_type=schema.Choice(vocabulary=institutional_domain_options))\n reason = schema.TextLine(title=_('label_reason', default='Reason to create the account'), description=_('help_reason', default='Fill in the reason for 
account creation'), required=False)\n job_title = schema.TextLine(title=_('label_job_title', default='Job title'), description=_('help_job_title', default='Fill in the job title'), required=False)\n postal_address = schema.Text(title=_('label_postal_address', default='Postal address'), description=_('help_postal_address', default='Fill in the postal address'), required=False)\n telephone = schema.ASCIILine(title=_('label_telephone', default='Telephone number'), description=_('help_telephone', default='Fill in the telephone number'), required=False, constraint=validate_phone)\n mobile = schema.ASCIILine(title=_('label_mobile', default='Mobile telephone number'), description=_('help_mobile', default='Fill in the mobile telephone number'), required=False, constraint=validate_phone)\n fax = schema.ASCIILine(title=_('label_fax', default='Fax number'), description=_('help_fax', default='Fill in the fax number'), required=False, constraint=validate_phone)\n organisation = schema.TextLine(title=_('label_organisation', default='Organisation'), description=_('help_organisation', default='Fill in the organisation'), required=False)\n disclaimer = schema.Bool(title=_('label_disclaimer', default='Accept terms of use'), description=_('help_disclaimer', default='Tick this box to indicate that you have found, read and accepted the terms of use for this site. Your email will not be further distributed to third parties. The registration is only used for reporting purposes to the EP and Council.'), required=True, constraint=validateAccept)","sub_path":"pycfiles/land.copernicus.content-3.8-py2.7/userdataschema.py","file_name":"userdataschema.py","file_ext":"py","file_size_in_byte":7895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"127940945","text":"import numpy as np\nimport pandas as pd\nimport requests\nimport json\nfrom argparse import ArgumentParser\n\nparser = ArgumentParser()\nparser.add_argument(\"key\", type=str, help=\"add the X-Api-Key\")\nargs = parser.parse_args()\n\nnum_results_max = 1000\nrecords = 250000\nfeatures = 30\n\n# test\n# url = 'https://data-science-challenge-235313.appspot.com/get_data?start_at=1&no_results=3'\n# header = {'X-Api-Key': args.key}\n# data = requests.get(url, headers=header).json()\n# data = json.dumps(data, indent=4, sort_keys=True)\n# print(data)\n\nfor i in range(records // num_results_max):\n url = f'https://data-science-challenge-235313.appspot.com/get_data?start_at={i*num_results_max}&no_results={num_results_max}'\n header = {'X-Api-Key': args.key}\n data = requests.get(url, headers=header).json()\n if i == 0:\n df = pd.DataFrame(data['xs'], columns=[\n f'x{i}' for i in range(features)])\n df['ids'] = data['ids']\n df['ys'] = data['ys']\n else:\n df2 = pd.DataFrame(data['xs'], columns=[\n f'x{i}' for i in range(features)])\n df2['ids'] = data['ids']\n df2['ys'] = data['ys']\n df = df.append(df2, ignore_index=True)\n\ndf = df.sort_values(by=['ids'])\ndf.to_csv('dataset.csv', index=False)\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"271202088","text":"from qcloud_cos import CosConfig\nfrom qcloud_cos import CosS3Client\nfrom SaaS.local_settings import *\n\nsecret_id = TENCENT_MY_COS_ID\nsecret_key = TENCENT_MY_COS_KEY\n\nregion = 'ap-chengdu' # replace with your own Region\n\nconfig = CosConfig(Region=region, SecretId=secret_id, SecretKey=secret_key)\n\n
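# Tencent COS bucket names carry the account APPID as a suffix (\"<name>-<APPID>\"), as in 'test-1251317460' below\n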
client = CosS3Client(config)\n\nresponse = client.create_bucket(\n Bucket='test-1251317460',\n ACL=\"public-read\" # private / public-read / public-read-write\n)","sub_path":"saas/scripts/cos_bulket_demo.py","file_name":"cos_bulket_demo.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"357102529","text":"from tkinter import *\nfrom tkinter import messagebox\n\nclass Gui(Tk):\n\n def __init__(self):\n super().__init__()\n\n # load resources\n self.default_image = PhotoImage(file=\"C:/Users/kkcolin.ENTERPRISE/Documents/GitHub/COM404/2-guis/ReviewTCA2/CurrencyConverter/box1.gif\")\n self.tick_image = PhotoImage(file=\"C:/Users/kkcolin.ENTERPRISE/Documents/GitHub/COM404/2-guis/ReviewTCA2/CurrencyConverter/tick1a.gif\")\n self.cross_image = PhotoImage(file=\"C:/Users/kkcolin.ENTERPRISE/Documents/GitHub/COM404/2-guis/ReviewTCA2/CurrencyConverter/cross1a.gif\")\n\n # set window properties\n self.title(\"Converter Program\")\n self.configure(bg=\"#ffe8e8\")\n\n # add components\n self.__add_outer_frame()\n self.__add_heading_label()\n self.__add_amount_label()\n self.__add_amount_entry()\n self.__add_default_image_label()\n self.__add_from_label()\n self.__add_from_optionmenu()\n self.__add_to_label()\n self.__add_to_optionmenu()\n self.__add_buttons_frame()\n self.__add_clear_button()\n self.__add_message_box_label()\n self.__add_convert_button()\n \n \n# Outer frame \n def __add_outer_frame(self):\n self.outer_frame = Frame()\n self.outer_frame.grid(row=0, column=0) \n self.outer_frame.configure(bg=\"#ffe8e8\", height=400, width=500, padx=10, pady=10)\n\n def __add_heading_label(self):\n self.heading_label = Label(self.outer_frame)\n self.heading_label.grid(row=0, column=1, columnspan=2) \n self.heading_label.configure(bg=\"#ffe8e8\", font=\"Arial 18\", text=\"Currency Converter:\", padx=54)\n\n def __add_amount_label(self):\n self.instruction_label = Label(self.outer_frame)\n self.instruction_label.grid(row=1, column=0, columnspan=2, sticky=W)\n self.instruction_label.configure(bg=\"#ffe8e8\", font=\"Arial 10\", text=\"Amount\")\n\n#Entry Function\n def __add_amount_entry(self):\n self.amount_entry = Entry(self.outer_frame)\n self.amount_entry.grid(row=2, column=0, columnspan=5, sticky=W)\n self.amount_entry.configure(fg=\"#f00\", width=50)\n # validate the entry after every key release\n self.amount_entry.bind(\"<KeyRelease>\", self.__amount_keyboard_entry)\n \n# Add image label to Frame with grid\n def __add_default_image_label(self):\n self.default_image_label = Label(self.outer_frame)\n self.default_image_label.grid(row=2, column=4)\n self.default_image_label.configure(image=self.default_image, height=15, width=15) \n\n def __amount_keyboard_entry(self, event):\n response = self.amount_entry.get()\n if response == \"\":\n self.default_image_label.configure(image=self.cross_image)\n else:\n self.default_image_label.configure(image=self.tick_image)\n \n def __add_from_label(self):\n self.from_label = Label(self.outer_frame)\n self.from_label.grid(row=3, column=0, columnspan=2, sticky=W)\n self.from_label.configure(bg=\"#ffe8e8\", font=\"Arial 10\", text=\"From\") \n\n def __add_from_optionmenu(self):\n self.SelectionVar = StringVar()\n self.SelectionVar.set(\"GBP\")\n self.from_optionmenu = Spinbox(self.outer_frame, textvariable=self.SelectionVar, values=(\"GBP\", \"Euros\"))\n self.from_optionmenu.grid(row=4, column=0, columnspan=5, sticky=W)\n self.from_optionmenu.configure(width=50)\n\n def __add_to_label(self):\n self.to_label = Label(self.outer_frame)\n
 self.to_label.grid(row=5, column=0, columnspan=2, sticky=W)\n self.to_label.configure(bg=\"#ffe8e8\", font=\"Arial 10\", text=\"To\") \n\n def __add_to_optionmenu(self):\n self.Selection2Var = StringVar()\n self.Selection2Var.set(\"Euros\")\n self.to_optionmenu = Spinbox(self.outer_frame, textvariable=self.Selection2Var, values=(\"Euros\", \"USD\"))\n self.to_optionmenu.grid(row=6, column=0, columnspan=5, sticky=W)\n self.to_optionmenu.configure(width=50)\n\n# Button1 Frame\n def __add_buttons_frame(self):\n self.buttons_frame = Frame(self.outer_frame)\n self.buttons_frame.grid(row=7, column=0, columnspan=4, sticky=N+E+S+W)\n self.buttons_frame.configure(bg=\"#ffe8e8\", padx=80, pady=20)\n\n def __add_clear_button(self):\n self.clear_button = Button(self.buttons_frame)\n self.clear_button.pack(side=LEFT)\n self.clear_button.configure(bg=\"#FFF\", text=\"Clear\", width=10)\n self.clear_button.bind(\"<Button-1>\", self.__clear_button_clicked)\n\n def __add_convert_button(self):\n self.convert_button = Button(self.buttons_frame)\n self.convert_button.pack(side=RIGHT)\n self.convert_button.configure(bg=\"#FFF\", text=\"Convert\", width=10) \n self.convert_button.bind(\"<Button-1>\", self.__convert_button_clicked)\n\n def __add_message_box_label(self):\n self.MessageVar = StringVar()\n self.MessageVar.set(\"\\nSystem Message Displayed Here\\n\")\n self.message_box_label = Message(self.outer_frame)\n self.message_box_label.grid(row=8, column=0, columnspan=5, sticky=N+E+S+W, pady=10)\n self.message_box_label.configure(bg=\"#fffbce\", fg=\"blue\", relief=SUNKEN, textvariable=self.MessageVar, width=200)\n\n def __convert_button_clicked(self, event):\n # only convert when the amount entry is non-empty\n response = self.amount_entry.get()\n if response != \"\":\n amount = int(response)\n result = amount * 1.15\n self.MessageVar.set(\"\\nConverting...\\n\")\n messagebox.showinfo(\"Output\",\"£\" + str(amount) + \" is \" + format(result, '.2f') + \" euros with a conversion rate of 1.15.\")\n else:\n self.MessageVar.set(\"\\nSystem Message Displayed Here\\n\")\n \n def __clear_button_clicked(self, event):\n self.MessageVar.set(\"\\nSystem Message Displayed Here\\n\")\n self.amount_entry.delete(0, 'end')\n \n \nif (__name__ == \"__main__\"):\n gui = Gui()\n gui.mainloop()\n","sub_path":"2-guis/ReviewTCA2/CurrencyConverter/b2.py","file_name":"b2.py","file_ext":"py","file_size_in_byte":6003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"621841616","text":"import os\nimport glob\nimport json\nimport torch\nimport numpy as np\nfrom PIL import Image\nfrom data import visualize\nfrom data import data_utils\n\ndef transforms_to_poses(transforms_file, out_dir):\n with open(transforms_file, 'r') as fp:\n transforms_data = json.load(fp)\n \n frames = transforms_data['frames']\n for frame in frames:\n filename = frame['file_path']\n filename = filename[filename.rfind('/')+1:]\n \n file = out_dir + filename + '.json'\n cam_data = {}\n cam_data['rotation_angle_x'] = frame['rotation']\n cam_data['pose'] = frame['transform_matrix']\n cam_data['width'] = 400\n cam_data['height'] = 400\n cam_data['fov'] = 40.0\n cam_data['farZ'] = 6.0\n cam_data['nearZ'] = 2.0\n\n with open(file, 'w') as fp:\n json.dump(cam_data, fp)\n \n print('Finished separating poses for each frame')\n\ndef project_img_on_sphere(color_file, cam_file, ply_file):\n center = torch.Tensor([0.0, 0.0, 0.0])\n radius = 3.0\n vol_params = ('sphere', center, radius)\n\n camera_angle_x = 
0.6911112070083618\n near = 2.0\n far = 6.0\n\n\n colors, points, viewdirs, valid_mask = get_input_data(color_file, cam_file, vol_params, True, camera_angle_x, near, far, 'cartesian', 'cartesian')\n bg_mask = torch.eq(colors[0,...],0) & torch.eq(colors[1,...],0) & torch.eq(colors[2,...],0)\n valid_mask = valid_mask & ~bg_mask\n visualize.project_input_on_bounding_vol(points, colors, valid_mask, ply_file)\n\n\ndef project_imgs_on_sphere(data_dir):\n for f in sorted(glob.glob(data_dir + \"/*.json\")):\n timestamp = f[f.rfind('/')+1:f.rfind('.')]\n cam_file = f\n color_file = os.path.join(data_dir, timestamp+\".png\")\n ply_file = os.path.join(data_dir, timestamp+\".ply\")\n project_img_on_sphere(color_file, cam_file, ply_file)\n \n\ndef read_data_from_disk(img_file, cam_file, half_res, camera_angle_x):\n img = Image.open(img_file)\n if half_res:\n img = img.resize(size=(400, 400), resample=Image.NEAREST)\n \n img = np.array(img).astype(np.float32) / 255.\n\n H, W = img.shape[:2]\n focal = .5 * W / np.tan(.5 * camera_angle_x)\n # if half_res:\n # focal = focal / 2\n\n with open(cam_file, 'r') as fp:\n cam_info = json.load(fp)\n pose = torch.Tensor(cam_info['pose']).float()\n \n img = torch.Tensor(img[...,:3]).permute(2,0,1)\n \n return img, pose, [H, W, focal]\n\n\ndef get_rays(H, W, focal, c2w):\n \"\"\"\n Get ray origins, directions from a pinhole camera.\n rays_o -> tensor of shape [W, H, 3]\n rays_d -> tensor of shape [W, H, 3]\n \"\"\"\n j, i = torch.meshgrid(torch.arange(W, dtype=torch.float32),\n torch.arange(H, dtype=torch.float32))\n dirs = torch.stack([(i-W*.5)/focal, -(j-H*.5)/focal, -torch.ones_like(i)], -1)\n rays_d = torch.sum(dirs[..., None, :] * c2w[:3, :3], -1)\n rays_d = rays_d / torch.norm(rays_d,dim=-1)[...,None]\n rays_o = c2w[:3, -1].expand(rays_d.shape)\n return rays_o, rays_d\n\n\ndef get_input_data(img_file, cam_file, vol_params, half_res, camera_angle_x, near, far, points_type, viewdir_type):\n img, pose, [H, W, focal] = read_data_from_disk(img_file, cam_file, half_res, camera_angle_x)\n\n # Get the ray, viewdirs and 3d points\n rays = get_rays(H, W, focal, pose)\n points, viewdirs, valid_mask = data_utils.generate_input(rays, near, far, vol_params, points_type, viewdir_type)\n \n return img, points, viewdirs, valid_mask\n\n\ndef get_input_samples_on_rays(img_file, cam_file, vol_params, half_res, camera_angle_x, near, far, points_type, viewdir_type, n_samples):\n img, pose, [H, W, focal] = read_data_from_disk(img_file, cam_file, half_res, camera_angle_x)\n\n # Get the ray, viewdirs and 3d points\n rays = get_rays(H, W, focal, pose)\n\n rays_o, rays_d = rays\n rays_o = rays_o[100:300,100:300,:]\n rays_d = rays_d[100:300,100:300,:]\n img = img[:,100:300,100:300]\n rays = [rays_o, rays_d]\n points, z_vals, valid_mask = data_utils.sample_on_rays(rays, near, far, n_samples, vol_params)\n viewdirs = data_utils.get_viewdirs(rays_d, viewdir_type)\n\n dirs_shape = viewdirs.shape\n viewdirs = torch.reshape(viewdirs,[-1,dirs_shape[-1]])\n viewdirs[valid_mask == False] = 0.0\n \n return img, points, viewdirs, valid_mask, z_vals\n","sub_path":"src/data/nerf_helpers.py","file_name":"nerf_helpers.py","file_ext":"py","file_size_in_byte":4339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"130402182","text":"import openmdao.api as om\r\nimport numpy as np\r\nfrom n2_globals import LS, HG_DATA, file_check\r\n\r\nfor i in range(len(LS)):\r\n if not HG_DATA.last:\r\n name = HG_DATA.children[i].name\r\n else:\r\n name = 
HG_DATA.children[i][0]\r\n f_name = file_check(name)\r\n suf = '\\n\\np = om.Problem()'\\\r\n '\\nmodel = p.model'\\\r\n '\\nmodel.add_subsystem(\"{}\", {}(), promotes=[\"*\"])'\\\r\n '\\np.setup()' \\\r\n '\\nom.n2(p, outfile=\"{}.html\")'.format(name, name, f_name)\r\n string = LS[i] + suf\r\n exec(string)\r\n\r\n# om.n2(p, outfile=\"{}.py\", embeddable = True, show_browser=True)\r\n","sub_path":"src/n2_execute.py","file_name":"n2_execute.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"153764314","text":"# Copyright (C) DATADVANCE, 2010-2020\n\n\"\"\"pSeven typesystem value.\n\nMain class for storing data and properties, used for data serialization\nand validation.\n\"\"\"\n\nimport collections\nimport copy\nimport datetime\nimport json\nimport uuid\n\nimport jsonschema\nimport numpy\n\nfrom . import constants, schema, types\nfrom .utils import ErrorsList\n\n\nclass Value:\n \"\"\"Value object, combined value and properties together, allows lazy\n loading, serialization and validation.\n\n When created from File lazy loading is enabled (if\n supported by underlying type). Native value and hdf_file can not be\n specified together.\n\n Args:\n value: Native python value.\n value_properties: `value` properties.\n hdf_file: opened h5py.File instance.\n hdf_path: (str) path inside HDF5 file\n \"\"\"\n\n NAME = \"Value\"\n VALUE_PROPERTIES = {}\n\n @classmethod\n def update_properties(cls, value, value_properties):\n return value_properties\n\n @classmethod\n def compress_properties(cls, value_properties):\n return value_properties\n\n @staticmethod\n def update_schemas(schemas):\n return schemas\n\n @staticmethod\n def compress_schemas(schemas):\n return schemas\n\n @classmethod\n def to_native(cls, value, properties):\n return value\n\n @classmethod\n def from_native(cls, value, properties):\n return value\n\n def __init__(self, value=None, value_properties=None, hdf_file=None, hdf_path=None):\n value_properties = value_properties or {}\n assert isinstance(value_properties, collections.Mapping)\n assert (hdf_file is None) == (\n hdf_path is None\n ), \"hdf_file and hdf_path must be set together!\"\n\n self._hdf_file = hdf_file\n self._hdf_path = hdf_path\n\n if hdf_file is not None:\n node = hdf_file[hdf_path]\n value_type = node.attrs[\"@type\"]\n from .constants import TYPES\n\n self._type = TYPES[value_type]\n read_properties = json.loads(node.attrs[\"@properties\"])\n self._properties = self._type.update_properties(None, read_properties)\n self._data = self._type.read(hdf_file, hdf_path, self._properties)\n else:\n self._properties = copy.deepcopy(value_properties)\n self._type = resolve_type(value, self.properties)\n self._data = self._type.from_native(value, self.properties)\n\n self._update_and_validate_properties()\n\n def __repr__(self):\n # @TODO switch to reprlib\n return \"Value({!r})\".format(self.native)\n\n @property\n def type(self):\n \"\"\"Value type as type class.\"\"\"\n return self._type\n\n @property\n def native(self):\n \"\"\"Native python value.\"\"\"\n return self._type.to_native(self._data, self.properties)\n\n @native.setter\n def native(self, value):\n \"\"\"Change value without changing type and properties.\n\n Args:\n value: New native value.\n\n \"\"\"\n if resolve_type(value, self.properties) != self._type:\n raise NotImplementedError\n if self._hdf_file is not None:\n if not self._hdf_file:\n raise ValueError(\"File is closed\")\n attr_type = 
self._hdf_file[self._hdf_path].attrs[\"@type\"]\n attr_props = json.dumps(self.type.compress_properties(self.properties))\n del self._hdf_file[self._hdf_path]\n\n # NOTE: here the new Value won't be bound with the hdf-file\n # so changing its subvalues won't affect the file.\n self._data = self._type.from_native(value, self.properties)\n self._type.write(\n self._hdf_file, self._hdf_path, self._data, self.properties\n )\n self._hdf_file[self._hdf_path].attrs[\"@type\"] = attr_type\n self._hdf_file[self._hdf_path].attrs[\"@properties\"] = attr_props\n\n else:\n self._data = self._type.from_native(value, self.properties)\n\n @property\n def data(self):\n \"\"\"Get type-specific value object with lazy-loading if supported\n by type (may be the same as native value).\n \"\"\"\n return self._data\n\n @property\n def properties(self):\n \"\"\"Value properties.\"\"\"\n return self._properties\n\n @properties.setter\n def properties(self, value: dict):\n \"\"\"Set new properties.\"\"\"\n self._properties = value\n\n def validate(self, value_properties=None, raise_on_error=True):\n \"\"\"Validate value data against properties.\n\n Args:\n value_properties: Properties to validate against, if `None`\n own properties are used.\n raise_on_error: Raise error if any or store all errors in\n list and return them.\n\n Returns:\n Errors list if `raise_on_error` is `False` otherwise throws\n error.\n\n \"\"\"\n errors = ErrorsList(raise_on_error)\n self._validate(value_properties, errors)\n return errors.list()\n\n def _validate(self, value_properties, errors_list):\n value_schema = self._properties[schema.PropertiesKeys.SCHEMA]\n assert value_schema[schema.SchemaKeys.TYPE] == self._type.NAME\n if value_properties is None:\n schemas = [value_schema]\n value_schemas = (\n self._properties.get(schema.PropertiesKeys.SCHEMAS, None) or schemas\n )\n if schemas[0] not in value_schemas:\n errors_list.append(\n TypeError(\"Value schema not found in value schemas list!\")\n )\n else:\n schemas = value_properties.get(schema.PropertiesKeys.SCHEMAS, None)\n if schemas is None:\n schemas = [value_properties[schema.PropertiesKeys.SCHEMA]]\n schemas = [\n s for s in schemas if s[schema.SchemaKeys.TYPE] == self._type.NAME\n ]\n if not schemas:\n errors_list.append(\n TypeError(\"Value type does not match any schema in properties!\")\n )\n\n validation_results = []\n for s in schemas:\n schema_errors = ErrorsList(False)\n self._type.validate(self._data, s, schema_errors)\n if not schema_errors.list():\n validation_results = []\n break\n else:\n validation_results += schema_errors.list()\n for error in validation_results:\n errors_list.append(error)\n\n def write(self, hdf_file, hdf_path=None):\n \"\"\"Write value to the file.\n\n Args:\n file: File to write opened in binary mode and correct\n position must be set.\n\n \"\"\"\n if not hdf_path:\n hdf_path = f\"/{uuid.uuid4()}\"\n self.type.write(hdf_file, hdf_path, self.data, self.properties)\n node = hdf_file[hdf_path]\n node.attrs[\"@type\"] = self.type.NAME\n properties_to_write = self.type.compress_properties(self.properties)\n node.attrs[\"@properties\"] = json.dumps(properties_to_write)\n\n def _update_and_validate_properties(self):\n self._properties = self._type.update_properties(self._data, self.properties)\n if self._type is not Value:\n # validate only non Value types\n jsonschema.validate(self.properties, schema.PROPERTIES_SCHEMA(self._type))\n\n\ndef is_convertable_to_matrix(list_value):\n \"\"\"Check if a list value may be converted to a matrix.\"\"\"\n\n # 
Scalar types suitable for storing in matrix.\n MATRIX_ITEM_TYPES = (types.integer.Type, types.real.Type)\n\n def check_items_in_a_row(row):\n \"\"\"Check whether all elements in row are of suitable type.\"\"\"\n return all(resolve_type(item) in MATRIX_ITEM_TYPES for item in row)\n\n if not list_value:\n return True\n\n # Assume input is a list of scalars.\n if not isinstance(list_value[0], (list, tuple)):\n return check_items_in_a_row(list_value)\n\n # If input is a list of lists check rectangular shape first.\n item_length = len(list_value[0])\n for item in list_value[1:]:\n if not isinstance(item, (list, tuple)) or len(item) != item_length:\n return False\n if item_length == 0:\n return True\n\n # Check that all items have suitable type.\n return all(map(check_items_in_a_row, list_value))\n\n\ndef resolve_type(value, value_properties=None):\n \"\"\"Get type class from a value and properties.\n\n Args:\n value: Native value.\n value_properties: Value properties.\n\n Return:\n one of types from types module or (special case) Value\n\n\n Raises:\n ValueError - if value's type is not resolved\n NotImplementedError - if value and value_properties mismatch\n\n \"\"\"\n # We have many types and dividing type checks is meaningless.\n # pylint: disable=too-many-branches\n\n INT_TYPES = (int, numpy.int_, numpy.uint8)\n FLOAT_TYPES = (float, numpy.float_)\n\n value_properties = value_properties or {}\n value_schema = value_properties.get(schema.PropertiesKeys.SCHEMA, {})\n schema_type = value_schema.get(schema.SchemaKeys.TYPE)\n\n if value is None:\n type_name = types.null.Type.NAME\n elif isinstance(value, (bytes, bytearray)):\n type_name = types.binary.Type.NAME\n elif isinstance(value, (bool, numpy.bool_)):\n type_name = types.boolean.Type.NAME\n elif isinstance(value, numpy.ndarray):\n type_name = types.table.Type.NAME\n if value.dtype.names is None or not value.dtype.names:\n if any(\n map(\n lambda t: numpy.issubdtype(value.dtype, numpy.dtype(t)),\n [numpy.int, numpy.float, numpy.uint8, numpy.uint],\n )\n ):\n type_name = types.matrix.Type.NAME\n elif isinstance(value, collections.Mapping):\n type_name = types.dictionary.Type.NAME\n if schema_type == types.structure.Type.NAME:\n type_name = schema_type\n elif isinstance(value, INT_TYPES):\n type_name = types.integer.Type.NAME\n if schema_type == types.real.Type.NAME:\n type_name = schema_type\n elif isinstance(value, FLOAT_TYPES):\n type_name = types.real.Type.NAME\n elif isinstance(value, slice):\n type_name = types.slice.Type.NAME\n elif isinstance(value, str):\n type_name = types.string.Type.NAME\n if schema_type in [\n types.enumeration.Type.NAME,\n types.path.Type.NAME,\n types.timestamp.Type.NAME,\n ]:\n type_name = schema_type\n elif isinstance(value, datetime.datetime):\n type_name = types.timestamp.Type.NAME\n elif isinstance(value, collections.Sequence):\n type_name = types.list.Type.NAME\n if schema_type == types.subset.Type.NAME:\n type_name = schema_type\n elif schema_type == types.matrix.Type.NAME and is_convertable_to_matrix(value):\n type_name = schema_type\n elif isinstance(value, Value):\n # special case\n return Value\n else:\n raise ValueError(f\"Unresolved type '{type(value)}'!\")\n\n if schema_type is not None and type_name != schema_type:\n if type_name == types.null.Type.NAME:\n if value_schema.get(schema.SchemaKeys.NULLABLE, False):\n return constants.TYPES[schema_type]\n raise NotImplementedError(\n f\"Value type and properties mismatch:\"\n f\" value: {value},\"\n f\" type of value: {type_name},\"\n f\" selected 
type: {schema_type}!\"\n )\n return constants.TYPES[type_name]\n","sub_path":"typesystem/value.py","file_name":"value.py","file_ext":"py","file_size_in_byte":11627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"555120957","text":"\ndef adjust_cr(img, alpha, beta):\n rows, cols, channels = img.shape\n dst = img.copy()\n # the explicit loops below could be replaced by one vectorized call:\n # np.clip(img.astype(np.float32) * alpha + beta, 0, 255).astype(img.dtype)\n\n for i in range(rows):\n for j in range(cols):\n for c in range(3):\n color = img[i, j][c] * alpha + beta\n if color > 255: # clamp the pixel value to the 0-255 range\n dst[i, j][c] = 255\n elif color < 0: # clamp the pixel value to the 0-255 range\n dst[i, j][c] = 0\n else:\n dst[i, j][c] = color\n return dst\n","sub_path":"augment/contrast_ratio.py","file_name":"contrast_ratio.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"416725923","text":"\"\"\" Pipelines expressed as dask components\n\"\"\"\n\n__all__ = ['simulate_list_rsexecute_workflow', 'corrupt_list_rsexecute_workflow',\n 'calculate_residual_from_gaintables_rsexecute_workflow',\n 'create_pointing_errors_gaintable_rsexecute_workflow',\n 'create_standard_mid_simulation_rsexecute_workflow',\n 'create_surface_errors_gaintable_rsexecute_workflow']\n\nimport logging\n\nimport numpy\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord, EarthLocation\n\nfrom rascil.data_models.memory_data_models import Visibility, SkyModel\nfrom rascil.data_models.polarisation import PolarisationFrame\n\nfrom rascil.processing_components.image.operations import create_empty_image_like\nfrom rascil.processing_components.visibility import copy_visibility\nfrom rascil.processing_components.simulation.simulation_helpers import plot_pointingtable, plot_gaintable\nfrom rascil.processing_components.simulation.simulation_helpers import find_times_above_elevation_limit\nfrom rascil.processing_components.util.coordinate_support import hadec_to_azel\nfrom rascil.processing_components.calibration import apply_gaintable, create_gaintable_from_blockvisibility\nfrom rascil.workflows.rsexecute.execution_support.rsexecute import rsexecute\nfrom rascil.processing_components.simulation import simulate_gaintable\nfrom rascil.processing_components.simulation import create_named_configuration\nfrom rascil.processing_components.visibility import create_blockvisibility, create_visibility\nfrom rascil.processing_components.visibility import convert_blockvisibility_to_visibility, \\\n convert_visibility_to_blockvisibility\nfrom rascil.processing_components.calibration.pointing import create_pointingtable_from_blockvisibility\nfrom rascil.processing_components.image import import_image_from_fits\nfrom rascil.processing_components.simulation.pointing import simulate_gaintable_from_pointingtable\nfrom rascil.processing_components.simulation.pointing import simulate_pointingtable, simulate_pointingtable_from_timeseries\nfrom rascil.processing_components.simulation import create_configuration_from_MIDfile\nfrom rascil.workflows.rsexecute.imaging.imaging_rsexecute import invert_list_rsexecute_workflow\nfrom rascil.workflows.rsexecute.skymodel.skymodel_rsexecute import predict_skymodel_list_compsonly_rsexecute_workflow\n\nlog = logging.getLogger(__name__)\n\n\ndef simulate_list_rsexecute_workflow(config='LOWBD2',\n phasecentre=SkyCoord(ra=+15.0 * u.deg, dec=-60.0 * u.deg, frame='icrs',\n equinox='J2000'),\n frequency=None, channel_bandwidth=None, times=None,\n polarisation_frame=PolarisationFrame(\"stokesI\"), order='frequency',\n 
format='blockvis',\n rmax=1000.0,\n zerow=False):\n \"\"\" A component to simulate an observation\n\n The simulation step can generate a single BlockVisibility or a list of BlockVisibility's.\n The order keyword determines the way that the list is constructed.\n If order='frequency' then len(frequency) BlockVisibility's with all times are created.\n If order='time' then len(times) BlockVisibility's with all frequencies are created.\n If order = 'both' then len(times) * len(frequency) BlockVisibility's are created each with\n a single time and frequency. If order = None then all data are created in one BlockVisibility.\n\n The output format can be either 'blockvis' (for calibration) or 'vis' (for imaging)\n\n :param config: Name of configuration: def LOWBD2\n :param phasecentre: Phase centre def: SkyCoord(ra=+15.0 * u.deg, dec=-60.0 * u.deg, frame='icrs', equinox='J2000')\n :param frequency: def [1e8]\n :param channel_bandwidth: def [1e6]\n :param times: Observing times in radians: def [0.0]\n :param polarisation_frame: def PolarisationFrame(\"stokesI\")\n :param order: 'time' or 'frequency' or 'both' or None: def 'frequency'\n :param format: 'blockvis' or 'vis': def 'blockvis'\n :return: vis_list with different frequencies in different elements\n \"\"\"\n if format == 'vis':\n create_vis = create_visibility\n else:\n create_vis = create_blockvisibility\n \n if times is None:\n times = [0.0]\n if channel_bandwidth is None:\n channel_bandwidth = [1e6]\n if frequency is None:\n frequency = [1e8]\n conf = create_named_configuration(config, rmax=rmax)\n \n if order == 'time':\n log.debug(\"simulate_list_rsexecute_workflow: Simulating distribution in %s\" % order)\n vis_list = list()\n for i, time in enumerate(times):\n vis_list.append(rsexecute.execute(create_vis, nout=1)(conf, numpy.array([times[i]]),\n frequency=frequency,\n channel_bandwidth=channel_bandwidth,\n weight=1.0, phasecentre=phasecentre,\n polarisation_frame=polarisation_frame,\n zerow=zerow))\n \n elif order == 'frequency':\n log.debug(\"simulate_list_rsexecute_workflow: Simulating distribution in %s\" % order)\n vis_list = list()\n for j, _ in enumerate(frequency):\n vis_list.append(rsexecute.execute(create_vis, nout=1)(conf, times,\n frequency=numpy.array([frequency[j]]),\n channel_bandwidth=numpy.array(\n [channel_bandwidth[j]]),\n weight=1.0, phasecentre=phasecentre,\n polarisation_frame=polarisation_frame,\n zerow=zerow))\n \n elif order == 'both':\n log.debug(\"simulate_list_rsexecute_workflow: Simulating distribution in time and frequency\")\n vis_list = list()\n for i, _ in enumerate(times):\n for j, _ in enumerate(frequency):\n vis_list.append(rsexecute.execute(create_vis, nout=1)(conf, numpy.array([times[i]]),\n frequency=numpy.array([frequency[j]]),\n channel_bandwidth=numpy.array(\n [channel_bandwidth[j]]),\n weight=1.0, phasecentre=phasecentre,\n polarisation_frame=polarisation_frame,\n zerow=zerow))\n \n elif order is None:\n log.debug(\"simulate_list_rsexecute_workflow: Simulating into single %s\" % format)\n vis_list = list()\n vis_list.append(rsexecute.execute(create_vis, nout=1)(conf, times, frequency=frequency,\n channel_bandwidth=channel_bandwidth,\n weight=1.0, phasecentre=phasecentre,\n polarisation_frame=polarisation_frame,\n zerow=zerow))\n else:\n raise NotImplementedError(\"order %s not known\" % order)\n return vis_list\n\ndef corrupt_list_rsexecute_workflow(vis_list, gt_list=None, seed=None, **kwargs):\n \"\"\" Create a graph to apply gain errors to a vis_list\n\n :param vis_list:\n :param 
gt_list: Optional gain table graph\n :param kwargs:\n :return:\n \"\"\"\n \n def corrupt_vis(vis, gt, **kwargs):\n if isinstance(vis, Visibility):\n bv = convert_visibility_to_blockvisibility(vis)\n else:\n bv = vis\n if gt is None:\n gt = create_gaintable_from_blockvisibility(bv, **kwargs)\n gt = simulate_gaintable(gt, **kwargs)\n bv = apply_gaintable(bv, gt)\n \n if isinstance(vis, Visibility):\n return convert_blockvisibility_to_visibility(bv)\n else:\n return bv\n \n if gt_list is None:\n return [rsexecute.execute(corrupt_vis, nout=1)(vis_list[ivis], None, **kwargs)\n for ivis, v in enumerate(vis_list)]\n else:\n return [rsexecute.execute(corrupt_vis, nout=1)(vis_list[ivis], gt_list[ivis], **kwargs)\n for ivis, v in enumerate(vis_list)]\n\n\ndef calculate_residual_from_gaintables_rsexecute_workflow(sub_bvis_list, sub_components, sub_model_list,\n no_error_gt_list, error_gt_list):\n \"\"\"Calculate residual image corresponding to a set of gaintables\n\n The visibility difference for a set of components for error and no error gaintables\n are calculated and the residual images constructed\n\n :param sub_bvis_list:\n :param sub_components:\n :param sub_model_list:\n :param no_error_gt_list:\n :param error_gt_list:\n :return:\n \"\"\"\n error_sm_list = [[\n rsexecute.execute(SkyModel, nout=1)(components=[sub_components[i]], gaintable=error_gt_list[ibv][i])\n for i, _ in enumerate(sub_components)] for ibv, bv in enumerate(sub_bvis_list)]\n \n no_error_sm_list = [[\n rsexecute.execute(SkyModel, nout=1)(components=[sub_components[i]], gaintable=no_error_gt_list[ibv][i])\n for i, _ in enumerate(sub_components)] for ibv, bv in enumerate(sub_bvis_list)]\n \n # Predict each visibility for each skymodel. We keep all the visibilities separate\n # and add up dirty images at the end of processing. We calibrate which applies the voltage pattern\n no_error_bvis_list = [rsexecute.execute(copy_visibility, nout=1)(bvis, zero=True) for bvis in sub_bvis_list]\n no_error_bvis_list = [\n predict_skymodel_list_compsonly_rsexecute_workflow(no_error_bvis_list[ibv], no_error_sm_list[ibv],\n context='2d', docal=True)\n for ibv, bvis in enumerate(no_error_bvis_list)]\n \n error_bvis_list = [rsexecute.execute(copy_visibility, nout=1)(bvis, zero=True) for bvis in sub_bvis_list]\n error_bvis_list = [predict_skymodel_list_compsonly_rsexecute_workflow(error_bvis_list[ibv], error_sm_list[ibv],\n context='2d', docal=True)\n for ibv, bvis in enumerate(error_bvis_list)]\n \n # Inner nest is bvis per skymodels, outer is over vis's. Calculate residual visibility\n def subtract_vis_convert(error_bvis, no_error_bvis):\n error_bvis.data['vis'] = error_bvis.data['vis'] - no_error_bvis.data['vis']\n error_vis = convert_blockvisibility_to_visibility(error_bvis)\n return error_vis\n \n error_vis_list = [[rsexecute.execute(subtract_vis_convert)(error_bvis_list[ibvis][icomp],\n no_error_bvis_list[ibvis][icomp])\n for icomp, _ in enumerate(sub_components)]\n for ibvis, _ in enumerate(error_bvis_list)]\n \n # Now for each visibility/component, we make the component dirty images. 
We just add these\n # component dirty images since the weights should be the same\n def sum_images(images):\n sum_image = create_empty_image_like(images[0][0])\n for im in images:\n sum_image.data += im[0].data\n return sum_image, images[0][1]\n \n dirty_list = list()\n for vis in error_vis_list:\n result = invert_list_rsexecute_workflow(vis, sub_model_list, '2d')\n dirty_list.append(rsexecute.execute(sum_images)(result))\n \n return dirty_list\n\ndef create_pointing_errors_gaintable_rsexecute_workflow(sub_bvis_list, sub_components, sub_vp_list,\n use_radec=False, pointing_error=0.0, static_pointing_error=None,\n global_pointing_error=None, time_series='', time_series_type='',\n seed=None, pointing_directory=None, show=False, basename=''):\n if global_pointing_error is None:\n global_pointing_error = [0.0, 0.0]\n \n # One pointing table per visibility\n \n error_pt_list = [rsexecute.execute(create_pointingtable_from_blockvisibility)(bvis) for bvis in sub_bvis_list]\n no_error_pt_list = [rsexecute.execute(create_pointingtable_from_blockvisibility)(bvis) for bvis in sub_bvis_list]\n \n if time_series == '':\n error_pt_list = [rsexecute.execute(simulate_pointingtable)(pt, pointing_error=pointing_error,\n static_pointing_error=static_pointing_error,\n global_pointing_error=global_pointing_error,\n seed=seed)\n for ipt, pt in enumerate(error_pt_list)]\n else:\n error_pt_list = [rsexecute.execute(simulate_pointingtable_from_timeseries)(pt, type=time_series,\n time_series_type=time_series_type,\n pointing_directory=pointing_directory,\n seed=seed)\n for ipt, pt in enumerate(error_pt_list)]\n \n if show:\n tmp_error_pt_list = rsexecute.compute(error_pt_list, sync=True)\n if time_series != \"\":\n plot_file = 'pointing_error_%s.png' % (time_series_type)\n else:\n r2s = 180 * 3600.0 / numpy.pi\n plot_file = 'pointing_error_dynamic_%.2f_static_(%.2f,%.2f)_global_(%.2f,%.2f).png' % \\\n (r2s * pointing_error, r2s * static_pointing_error[0], r2s * static_pointing_error[1],\n r2s * global_pointing_error[0], r2s * global_pointing_error[1])\n \n plot_pointingtable(tmp_error_pt_list, plot_file=plot_file, title=basename)\n \n # Create the gain tables, one per Visibility and per component\n no_error_gt_list = [rsexecute.execute(simulate_gaintable_from_pointingtable)\n (bvis, sub_components, no_error_pt_list[ibv], sub_vp_list[ibv], use_radec=use_radec)\n for ibv, bvis in enumerate(sub_bvis_list)]\n error_gt_list = [rsexecute.execute(simulate_gaintable_from_pointingtable)\n (bvis, sub_components, error_pt_list[ibv], sub_vp_list[ibv], use_radec=use_radec)\n for ibv, bvis in enumerate(sub_bvis_list)]\n if show:\n tmp_gt_list = rsexecute.compute(error_gt_list, sync=True)\n \n if time_series_type != \"\":\n plot_file = 'gaintable_%s.png' % time_series_type\n else:\n r2s = 180 * 3600.0 / numpy.pi\n plot_file = 'gaintable_dynamic_%.2f_static_(%.2f,%.2f)_global_(%.2f,%.2f).png' % \\\n (r2s * pointing_error, r2s * static_pointing_error[0], r2s * static_pointing_error[1],\n r2s * global_pointing_error[0], r2s * global_pointing_error[1])\n \n plot_gaintable(tmp_gt_list, title=\"%s: dish 0 amplitude gain, %s\" % (basename, time_series_type),\n plot_file=plot_file)\n \n return no_error_gt_list, error_gt_list\n\ndef create_surface_errors_gaintable_rsexecute_workflow(band, sub_bvis_list, sub_components, vp_directory, use_radec=False,\n elevation_sampling=5.0, show=False, basename=''):\n def get_band_vp(band, el):\n \n if band == 'B1':\n vpa = import_image_from_fits('%s/B1_%d_0565_real_interpolated.fits' % (vp_directory, 
int(el)))\n vpa_imag = import_image_from_fits('%s/B1_%d_0565_imag_interpolated.fits' % (vp_directory, int(el)))\n elif band == 'B2':\n vpa = import_image_from_fits('%s/B2_%d_1360_real_interpolated.fits' % (vp_directory, int(el)))\n vpa_imag = import_image_from_fits('%s/B2_%d_1360_imag_interpolated.fits' % (vp_directory, int(el)))\n elif band == 'Ku':\n vpa = import_image_from_fits('%s/Ku_%d_11700_real_interpolated.fits' % (vp_directory, int(el)))\n vpa_imag = import_image_from_fits('%s/Ku_%d_11700_imag_interpolated.fits' % (vp_directory, int(el)))\n else:\n raise ValueError(\"Unknown band %s\" % band)\n \n vpa.data = vpa.data + 1j * vpa_imag.data\n return vpa\n \n def find_vp(band, vis):\n ha = numpy.pi * numpy.average(vis.time) / 43200.0\n dec = vis.phasecentre.dec.rad\n latitude = vis.configuration.location.lat.rad\n az, el = hadec_to_azel(ha, dec, latitude)\n \n el_deg = el * 180.0 / numpy.pi\n el_table = max(0.0,\n min(90.1, elevation_sampling * ((el_deg + elevation_sampling / 2.0) // elevation_sampling)))\n return get_band_vp(band, el_table)\n \n def find_vp_nominal(band):\n el_nominal_deg = 45.0\n return get_band_vp(band, el_nominal_deg)\n \n error_pt_list = [rsexecute.execute(create_pointingtable_from_blockvisibility)(bvis) for bvis in sub_bvis_list]\n no_error_pt_list = [rsexecute.execute(create_pointingtable_from_blockvisibility)(bvis) for bvis in sub_bvis_list]\n \n vp_nominal_list = [rsexecute.execute(find_vp_nominal)(band) for bv in sub_bvis_list]\n vp_actual_list = [rsexecute.execute(find_vp)(band, bv) for bv in sub_bvis_list]\n \n # Create the gain tables, one per Visibility and per component\n no_error_gt_list = [rsexecute.execute(simulate_gaintable_from_pointingtable)\n (bvis, sub_components, no_error_pt_list[ibv], vp_nominal_list[ibv], use_radec=use_radec)\n for ibv, bvis in enumerate(sub_bvis_list)]\n error_gt_list = [rsexecute.execute(simulate_gaintable_from_pointingtable)\n (bvis, sub_components, error_pt_list[ibv], vp_actual_list[ibv], use_radec=use_radec)\n for ibv, bvis in enumerate(sub_bvis_list)]\n if show:\n plot_file = 'gaintable.png'\n tmp_gt_list = rsexecute.compute(error_gt_list, sync=True)\n plot_gaintable(tmp_gt_list, plot_file=plot_file, title=basename)\n \n return no_error_gt_list, error_gt_list\n\ndef create_standard_mid_simulation_rsexecute_workflow(band, rmax, phasecentre, time_range, time_chunk, integration_time,\n shared_directory):\n \"\"\" Create the standard MID simulation\n \n :param band:\n :param rmax:\n :param ra:\n :param declination:\n :param time_range:\n :param time_chunk:\n :param integration_time:\n :param shared_directory:\n :return:\n \"\"\"\n \n # Set up details of simulated observation\n if band == 'B1':\n frequency = [0.765e9]\n elif band == 'B2':\n frequency = [1.36e9]\n elif band == 'Ku':\n frequency = [12.179e9]\n else:\n raise ValueError(\"Unknown band %s\" % band)\n \n channel_bandwidth = [1e7]\n mid_location = EarthLocation(lon=\"21.443803\", lat=\"-30.712925\", height=0.0)\n \n # Do each time_chunk in parallel\n start_times = numpy.arange(time_range[0] * 3600, time_range[1] * 3600, time_chunk)\n end_times = start_times + time_chunk\n \n start_times = find_times_above_elevation_limit(start_times, end_times, location=mid_location,\n phasecentre=phasecentre, elevation_limit=15.0)\n times = [numpy.arange(start_times[itime], end_times[itime], integration_time) for itime in\n range(len(start_times))]\n \n s2r = numpy.pi / (12.0 * 3600)\n rtimes = s2r * numpy.array(times)\n ntimes = len(rtimes.flat)\n nchunks = len(start_times)\n \n 
assert ntimes > 0, \"No data above elevation limit\"\n \n #print('%d integrations of duration %.1f s processed in %d chunks' % (ntimes, integration_time, nchunks))\n \n mid = create_configuration_from_MIDfile('%s/ska1mid_local.cfg' % shared_directory, rmax=rmax,\n location=mid_location)\n \n bvis_graph = [rsexecute.execute(create_blockvisibility)(mid, rtimes[itime], frequency=frequency,\n channel_bandwidth=channel_bandwidth, weight=1.0,\n phasecentre=phasecentre,\n polarisation_frame=PolarisationFrame(\"stokesI\"),\n zerow=True)\n for itime in range(nchunks)]\n \n return bvis_graph\n","sub_path":"rascil/workflows/rsexecute/simulation/simulation_rsexecute.py","file_name":"simulation_rsexecute.py","file_ext":"py","file_size_in_byte":21368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"10216239","text":"\"\"\"\nA simple instantiation of the PowerMate class to print the actions of the USB\ndevice as they happen. The methods also return :class:`.LedEvent` objects to\nlight the bottom of the Powermate when pressed and turn it off when released\n\"\"\"\nimport asyncio\nfrom powermate import Event, PowerMateBase\n\n\nclass SimplePowerMate(PowerMateBase):\n\n def on_start(self):\n \"\"\"\n Run once when the call loop is instantiated\n \"\"\"\n print('Starting to watch the PowerMate')\n\n def on_exit(self):\n \"\"\"\n Run when the event loop is done running\n \"\"\"\n print('Done watching the PowerMate')\n\n @asyncio.coroutine\n def rotated(self, val, pressed):\n \"\"\"\n Run when the PowerMate is rotated\n \"\"\"\n if pressed:\n print(\"PowerMate has been pressed and rotated {} counts ...\"\n \"\".format(val))\n else:\n print(\"PowerMate has been rotated {} counts ...\"\n \"\".format(val))\n\n @asyncio.coroutine\n def pressed(self):\n \"\"\"\n Run when the PowerMate is pressed\n \"\"\"\n print(\"PowerMate has been pressed\")\n return self.illuminate(100)\n\n @asyncio.coroutine\n def released(self, elapsed, rotated):\n \"\"\"\n Run when the PowerMate is released\n \"\"\"\n super().released(elapsed)\n print(\"PowerMate has been released after {} ms\".format(elapsed))\n if rotated:\n print(\"The PowerMate was rotated during this time\")\n if elapsed > 2500:\n return Event.stop()\n else:\n return self.illuminate(0)\n","sub_path":"examples/simple.py","file_name":"simple.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"160722644","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nmnist = input_data.read_data_sets('MNIST_data', one_hot=True)\nsess = tf.InteractiveSession()\n\n# x = input variable, y = output variable, W = Weight variable, b = Bias\n\nx = tf.placeholder(tf.float32, shape=[None, 784])\ny = tf.placeholder(tf.float32, shape=[None, 10])\n\n# in here, since we are using simple gradient descend method,\n# We do not initialize by Xavier initialization or etc.\n\nW = tf.Variable(tf.zeros([784, 10]))\nb = tf.Variable(tf.zeros([10]))\n\n\ny_result = tf.matmul(x, W) + b\n\ncross_entropy = tf.reduce_mean(\ntf.nn.softmax_cross_entropy_with_logits(labels=y, logits=y_result))\n\ntrain_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)\n\n\nsess.run(tf.global_variables_initializer())\n\ntraining = 2000\nbatch_size = 100\n\nfor i in range(training):\n batch = mnist.train.next_batch(batch_size)\n train_step.run(feed_dict={x: batch[0], y:batch[1]})\n print('train num :', '%d' % 
(i+1))\n\ncorrect_prediction = tf.equal(tf.argmax(y_result, 1), tf.argmax(y, 1))\n\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\nprint(accuracy.eval(feed_dict = {x: mnist.test.images, y:mnist.test.labels}))\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"147307850","text":"# Image 3\nimport turtle\nfrom colorsys import *\n\ndef draw_star(t,l,n):\n for __ in range(n):\n a = 180.0 - (180.0 / n)\n t.forward(l)\n t.right(a)\n t.forward(l)\n\nscr = turtle.Screen()\nscr.setup()\n\nturtle.delay(0)\n\nt = list()\nfor x in range(4):\n t.append(turtle.Turtle())\n t[x].width(1)\n t[x].speed(0)\n t[x].penup()\n t[x].seth(45)\n t[x].left(90*x)\n t[x].forward(50)\n t[x].pendown()\n t[x].ht()\n \n h = .4\n \n for i in range(10):\n t[x].pencolor(hls_to_rgb(h,0.45,0.7))\n draw_star(t[x],40,9)\n h += .14\n t[x].penup()\n t[x].forward(30)\n t[x].right(15)\n t[x].pendown()\n","sub_path":"Image3/image3.py","file_name":"image3.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"385842628","text":"import torch\nimport numpy as np\nimport pickle\nimport os\nimport torchvision\nimport argparse\nfrom random import shuffle\ncpath = os.path.dirname(__file__)\n\nSAVE = True\nnp.random.seed(6)\n\n\ndef read_options():\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--dataset',\n help='name of dataset (mnist, cifar10);',\n type=str,\n default='mnist')\n parser.add_argument('--use_1d_feature',\n action='store_true',\n default=False,\n help='represent the image data by 1d feature')\n parser.add_argument('--imbalance_ratio',\n help='imbalance ratio from 0 to 1;',\n type=float,\n default=0.1)\n parser.add_argument('--num_client',\n help='number of clients',\n default=100,\n type=int)\n\n parsed = parser.parse_args()\n options = parsed.__dict__\n\n return options\n\n\nclass ImageDataset(object):\n def __init__(self, images, labels, options, normalize=False):\n if isinstance(images, torch.Tensor):\n if options['use_1d_feature']:\n self.data = images.view(-1, 784).numpy()/255\n else:\n self.data = images.numpy()\n else:\n self.data = images\n if normalize and options['use_1d_feature']:\n mu = np.mean(self.data.astype(np.float32), 0)\n sigma = np.std(self.data.astype(np.float32), 0)\n self.data = (self.data.astype(np.float32) - mu) / (sigma + 0.001)\n if not isinstance(labels, np.ndarray):\n labels = np.array(labels)\n self.target = labels\n\n def __len__(self):\n return len(self.target)\n\n\ndef data_split_imbalance(data, num_split, imbalance_raio, random=True):\n\n if random:\n np.random.shuffle(data)\n data_num = len(data)\n min_data_num = round(data_num * imbalance_raio / (imbalance_raio + 1))\n min_data = data[:min_data_num]\n maj_data = data[min_data_num:]\n\n delta, r = len(maj_data) // num_split, len(maj_data) % num_split\n maj_data_lst = []\n i, used_r = 0, 0\n while i < len(maj_data):\n if used_r < r:\n maj_data_lst.append(maj_data[i:i+delta+1])\n i += delta + 1\n used_r += 1\n else:\n maj_data_lst.append(maj_data[i:i+delta])\n i += delta\n\n delta, r = len(min_data) // num_split, len(min_data) % num_split\n min_data_lst = []\n i, used_r = 0, 0\n while i < len(min_data):\n if used_r < r:\n min_data_lst.append(min_data[i:i+delta+1])\n i += delta + 1\n used_r += 1\n else:\n min_data_lst.append(min_data[i:i+delta])\n i += delta\n\n return 
[maj_data_lst, min_data_lst]\n\n\ndef choose_two_digit_imbalance(num_class, num_split):\n split_list = []\n for i in range(num_split):\n while 1:\n x = [i for i in range(num_class)]\n shuffle(x)\n y = [i for i in range(num_class)]\n shuffle(y)\n if sum(np.array(x) == np.array(y)) == 0:\n break\n for xx, yy in zip(x, y):\n split_list.append([xx, yy])\n\n return split_list\n\n\ndef main():\n\n options = read_options()\n dataset_file_path = os.path.join(cpath, options['dataset'] + '/data')\n\n # Get data, normalize, and divide by level\n if options['dataset'] == 'mnist':\n print('>>> Get MNIST data.')\n trainset = torchvision.datasets.MNIST(dataset_file_path, download=True, train=True)\n testset = torchvision.datasets.MNIST(dataset_file_path, download=True, train=False)\n elif options['dataset'] == 'cifar10':\n print('>>> Get CIFAR10 data.')\n trainset = torchvision.datasets.CIFAR10(dataset_file_path, download=True, train=True)\n testset = torchvision.datasets.CIFAR10(dataset_file_path, download=True, train=False)\n else:\n raise Exception('Unknown dataset.')\n\n train = ImageDataset(trainset.data, trainset.targets, options)\n test = ImageDataset(testset.data, testset.targets, options)\n\n num_class = len(np.unique(train.target))\n assert(options['num_client'] % num_class == 0)\n num_split = int(options['num_client'] / num_class)\n traindata = []\n for number in range(num_class):\n idx = train.target == number\n traindata.append(train.data[idx])\n\n split_traindata = [] # num_class x 2 (maj and min) x num_split x num_data x num_feature\n for digit in traindata:\n split_traindata.append(data_split_imbalance(digit, num_split, options['imbalance_ratio']))\n\n testdata = []\n for number in range(num_class):\n idx = test.target == number\n testdata.append(test.data[idx])\n split_testdata = []\n for digit in testdata:\n split_testdata.append(data_split_imbalance(digit, num_split, options['imbalance_ratio']))\n\n data_distribution = np.array([len(v) for v in traindata])\n data_distribution = np.round(data_distribution / data_distribution.sum(), 3)\n print('>>> Train Number distribution: {}'.format(data_distribution.tolist()))\n\n maj_digit_count = np.array([len(v[0]) for v in split_traindata])\n min_digit_count = np.array([len(v[1]) for v in split_traindata])\n print('>>> Each digit in train data is split into majority class {} and minority class {}'.\n format(maj_digit_count.tolist(), min_digit_count.tolist()))\n\n maj_digit_count = np.array([len(v[0]) for v in split_testdata])\n min_digit_count = np.array([len(v[1]) for v in split_testdata])\n print('>>> Each digit in test data is split into majority class {} and minority class {}'.\n format(maj_digit_count.tolist(), min_digit_count.tolist()))\n\n # Assign train samples to each user\n train_X = [[] for _ in range(options['num_client'])]\n train_y = [[] for _ in range(options['num_client'])]\n val_X = [[] for _ in range(options['num_client'])]\n val_y = [[] for _ in range(options['num_client'])]\n test_X = [[] for _ in range(options['num_client'])]\n test_y = [[] for _ in range(options['num_client'])]\n\n split_list = choose_two_digit_imbalance(num_class, num_split)\n\n for user in range(options['num_client']):\n print(user, [[len(v[0]) for v in split_traindata], [len(v[1]) for v in split_traindata]])\n chosen_classes = split_list[user]\n train_size = [len(split_traindata[chosen_classes[0]][0][-1]), len(split_traindata[chosen_classes[1]][1][-1])]\n for i, d in enumerate(chosen_classes):\n tmp_data = split_traindata[d][i].pop().tolist()\n 
train_X[user] += tmp_data\n train_y[user] += (d * np.ones(train_size[i])).tolist()\n val_X[user] += tmp_data[:train_size[1]] # use the minority class size\n val_y[user] += (d * np.ones(train_size[1])).tolist()\n\n test_size = len(split_testdata[d][i][-1])\n test_X[user] += split_testdata[d][i].pop().tolist()\n test_y[user] += (d * np.ones(test_size)).tolist()\n\n image = 1 if not options['use_1d_feature'] else 0\n train_path = '{}/data/train/all_data_{}_imbalance.pkl'.format(os.path.join(cpath, options['dataset']), image)\n val_path = '{}/data/validate/all_data_{}_imbalance.pkl'.format(os.path.join(cpath, options['dataset']), image)\n test_path = '{}/data/test/all_data_{}_imbalance.pkl'.format(os.path.join(cpath, options['dataset']), image)\n\n dir_path = os.path.dirname(train_path)\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n dir_path = os.path.dirname(val_path)\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n dir_path = os.path.dirname(test_path)\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)\n\n # Create data structure\n train_data = {'users': [], 'user_data': {}, 'num_samples': []}\n val_data = {'users': [], 'user_data': {}, 'num_samples': []}\n test_data = {'users': [], 'user_data': {}, 'num_samples': []}\n\n # Setup 1000 users\n for i in range(options['num_client']):\n uname = i\n\n train_data['users'].append(uname)\n train_data['user_data'][uname] = {'x': train_X[i], 'y': train_y[i]}\n train_data['num_samples'].append(len(train_X[i]))\n\n val_data['users'].append(uname)\n val_data['user_data'][uname] = {'x': val_X[i], 'y': val_y[i]}\n val_data['num_samples'].append(len(val_X[i]))\n\n test_data['users'].append(uname)\n test_data['user_data'][uname] = {'x': test_X[i], 'y': test_y[i]}\n test_data['num_samples'].append(len(test_X[i]))\n\n print('>>> User data distribution: {}'.format(train_data['num_samples']))\n print('>>> Total training size: {}'.format(sum(train_data['num_samples'])))\n print('>>> Total validation size: {}'.format(sum(val_data['num_samples'])))\n print('>>> Total testing size: {}'.format(sum(test_data['num_samples'])))\n\n # Save user data\n if SAVE:\n with open(train_path, 'wb') as outfile:\n pickle.dump(train_data, outfile)\n with open(val_path, 'wb') as outfile:\n pickle.dump(val_data, outfile)\n with open(test_path, 'wb') as outfile:\n pickle.dump(test_data, outfile)\n\n print('>>> Save data.')\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"data/generate_imbalance.py","file_name":"generate_imbalance.py","file_ext":"py","file_size_in_byte":9342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"165786088","text":"# -*- coding: utf-8 -*-\nfrom project import app\nfrom project import db\nfrom datetime import datetime\nfrom collections import OrderedDict\nfrom sqlalchemy import func\nfrom project.models import Users, Books, Devices, Log, BooksUsers, UsersUsers\nfrom flask import render_template, request, jsonify, send_file, session\nfrom flask.ext.wtf import Form, TextField, validators\n\n\nclass CreateForm(Form):\n text = TextField(u'Text:', [validators.Length(min=1, max=20)])\n\n\n#### controllers\n\n@app.route('/test')\ndef start():\n return render_template('main/index.html')\n\n\nclass UserPwd(db.Model):\n __tablename__ = 'user_pwd'\n name = db.Column(db.String(30), primary_key=True)\n passw = db.Column(db.String(32), unique=False)\n\n def __init__(self, name, passw):\n self.name = name\n self.passw = passw\n\n def __repr__(self):\n return '<UserPwd %r>' % 
self.name\n\n\n# @app.route('/', methods=['POST', 'GET'])\n# def hello_world():\n# content = request.get_json()\n# res = jsonify(device_id=content['device_id'])\n# print('device_id ' + str(content['device_id']))\n# print('progress ' + str(content['progress']))\n# # print('scroll_y ' + str(content['scroll_y']))\n# # print('screen height ' + str(content['height']))\n# # print('total_height' + str(content['total_height']))\n# print('book_name ' + str(content['book_name']))\n# print('text_length ' + str(content['text_length']))\n# user = UserPwd.query.limit(1).all()\n# print user\n# return res\n\n@app.route('/')\ndef index():\n books = get_monthly_history()\n user = Users.Users.query.get(session['user_id'])\n\n return render_template('main/index.html', user=user, books=books)\n\n\n@app.route('/login')\ndef login():\n session['user_id'] = 1\n return 'OK'\n\n\n@app.route('/get_image//')\ndef get_image(category, filename):\n if category == 'user':\n filename = 'uploads/userpics/' + filename\n\n if category == 'book':\n filename = 'uploads/books/' + filename\n\n return send_file(filename, mimetype='image/jpeg,image/png')\n\n#### End of controllers\n\n\ndef get_monthly_history():\n \"\"\"\n gets monthly history of reading for current user\n returns dictionary:\n dict('month-year'=[dict(Books.Books), ...], ...)\n \"\"\"\n\n date = subtract_years(datetime.now(), 1)\n\n min_progress = 5\n max_progress = 98\n\n current_user_books = Log.Log.query\\\n .filter(Log.Log.device_id == session['user_id'], Log.Log.datetime >= date,\n Log.Log.progress <= 10)\\\n .group_by(Log.Log.book_id)\\\n .order_by(Log.Log.datetime.desc())\\\n .all()\n\n subquery_start = (db.session.query(\n Log.Log.book_id,\n Log.Log.datetime.label('start')\n ).filter(Log.Log.progress <= min_progress)\n .group_by(Log.Log.book_id).order_by(Log.Log.progress)\n ).subquery()\n\n subquery_finish = (db.session.query(\n Log.Log.book_id,\n Log.Log.datetime.label('finish')\n ).filter(Log.Log.progress >= max_progress)\n .group_by(Log.Log.book_id).order_by(Log.Log.progress.desc())\n ).subquery()\n\n subquery_progress = (db.session.query(\n Log.Log.book_id,\n (func.max(Log.Log.progress).label('progress'))\n ).group_by(Log.Log.book_id)\n ).subquery()\n\n detailed_books_list = OrderedDict()\n for book in current_user_books:\n\n\n book = (db.session.query(Books.Books,\n subquery_start.c.start,\n subquery_finish.c.finish,\n subquery_progress.c.progress)\n .outerjoin(subquery_start, (Books.Books.book_id == subquery_start.c.book_id))\n .outerjoin(subquery_finish, (Books.Books.book_id == subquery_finish.c.book_id))\n .outerjoin(subquery_progress, (Books.Books.book_id == subquery_progress.c.book_id))\n .filter(Books.Books.book_id == book.book_id)).first()\n print(book)\n book.progress = int(book.progress)\n if book.start:\n key = book.start.strftime(\"%B, %Y\")\n book.start = book.start.strftime('%d %B').decode('1251')\n\n if book.finish:\n book.finish = book.finish.strftime('%d %B').decode('1251')\n\n if key not in detailed_books_list:\n detailed_books_list[key] = []\n\n detailed_books_list[key].append(book)\n\n return detailed_books_list\n\n\ndef subtract_years(dt, years):\n try:\n dt = dt.replace(year=dt.year-years)\n except ValueError:\n dt = dt.replace(year=dt.year-years, day=dt.day-1)\n return dt\n\n\n","sub_path":"project/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"69394099","text":"import discord\r\n\r\nfrom 
discord.ext import commands\r\nfrom .utils.transformdict import IDAbleDict\r\nfrom .utils.database import Database, DB_FILE_PATH\r\n\r\nAFK_FILE_NAME = \"afk.json\"\r\n\r\nclass AFK:\r\n def __init__(self, bot):\r\n self.bot = bot\r\n self.db = Database.from_json(DB_FILE_PATH + AFK_FILE_NAME)\r\n\r\n def _set_afk(self, server : discord.Server,\r\n member : discord.Member, msg : str):\r\n if self.db.get_storage(server) is None:\r\n self.db[server] = IDAbleDict()\r\n self.db[server][member] = msg\r\n\r\n def _del_afk(self, server : discord.Server, member : discord.Member):\r\n try:\r\n self.db[server].pop(member)\r\n except KeyError:\r\n return False\r\n else:\r\n return True\r\n \r\n @commands.command(pass_context=True)\r\n async def afk(self, ctx, *, msg : str=None):\r\n server = ctx.message.server\r\n member = ctx.message.author\r\n if msg is None:\r\n if self._del_afk(server, member):\r\n await self.bot.say(\"You are no longer AFK\")\r\n else:\r\n self._set_afk(server, member, msg)\r\n await self.bot.say(\"You are AFK\")\r\n\r\n \r\n async def on_message(self, message):\r\n if message.author == self.bot.user:\r\n return\r\n mentions = set(message.mentions)\r\n server = message.server\r\n if not mentions:\r\n return\r\n fmt = \"{} is afk, probably.\\nI think this is their message:\\n{}\"\r\n for user in mentions:\r\n try:\r\n user_afk_message = self.db[server][user]\r\n except KeyError:\r\n continue\r\n else:\r\n afk_message = fmt.format(user.mention, user_afk_message)\r\n await self.bot.send_message(message.channel, afk_message)\r\n \r\ndef setup(bot):\r\n afk = AFK(bot)\r\n bot.add_listener(afk.on_message, \"on_message\")\r\n bot.add_cog(afk)\r\n","sub_path":"cogs/afk.py","file_name":"afk.py","file_ext":"py","file_size_in_byte":1975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"424052891","text":"import sys\nM = 998244353\n\nn = int(input())\nd = list(map(int, input().split()))\nidx = [0]*n\nans = 1\nfor i in range(n):\n idx[d[i]] += 1\n\nif not d[0] == 0 or not idx[0] == 1:\n print(0)\nelse:\n for i in range(1,n):\n if idx[i-1] == 0:\n if not idx[i] == 0:\n print(0)\n sys.exit()\n else:\n ans *= idx[i-1]**idx[i]%M\n else:\n print(ans%M)\n","sub_path":"Others/nikkei/B.py","file_name":"B.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"451387407","text":"\"\"\"CFNgin exceptions.\"\"\"\n\n\nclass CancelExecution(Exception):\n \"\"\"Raised when we want to cancel executing the plan.\"\"\"\n\n\nclass ChangesetDidNotStabilize(Exception):\n \"\"\"Raised when applying a changeset fails.\"\"\"\n\n def __init__(self, change_set_id):\n \"\"\"Instantiate class.\n\n Args:\n change_set_id (str): The changeset that failed.\n\n \"\"\"\n self.id = change_set_id\n message = \"Changeset '%s' did not reach a completed state.\" % (\n change_set_id\n )\n\n super(ChangesetDidNotStabilize, self).__init__(message)\n\n\nclass FailedLookup(Exception):\n \"\"\"Intermediary Exception to be converted to FailedVariableLookup.\n\n Should be caught by error handling and\n :class:`runway.cfngin.exceptions.FailedVariableLookup` raised instead to\n construct a proper error message.\n\n \"\"\"\n\n def __init__(self, lookup, error, *args, **kwargs):\n \"\"\"Instantiate class.\n\n Args:\n lookup (:class:`runway.cfngin.variables.VariableValueLookup`):\n Attempted lookup and resulted in an exception being raised.\n error (Exception): The exception that was 
raised.\n\n \"\"\"\n self.lookup = lookup\n self.error = error\n super(FailedLookup, self).__init__(\"Failed lookup\", *args, **kwargs)\n\n\nclass FailedVariableLookup(Exception):\n \"\"\"Lookup could not be resolved.\n\n Raised when an exception is raised when trying to resolve a lookup.\n\n \"\"\"\n\n def __init__(self, variable_name, lookup, error, *args, **kwargs):\n \"\"\"Instantiate class.\n\n Args:\n variable_name (str): Name of the variable that failed to be\n resolved.\n lookup (:class:`runway.cfngin.variables.VariableValueLookup`):\n Attempted lookup and resulted in an exception being raised.\n error (Exception): The exception that was raised.\n\n \"\"\"\n self.lookup = lookup\n self.error = error\n message = \"Couldn't resolve lookup in variable `%s`, \" % variable_name\n message += \"lookup: ${%s}: \" % repr(lookup)\n message += \"(%s) %s\" % (error.__class__, error)\n super(FailedVariableLookup, self).__init__(message, *args, **kwargs)\n\n\nclass GraphError(Exception):\n \"\"\"Raised when the graph is invalid (e.g. cyclic dependencies).\"\"\"\n\n def __init__(self, exception, stack, dependency):\n \"\"\"Instantiate class.\n\n Args:\n exception (Exception): The exception that was raised by the invalid\n graph.\n stack (str): Name of the stack causing the error.\n dependency (str): Name of the dependency causing the error.\n\n \"\"\"\n self.stack = stack\n self.dependency = dependency\n self.exception = exception\n message = (\n \"Error detected when adding '%s' \"\n \"as a dependency of '%s': %s\"\n ) % (dependency, stack, str(exception))\n super(GraphError, self).__init__(message)\n\n\nclass ImproperlyConfigured(Exception):\n \"\"\"Raised when a component is improperly configured.\"\"\"\n\n def __init__(self, cls, error, *args, **kwargs):\n \"\"\"Instantiate class.\n\n Args:\n cls (Any): The class that was improperly configured.\n error (Exception): The exception that was raised when trying to\n use cls.\n\n \"\"\"\n message = \"Class \\\"%s\\\" is improperly configured: %s\" % (\n cls,\n error,\n )\n super(ImproperlyConfigured, self).__init__(message, *args, **kwargs)\n\n\nclass InvalidConfig(Exception):\n \"\"\"Provided config file is invalid.\"\"\"\n\n def __init__(self, errors):\n \"\"\"Instantiate class.\n\n Args:\n errors (Union[str, List[Union[Exception, str]]]): Errors or error\n messages that are raised to identify that a config is invalid.\n\n \"\"\"\n super(InvalidConfig, self).__init__(errors)\n self.errors = errors\n\n\nclass InvalidDockerizePipConfiguration(Exception):\n \"\"\"Raised when the provided configuration for dockerized pip is invalid.\"\"\"\n\n def __init__(self, msg):\n \"\"\"Instantiate class.\n\n Args:\n msg (str): The reason for the error being raised.\n\n \"\"\"\n self.message = msg\n super(InvalidDockerizePipConfiguration, self).__init__(self.message)\n\n\nclass InvalidLookupCombination(Exception):\n \"\"\"Improper use of lookups to result in a non-string return value.\"\"\"\n\n def __init__(self, lookup, lookups, value, *args, **kwargs):\n \"\"\"Instantiate class.\n\n Args:\n lookup (:class:`runway.cfngin.variables.VariableValueLookup`): The\n variable lookup that was attempted but did not return a string.\n lookups (:class:`runway.cfngin.variables.VariableValueConcatenation`):\n The full variable concatenation the failing lookup is part of.\n value (Any): The non-string value returned by lookup.\n\n \"\"\"\n message = (\n \"Lookup: \\\"{}\\\" has non-string return value, must be only lookup \"\n \"present (not {}) in \\\"{}\\\"\"\n 
).format(str(lookup), len(lookups), value)\n super(InvalidLookupCombination, self).__init__(message,\n *args,\n **kwargs)\n\n\nclass InvalidLookupConcatenation(Exception):\n \"\"\"Intermediary Exception to be converted to InvalidLookupCombination.\n\n Should be caught by error handling and\n :class:`runway.cfngin.exceptions.InvalidLookupCombination` raised instead\n to construct a proper error message.\n\n \"\"\"\n\n def __init__(self, lookup, lookups, *args, **kwargs):\n \"\"\"Instantiate class.\"\"\"\n self.lookup = lookup\n self.lookups = lookups\n super(InvalidLookupConcatenation, self).__init__(\"\", *args, **kwargs)\n\n\nclass InvalidUserdataPlaceholder(Exception):\n \"\"\"Raised when a placeholder name in raw_user_data is not valid.\n\n E.g. ``${100}`` would raise this.\n\n \"\"\"\n\n def __init__(self, blueprint_name, exception_message, *args, **kwargs):\n \"\"\"Instantiate class.\n\n Args:\n blueprint_name (str): Name of the blueprint with invalid userdata\n placeholder.\n exception_message (str): Message from the exception that was raised\n while parsing the userdata.\n\n \"\"\"\n message = exception_message + \". \"\n message += \"Could not parse userdata in blueprint \\\"%s\\\". \" % (\n blueprint_name)\n message += \"Make sure to escape all $ symbols with a $$.\"\n super(InvalidUserdataPlaceholder, self).__init__(\n message, *args, **kwargs)\n\n\nclass MissingEnvironment(Exception):\n \"\"\"Raised when an environment lookup is used but the key doesn't exist.\"\"\"\n\n def __init__(self, key, *args, **kwargs):\n \"\"\"Instantiate class.\n\n Args:\n key (str): The key that was used but doesn't exist in the\n environment.\n\n \"\"\"\n self.key = key\n message = \"Environment missing key %s.\" % (key,)\n super(MissingEnvironment, self).__init__(message, *args, **kwargs)\n\n\nclass MissingParameterException(Exception):\n \"\"\"Raised if a required parameter with no default is missing.\"\"\"\n\n def __init__(self, parameters, *args, **kwargs):\n \"\"\"Instantiate class.\n\n Args:\n parameters (List[str]): A list of the parameters that are missing.\n\n \"\"\"\n self.parameters = parameters\n message = \"Missing required cloudformation parameters: %s\" % (\n \", \".join(parameters),\n )\n super(MissingParameterException, self).__init__(message, *args,\n **kwargs)\n\n\nclass MissingVariable(Exception):\n \"\"\"Raised when a variable with no default is not provided a value.\"\"\"\n\n def __init__(self, blueprint_name, variable_name, *args, **kwargs):\n \"\"\"Instantiate class.\n\n Args:\n blueprint_name (str): Name of the blueprint.\n variable_name (str): Name of the variable missing a value.\n\n \"\"\"\n message = \"Variable \\\"%s\\\" in blueprint \\\"%s\\\" is missing\" % (\n variable_name, blueprint_name)\n super(MissingVariable, self).__init__(message, *args, **kwargs)\n\n\nclass OutputDoesNotExist(Exception):\n \"\"\"Raised when a specific stack output does not exist.\"\"\"\n\n def __init__(self, stack_name, output, *args, **kwargs):\n \"\"\"Instantiate class.\n\n Args:\n stack_name (str): Name of the stack.\n output (str): The output that does not exist.\n\n \"\"\"\n self.stack_name = stack_name\n self.output = output\n\n message = \"Output %s does not exist on stack %s\" % (output,\n stack_name)\n super(OutputDoesNotExist, self).__init__(message, *args, **kwargs)\n\n\nclass PipError(Exception):\n \"\"\"Raised when pip returns a non-zero exit code.\"\"\"\n\n def __init__(self):\n \"\"\"Instantiate class.\"\"\"\n self.message = ('A non-zero exit code was returned when invoking '\n 
'pip. More information can be found in the error '\n 'above.')\n super(PipError, self).__init__(self.message)\n\n\nclass PipenvError(Exception):\n \"\"\"Raised when pipenv returns a non-zero exit code.\"\"\"\n\n def __init__(self):\n \"\"\"Instantiate class.\"\"\"\n self.message = ('A non-zero exit code was returned when invoking '\n 'pipenv. Please ensure pipenv is installed and the '\n 'Pipfile being used is valid. More information can be '\n 'found in the error above.')\n super(PipenvError, self).__init__(self.message)\n\n\nclass PersistentGraphCannotLock(Exception):\n \"\"\"Raised when the persistent graph in S3 cannot be locked.\"\"\"\n\n def __init__(self, reason):\n \"\"\"Instantiate class.\"\"\"\n message = \"Could not lock persistent graph; %s\" % reason\n super(PersistentGraphCannotLock, self).__init__(message)\n\n\nclass PersistentGraphCannotUnlock(Exception):\n \"\"\"Raised when the persistent graph in S3 cannot be unlocked.\"\"\"\n\n def __init__(self, reason):\n \"\"\"Instantiate class.\"\"\"\n message = \"Could not unlock persistent graph; %s\" % reason\n super(PersistentGraphCannotUnlock, self).__init__(message)\n\n\nclass PersistentGraphLocked(Exception):\n \"\"\"Raised when the persistent graph in S3 is locked.\n\n The action being executed requires it to be unlocked before attempted.\n\n \"\"\"\n\n def __init__(self, message=None, reason=None):\n \"\"\"Instantiate class.\"\"\"\n if not message:\n message = (\"Persistent graph is locked. {}\".format(\n reason or (\"This action requires the graph to be \"\n \"unlocked to be executed.\")\n ))\n super(PersistentGraphLocked, self).__init__(message)\n\n\nclass PersistentGraphLockCodeMissmatch(Exception):\n \"\"\"Raised when the provided persistent graph lock code does not match.\n\n The code used to unlock the persistent graph must match the s3 object lock\n code.\n\n \"\"\"\n\n def __init__(self, provided_code, s3_code):\n \"\"\"Instantiate class.\"\"\"\n message = (\"The provided lock code '%s' does not match the S3 \"\n \"object lock code '%s'\" % (provided_code, s3_code))\n super(PersistentGraphLockCodeMissmatch, self).__init__(message)\n\n\nclass PersistentGraphUnlocked(Exception):\n \"\"\"Raised when the persistent graph in S3 is unlocked.\n\n The action being executed requires it to be locked before attempted.\n\n \"\"\"\n\n def __init__(self, message=None, reason=None):\n \"\"\"Instantiate class.\"\"\"\n if not message:\n message = (\"Persistent graph is unlocked. 
{}\".format(\n reason or (\"This action requires the graph to be \"\n \"locked to be executed.\")\n ))\n super(PersistentGraphUnlocked, self).__init__(message)\n\n\nclass PlanFailed(Exception):\n \"\"\"Raised if any step of a plan fails.\"\"\"\n\n def __init__(self, failed_steps, *args, **kwargs):\n \"\"\"Instantiate class.\n\n Args:\n failed_steps (List[:class:`runway.cfngin.plan.Step`]): The steps\n that failed.\n\n \"\"\"\n self.failed_steps = failed_steps\n\n step_names = ', '.join(step.name for step in failed_steps)\n message = \"The following steps failed: %s\" % (step_names,)\n\n super(PlanFailed, self).__init__(message, *args, **kwargs)\n\n\nclass StackDidNotChange(Exception):\n \"\"\"Raised when there are no changes to be made by the provider.\"\"\"\n\n\nclass StackDoesNotExist(Exception):\n \"\"\"Raised when a stack does not exist in AWS.\"\"\"\n\n def __init__(self, stack_name, *args, **kwargs):\n \"\"\"Instantiate class.\n\n Args:\n stack_name (str): Name of the stack that does not exist.\n\n \"\"\"\n message = (\"Stack: \\\"%s\\\" does not exist in outputs or the lookup is \"\n \"not available in this CFNgin run\") % (stack_name,)\n super(StackDoesNotExist, self).__init__(message, *args, **kwargs)\n\n\nclass StackUpdateBadStatus(Exception):\n \"\"\"Raised if the state of a stack can't be handled.\"\"\"\n\n def __init__(self, stack_name, stack_status, reason, *args, **kwargs):\n \"\"\"Instantiate class.\n\n Args:\n stack_name (str): Name of the stack.\n stack_status (str): The stack's status.\n reason (str): The reason for the current status.\n\n \"\"\"\n self.stack_name = stack_name\n self.stack_status = stack_status\n\n message = (\"Stack: \\\"%s\\\" cannot be updated nor re-created from state \"\n \"%s: %s\" % (stack_name, stack_status, reason))\n super(StackUpdateBadStatus, self).__init__(message, *args, **kwargs)\n\n\nclass StackFailed(Exception):\n \"\"\"Raised when a stack action fails.\n\n Primarily used with hooks that act on stacks.\n\n \"\"\"\n\n def __init__(self, stack_name, status_reason=None):\n \"\"\"Instantiate class.\n\n Args:\n stack_name (str): Name of the stack.\n status_reason (str): The reason for the current status.\n\n \"\"\"\n self.stack_name = stack_name\n self.status_reason = status_reason\n\n message = 'Stack \"{}\" failed'.format(stack_name)\n if status_reason:\n message += ' with reason \"{}\"'.format(status_reason)\n super(StackFailed, self).__init__(message)\n\n\nclass UnableToExecuteChangeSet(Exception):\n \"\"\"Raised if changeset execution status is not ``AVAILABLE``.\"\"\"\n\n def __init__(self, stack_name, change_set_id, execution_status):\n \"\"\"Instantiate class.\n\n Args:\n stack_name (str): Name of the stack.\n change_set_id (str): The changeset that failed.\n execution_status (str): The value of the changeset's\n ``ExecutionStatus`` attribute.\n\n \"\"\"\n self.stack_name = stack_name\n self.id = change_set_id\n self.execution_status = execution_status\n\n message = (\"Changeset '%s' on stack '%s' had bad execution status: \"\n \"%s\" % (change_set_id, stack_name, execution_status))\n\n super(UnableToExecuteChangeSet, self).__init__(message)\n\n\nclass UnhandledChangeSetStatus(Exception):\n \"\"\"Raised when creating a changeset failed for an unhandled reason.\n\n Handled failure reasons include: no changes\n\n \"\"\"\n\n def __init__(self, stack_name, change_set_id, status, status_reason):\n \"\"\"Instantiate class.\n\n Args:\n stack_name (str): Name of the stack.\n change_set_id (str): The changeset that failed.\n status (str): The 
state that could not be handled.\n status_reason (str): Cause of the current state.\n\n \"\"\"\n self.stack_name = stack_name\n self.id = change_set_id\n self.status = status\n self.status_reason = status_reason\n message = (\n \"Changeset '%s' on stack '%s' returned an unhandled status \"\n \"'%s: %s'.\" % (change_set_id, stack_name, status,\n status_reason)\n )\n\n super(UnhandledChangeSetStatus, self).__init__(message)\n\n\nclass UnknownLookupType(Exception):\n \"\"\"Lookup type provided does not match a registered lookup.\n\n Example:\n If a lookup of ``${<lookup_type> query}`` is used and ``<lookup_type>``\n is not a registered lookup, this exception will be raised.\n\n \"\"\"\n\n def __init__(self, lookup_type, *args, **kwargs):\n \"\"\"Instantiate class.\n\n Args:\n lookup_type (str): Lookup type that was used but not registered.\n\n \"\"\"\n message = \"Unknown lookup type: \\\"{}\\\"\".format(lookup_type)\n super(UnknownLookupType, self).__init__(message, *args, **kwargs)\n\n\nclass UnresolvedVariable(Exception):\n \"\"\"Raised when trying to use a variable before it has been resolved.\"\"\"\n\n def __init__(self, blueprint_name, variable, *args, **kwargs):\n \"\"\"Instantiate class.\n\n Args:\n blueprint_name (str): Name of the blueprint that tried to use\n the unresolved variables.\n variable (:class:`runway.cfngin.variables.Variable`): The\n unresolved variable.\n\n \"\"\"\n message = (\n \"Variable \\\"%s\\\" in blueprint \\\"%s\\\" hasn't been resolved\" % (\n variable.name, blueprint_name\n )\n )\n super(UnresolvedVariable, self).__init__(message, *args, **kwargs)\n\n\nclass UnresolvedVariableValue(Exception):\n \"\"\"Intermediary Exception to be converted to UnresolvedVariable.\n\n Should be caught by error handling and\n :class:`runway.cfngin.exceptions.UnresolvedVariable` raised instead to\n construct a proper error message.\n\n \"\"\"\n\n def __init__(self, lookup, *args, **kwargs):\n \"\"\"Instantiate class.\n\n Args:\n lookup (:class:`runway.cfngin.variables.VariableValueLookup`): The\n lookup that is not resolved.\n\n \"\"\"\n self.lookup = lookup\n super(UnresolvedVariableValue, self).__init__(\n \"Unresolved lookup\", *args, **kwargs)\n\n\nclass UnresolvedVariables(Exception):\n \"\"\"Raised when trying to use variables before they have been resolved.\"\"\"\n\n def __init__(self, blueprint_name, *args, **kwargs):\n \"\"\"Instantiate class.\n\n Args:\n blueprint_name (str): Name of the blueprint that tried to use\n the unresolved variables.\n\n \"\"\"\n message = \"Blueprint: \\\"%s\\\" hasn't resolved its variables\" % (\n blueprint_name)\n super(UnresolvedVariables, self).__init__(message, *args, **kwargs)\n\n\nclass ValidatorError(Exception):\n \"\"\"Used for errors raised by custom validators of blueprint variables.\"\"\"\n\n def __init__(self, variable, validator, value, exception=None):\n \"\"\"Instantiate class.\n\n Args:\n variable (str): The variable that failed validation.\n validator (str): The validator that was not passed.\n value (str): The value of the variable that did not pass the\n validator.\n exception (Exception): The exception raised by the validator.\n\n \"\"\"\n self.variable = variable\n self.validator = validator\n self.value = value\n self.exception = exception\n self.message = (\"Validator '%s' failed for variable '%s' with value \"\n \"'%s'\") % (self.validator, self.variable, self.value)\n\n if self.exception:\n self.message += \": %s: %s\" % (self.exception.__class__.__name__,\n str(self.exception))\n super(ValidatorError, self).__init__()\n\n def 
__str__(self):\n \"\"\"Return the exception's message when converting to a string.\"\"\"\n return self.message\n\n\nclass VariableTypeRequired(Exception):\n \"\"\"Raised when a variable defined in a blueprint is missing a type.\"\"\"\n\n def __init__(self, blueprint_name, variable_name, *args, **kwargs):\n \"\"\"Instantiate class.\n\n Args:\n blueprint_name (str): Name of the blueprint.\n variable_name (str): Name of the variable missing a type.\n\n \"\"\"\n message = (\n \"Variable \\\"%s\\\" in blueprint \\\"%s\\\" does not have a type\" % (\n variable_name, blueprint_name)\n )\n super(VariableTypeRequired, self).__init__(message, *args, **kwargs)\n","sub_path":"runway/cfngin/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":20587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"196284259","text":"import os\nimport sys\nimport argparse\nfrom collections import deque, namedtuple\n\nimport numpy as np\nimport torch\nfrom unityagents import UnityEnvironment\nfrom agent import DDPGAgent\n\n\ndef save_checkpoint(agent, total_episode, logdir):\n torch.save(agent.actor_local.state_dict(), os.path.join(logdir, \"actor_local.pth\"))\n torch.save(agent.actor_target.state_dict(), os.path.join(logdir, \"actor_target.pth\"))\n torch.save(agent.critic_local.state_dict(), os.path.join(logdir, \"critic_local.pth\"))\n torch.save(agent.critic_target.state_dict(), os.path.join(logdir, \"critic_target.pth\"))\n\n torch.save(total_episode, os.path.join(logdir, \"episodes.pth\"))\n\n\ndef load_checkpoint(logdir):\n Checkpoint = namedtuple(\"Checkpoint\",\n ['actor_local',\n 'actor_target',\n 'critic_local',\n 'critic_target',\n 'episodes'])\n try:\n actor_local = torch.load(os.path.join(logdir, \"actor_local.pth\"))\n actor_target = torch.load(os.path.join(logdir, \"actor_target.pth\"))\n critic_local = torch.load(os.path.join(logdir, \"critic_local.pth\"))\n critic_target = torch.load(os.path.join(logdir, \"critic_target.pth\"))\n episodes = torch.load(os.path.join(logdir, \"episodes.pth\"))\n return Checkpoint(actor_local, actor_target, critic_local, critic_target, episodes)\n except FileNotFoundError:\n if not os.path.exists(logdir):\n os.mkdir(logdir)\n\n\ndef train(agent, n_episodes, max_t, logdir):\n checkpoint = load_checkpoint(logdir)\n total_episode = 0\n if checkpoint:\n agent.load(checkpoint)\n total_episode += checkpoint.episodes\n\n scores_deque = deque(maxlen=100)\n scores = []\n max_score = -np.Inf\n best_avg = 0\n for i_episode in range(1, n_episodes+1):\n total_episode += 1\n\n env_info = env.reset(train_mode=True)[brain_name]\n state = env_info.vector_observations\n agent.reset()\n score = 0\n\n for t in range(max_t):\n action = agent.act(state)\n\n env_info = env.step(action)[brain_name]\n next_state = env_info.vector_observations\n reward = env_info.rewards\n done = env_info.local_done\n\n agent.step(state, action, reward, next_state, done)\n state = next_state\n score += np.mean(reward)\n if all(done):\n break\n\n max_score = max(score, max_score)\n\n scores_deque.append(score)\n scores.append(score)\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\tScore: {:.2f}\\tMax Score: {:.2f}'.format(\n total_episode, np.mean(scores_deque), score, max_score), end=\"\")\n sys.stdout.flush()\n\n if i_episode % 100 == 0:\n save_checkpoint(agent, total_episode, logdir)\n\n np.save(os.path.join(logdir, 'scores.npy'), scores)\n\n avg_score = np.mean(scores_deque)\n print('\\rEpisode {}\\tAverage Score: {:.2f}\\tMax 
Score: {:.2f}'.format(\n total_episode, avg_score, max_score), end=\"\")\n\n if avg_score > best_avg:\n best_avg = avg_score\n best_logdir = os.path.join(logdir, 'best')\n if not os.path.exists(best_logdir):\n os.mkdir(best_logdir)\n save_checkpoint(agent, total_episode, best_logdir)\n\n print(' Best model saved!', end=\"\")\n\n print(\" \")\n\n return scores\n\n\nif __name__ == \"__main__\":\n # Parse\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--episodes\", type=int, default=1000)\n parser.add_argument(\"--max-t\", type=int, default=1000)\n parser.add_argument(\"--logdir\", type=str, default=\"checkpoint\")\n ARGS = parser.parse_args()\n\n # Create environment.\n env = UnityEnvironment(file_name='environment/Tennis_Linux/Tennis.x86')\n\n # get the default brain\n brain_name = env.brain_names[0]\n brain = env.brains[brain_name]\n\n # reset the environment\n env_info = env.reset(train_mode=True)[brain_name]\n\n # number of agents\n num_agents = len(env_info.agents)\n\n # size of each action\n action_size = brain.vector_action_space_size\n\n # examine the state space\n states = env_info.vector_observations\n state_size = states.shape[1]\n\n # Create agent\n agent = DDPGAgent(state_size=state_size, action_size=action_size, random_seed=10)\n\n scores = train(agent, ARGS.episodes, ARGS.max_t, ARGS.logdir)\n np.save(os.path.join(ARGS.logdir, 'scores.npy'), scores)\n","sub_path":"project3-collab_compete/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"517532705","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nimport xlrd\n\ndef main():\n rb = xlrd.open_workbook('c:/tmp/Новый.xls',formatting_info=True)\n sheet = rb.sheet_by_index(0)\n for rownum in range(sheet.nrows):\n row = sheet.row_values(rownum)\n print(int(row[1]))\n #for c_el in row:\n # print (c_el)\n\nif __name__ == '__main__':\n main()","sub_path":"get_xls.py","file_name":"get_xls.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"613165920","text":"import microbit\nfrom uln2003 import *\n# (c) IDWizard 2017\n# MIT License.\n\nmicrobit.display.off()\n\n\nclass Stepper(StepperBase):\n def _set_pin(self, pin, value):\n self.pins[pin].write_digital(value)\n\n def _wait(self):\n microbit.sleep(self.delay)\n\n\nif __name__ == '__main__':\n FULL_ROTATION = 4096\n s1 = Stepper(HALF_STEP, microbit.pin16, microbit.pin15,\n microbit.pin14, microbit.pin13, delay=5)\n s2 = Stepper(HALF_STEP, microbit.pin6, microbit.pin5,\n microbit.pin4, microbit.pin3, delay=5)\n # s1.step(FULL_ROTATION)\n # s2.step(FULL_ROTATION)\n\n runner = Driver()\n runner.run([Command(s1, FULL_ROTATION, 1),\n Command(s2, FULL_ROTATION/2, -1)])\n","sub_path":"uln2003_microbit.py","file_name":"uln2003_microbit.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"128627078","text":"import ConfigParser\nimport os\n\n# Default values\ndefaults = {\n \"mysql\": {\n \"address\": \"0.0.0.0\",\n \"username\": \"user\",\n \"password\": \"pass\",\n \"database\": \"db\"\n },\n \"output\": {\n \"location\": \"~/\"\n },\n \"general\": {\n \"interval\": \"10s\"\n }\n}\n\nparser = ConfigParser.ConfigParser() # Get a parser instance\nsettings = None\n\n\ndef read_config():\n global settings\n\n for (section, local_settings) in defaults.iteritems(): # 
Iterate the sections\n parser.add_section(section) # Add the section to the parser\n for setting, value in local_settings.iteritems(): # Iterate the values\n parser.set(section, setting, value) # Add the section, setting, and value to the defaults\n\n if os.path.isfile(\"config.ini\"):\n parser.read(\"config.ini\")\n else:\n parser.write(open(\"config.ini\", \"wb\"))\n\n settings = parser._sections\n\n\ndef get(section, key):\n return settings[section][key]\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"65289691","text":"from mock import MagicMock, PropertyMock, call\nfrom pytest import fixture\n\nfrom paws.dns.dns_updater import DnsUpdater\nfrom paws.dns.dns_updater import Dns\n\nclass TestDnsUpdater(object):\n\n @fixture\n def logger(self):\n return MagicMock()\n\n @fixture\n def dns_updater(self, dns, aws, logger):\n return DnsUpdater(\"dev\", dns, aws, logger)\n\n @fixture\n def dns(self):\n return MagicMock(spec_set = Dns)\n\n @fixture\n def aws(self):\n return MagicMock()\n\n @fixture\n def instances(self):\n instances = [ MagicMock(), MagicMock() ]\n instances[0].name = \"validdevsrv123\"\n instances[1].name = \"validdevsrv456\"\n instances[0].private_ip_address = \"10.0.1.2\"\n instances[1].private_ip_address = \"10.0.1.3\"\n for i in [0, 1]:\n instances[i].tags = {}\n instances[i].requested_dns_records = { \"A\": [], \"MX\": [] }\n interface = MagicMock()\n instances[i].interfaces = [interface]\n interface.attachment.device_index = 0\n interface.private_ip_address = instances[i].private_ip_address\n return instances\n\n @fixture\n def elbs(self):\n elbs = [ MagicMock(), MagicMock() ]\n elbs[0].name = \"aaaaaaaaa\"\n elbs[1].name = \"aaaaaaaaa\"\n return elbs\n\n @fixture(autouse = True)\n def mock_aws(self, aws, instances, elbs):\n aws.get_all_instances.return_value = instances\n aws.elb_connection.get_all_load_balancers.return_value = elbs\n\n def test_updates_instances(self, dns_updater, dns, instances):\n dns_updater.update_instances()\n\n dns.delete_a.assert_has_calls([ call(i.name) for i in instances ])\n dns.add_a.assert_has_calls([ call(i.name, i.private_ip_address) for i in instances ])\n dns.delete_ptr.assert_has_calls([ call(i.private_ip_address) for i in instances ])\n dns.add_ptr.assert_has_calls([ call(i.private_ip_address, i.name) for i in instances ])\n\n def test_creates_specified_a_records(self, dns_updater, dns, instances):\n i = instances[1]\n a_record = MagicMock()\n a_record.device_index = 0\n i.requested_dns_records[\"A\"] = [ a_record ]\n\n dns_updater.update_instances()\n\n dns.delete_a.assert_has_calls([ call(a_record.name) ])\n dns.add_a.assert_has_calls([ call(a_record.name, i.interfaces[0].private_ip_address) ])\n\n def test_doesnt_update_instances_with_invalid_names(self, dns_updater, dns, instances):\n instances[1].name = \"invalidserver\"\n\n dns_updater.update_instances()\n\n assert dns.add_a.call_count == 1\n\n def test_doesnt_update_instances_with_invalid_ips(self, dns_updater, dns, instances):\n instances[1].private_ip_address = \"INVALID\"\n\n dns_updater.update_instances()\n\n assert dns.add_a.call_count == 1\n\n def test_copes_with_unnamed_instances(self, dns_updater, dns, instances, logger):\n instances[0].name = PropertyMock(side_effect = KeyError(\"Name\"))\n\n dns_updater.update_instances()\n\n assert dns.add_a.call_count == 1\n\n def test_copes_with_bad_dns_tags(self, dns_updater, dns, 
instances, logger):\n instances[0].requested_dns_records = PropertyMock(side_effect = Exception(\"Can't parse tag\"))\n\n dns_updater.update_instances()\n\n assert dns.add_a.call_count == 1\n\n def test_updates_rds(self, dns_updater, aws, dns):\n rds = [ MagicMock(), MagicMock() ]\n aws.rds_connection.get_all_dbinstances.return_value = rds\n\n dns_updater.update_rds()\n\n dns.delete_cname.assert_has_calls([ call(r.id) for r in rds])\n dns.add_cname.assert_has_calls([ call(r.id, r._address) for r in rds])\n\n def test_updates_elb(self, dns_updater, dns, elbs):\n dns_updater.update_elb()\n\n dns.delete_cname.assert_has_calls([ call(e.name) for e in elbs])\n dns.add_cname.assert_has_calls([ call(e.name, e.dns_name) for e in elbs])\n\n def test_doesnt_update_invalid_elbs(self, dns_updater, dns, elbs):\n elbs[0].name = \"~invalid name\"\n\n dns_updater.update_elb()\n\n assert dns.delete_cname.call_args_list == [ call(elbs[1].name) ]\n assert dns.add_cname.call_args_list == [ call(elbs[1].name, elbs[1].dns_name) ]\n\n def test_updates_instance_mx_records(self, dns_updater, dns, instances):\n i = instances[1]\n mx_record = MagicMock()\n i.requested_dns_records[\"MX\"] = [ mx_record ]\n\n dns_updater.update_mx()\n\n dns.delete_mx.assert_has_calls([ call(mx_record.domain) ])\n dns.add_mx.assert_has_calls([ call(mx_record.domain, mx_record.priority, instances[1].name) ])\n\n def test_doesnt_update_mx_records_for_invalid_hostnames(self, dns_updater, dns, instances):\n instances[1].name = \"invalidserver\"\n instances[1].tags[\"DNS\"] = '[\"MX 20 something.com\"]'\n\n dns_updater.update_mx()\n\n assert not dns.add_mx.called\n\n def test_doesnt_update_mx_records_for_instances_in_other_environments(self, dns_updater, dns, instances):\n instances[1].name = \"validprdsrv123\"\n instances[1].tags[\"DNS\"] = '[\"MX 20 something.com\"]'\n\n dns_updater.update_mx()\n\n assert not dns.add_mx.called\n","sub_path":"tests/dns/dns_updater_test.py","file_name":"dns_updater_test.py","file_ext":"py","file_size_in_byte":5206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"420046734","text":"import random\r\nroll3 = random.choices(range(1, 7), k=3)\r\nroll4 = random.choices(range(1, 7), k=4)\r\nroll5 = random.choices(range(1, 7), k=5)\r\nscc = [6, 5, 4]\r\nsc = [6, 5]\r\ncc = [5, 4]\r\ncrew = [4]\r\ncaptain = [5]\r\nship = [6]\r\nn = 0\r\nwhile n <= 2:\r\n roll5 = random.choices(range(1, 7), k=5)\r\n print(roll5)\r\n if set(scc).issubset(roll5):\r\n result = [i for i in roll5 if not i in scc or scc.remove(i)]\r\n total = sum(result)\r\n print(\"Total: \" + str(total))\r\n break\r\n if set(sc).issubset(roll5):\r\n m = 0\r\n while m <= 1:\r\n roll3 = random.choices(range(1, 7), k=3)\r\n print(roll3)\r\n if set(crew).issubset(roll3):\r\n result4 = [i for i in roll3 if not i in crew or crew.remove(i)]\r\n total4 = sum(result4)\r\n print(\"Total: \" +str(total4))\r\n break\r\n else:\r\n m = m + 1\r\n break\r\n if 6 in roll5 and 5 not in roll5:\r\n q = 0\r\n while q <= 1:\r\n roll4 = random.choices(range(1, 7), k=4)\r\n print(roll4)\r\n if set(cc).issubset(roll4):\r\n result2 = [i for i in roll4 if not i in cc or cc.remove(i)]\r\n total2 = sum(result2)\r\n print(\"Total: \" + str(total2))\r\n break\r\n if set(captain).issubset(roll4) and n < 2:\r\n roll3 = random.choices(range(1, 7), k=3)\r\n print(roll3)\r\n if set(crew).issubset(roll3):\r\n result3 = [i for i in roll3 if not i in crew or crew.remove(i)]\r\n total3 = sum(result3)\r\n print(\"Total: \" + 
str(total3))\r\n                    break\r\n                else:\r\n                    break\r\n            else:\r\n                q = q + 1\r\n            if q > 1 and n < 2:\r\n                break\r\n        break\r\n    if n >= 2:\r\n        break\r\n    else:\r\n        n = n + 1\r\n# Problem: when only 6 is rolled in 2nd round, it continues rolling for 2 more rounds,\r\n# when it should only be 1 more round.\r\n","sub_path":"onumajuru_catherine_hw3_ex1.py","file_name":"onumajuru_catherine_hw3_ex1.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"384845971","text":"def lcm(a,b):\n\n    if(a>b):\n        min1=a\n    else:\n        min1=b\n    while(True):\n        if(min1%a==0 and min1%b==0):\n            print(\"lcm is:\",min1)\n            break\n        min1=min1+1\n    \nlcm(4,3)\nlcm(4,4)\n","sub_path":"TSRS, TNRN, TSRN, TNRS/Lcm of2 number take something retun nothinh.py","file_name":"Lcm of2 number take something retun nothinh.py","file_ext":"py","file_size_in_byte":185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"9315949","text":"import PySimpleGUI as sg\nimport requests\nimport json\n\ndef get_weather(city):\n    # The online environment does not support Chinese input, so I switched to the city-code endpoint; if you develop on your own machine this does not affect you\n    r = requests.get(\"http://wthrcdn.etouch.cn/weather_mini?citykey=\" + city)\n    result = json.loads(r.text)\n    return result[\"data\"][\"forecast\"][0][\"type\"]\n\n\n# Center all text\nsg.SetOptions(text_justification='center') \n\n# The layout does not need to go inside the loop\nlayout = [ \n    [ sg.Text(\"City\", size = (20, 1)), sg.Input(key = \"-CITY-\") ],\n    [ sg.Text(\"Weather\", size = (20, 1)), sg.Input(key = \"-WEATHER-\") ],\n    [ sg.Button(\"Submit\")]\n    ]\n\n# The window also only needs to be created once; do not put it inside the loop\nwindow = sg.Window(\"Weather App\", layout)\n\nwhile True:\n    event, values = window.read()\n\n    print(event, values)\n\n    # When the X in the window's top-right corner is clicked, event is None, so we should exit the loop\n    if event is None:\n        break\n\n    # values is a dict; indexing it with an input component's key (the key argument given when defining the layout) returns that component's input\n    city = values[\"-CITY-\"]\n\n    weather = get_weather(city)\n\n    print(weather)\n\n    # Find the weather input box\n    weather_wind = window[\"-WEATHER-\"]\n\n    # Write the weather into the input box\n    weather_wind.update(weather)\n\n    # The program no longer exits right away, so the next line is not needed\n    # window.read()\n\n# Leaving the loop means the program is exiting, so close the window\nwindow.close()\n","sub_path":"PySimpleGUIDemos/demo4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"403603503","text":"\"\"\"SFU Utilities.\n\n- Web cameras: see road conditions in realtime.\n- Campus report: fetched from the Road Report API.\n\"\"\"\nfrom io import BytesIO\nimport datetime\nimport json\nimport requests\nfrom bs4 import BeautifulSoup\nimport discord\nfrom redbot.core import commands\nfrom redbot.core.bot import Red\nfrom redbot.core.commands.context import Context\n\nWEBCAM_GAGLARDI = (\n    \"http://ns-webcams.its.sfu.ca/public/images/gaglardi-current.jpg\"\n    \"?nocache=0.8678792633247998&update=15000&timeout=1800000&offset=4\")\nWEBCAM_TRS = (\"http://ns-webcams.its.sfu.ca/public/images/towers-current.jpg\"\n              \"?nocache=0.9550930672504077&update=15000&timeout=1800000\")\nWEBCAM_TRN = (\"http://ns-webcams.its.sfu.ca/public/images/towern-current.jpg\"\n              \"?nocache=1&update=15000&timeout=1800000\")\nWEBCAM_UDN = (\"http://ns-webcams.its.sfu.ca/public/images/udn-current.jpg\"\n              \"?nocache=1&update=15000&timeout=1800000&offset=4\")\nWEBCAM_AQPOND = (\"http://ns-webcams.its.sfu.ca/public/images/aqn-current.jpg\"\n                 \"?nocache=1&update=15000&timeout=1800000\")\nWEBCAM_SUB = (\"http://ns-webcams.its.sfu.ca/public/images/aqsw-current.jpg\"\n              
\"?nocache=0.3346598630889852&update=15000&timeout=1800000\")\nWEBCAM_TFF = (\"http://ns-webcams.its.sfu.ca/public/images/terryfox-current.jpg\"\n \"?nocache=1&update=15000&timeout=1800000\")\nROAD_API = \"http://www.sfu.ca/security/sfuroadconditions/api/3/current\"\n\nCAMPUSES = \"campuses\"\nBUR = \"burnaby\"\nSUR = \"surrey\"\nVAN = \"vancouver\"\nROADS = \"roads\"\nSTATUS = \"status\"\nANNOUNCE = \"announcements\"\n\n\nclass SFURoads(commands.Cog): # pylint: disable=too-few-public-methods\n \"\"\"Various SFU Utilities.\"\"\"\n def __init__(self, bot: Red):\n self.bot = bot\n\n @commands.command(name=\"cam\")\n @commands.guild_only()\n async def cam(self, ctx: Context, cam: str = \"\"):\n \"\"\"SFU webcam, defaults to Gaglardi.\n\n Parameters:\n -----------\n cam: str\n One of the following short strings:\n aqpond: AQ Pond\n sub: AQ overlooking student union building\n tff: Terry Fox Field\n trn: Tower Road North\n trs: Tower Road South\n udn: University Drive North\n \"\"\"\n await ctx.trigger_typing()\n\n # We need a custom header or else we get a HTTP 403 Unauthorized\n headers = {\"User-agent\": \"Mozilla/5.0\"}\n\n try:\n if cam.lower() == \"aqpond\":\n fetchedData = requests.get(WEBCAM_AQPOND, headers=headers)\n elif cam.lower() == \"help\":\n await self.bot.send_help_for(ctx, self.cam)\n return\n elif cam.lower() == \"sub\":\n fetchedData = requests.get(WEBCAM_SUB, headers=headers)\n elif cam.lower() == \"tff\":\n fetchedData = requests.get(WEBCAM_TFF, headers=headers)\n elif cam.lower() == \"trn\":\n fetchedData = requests.get(WEBCAM_TRN, headers=headers)\n elif cam.lower() == \"trs\":\n fetchedData = requests.get(WEBCAM_TRS, headers=headers)\n elif cam.lower() == \"udn\":\n fetchedData = requests.get(WEBCAM_UDN, headers=headers)\n else:\n fetchedData = requests.get(WEBCAM_GAGLARDI, headers=headers)\n fetchedData.raise_for_status()\n except requests.exceptions.HTTPError:\n await ctx.send(\":warning: This webcam is currently unavailable!\")\n # self.logger.error(exc_info=True)\n return\n\n if not fetchedData.content:\n # Make sure we don't fetch a zero byte file\n await ctx.send(\":warning: This webcam is currently unavailable!\")\n return\n\n camPhoto = discord.File(BytesIO(fetchedData.content),\n filename=\"cam.jpg\")\n await ctx.send(file=camPhoto)\n\n @commands.command(name=\"report\")\n @commands.guild_only()\n async def report(self, ctx: Context):\n \"\"\"Show the SFU Campus Report.\"\"\"\n fetchedData = requests.get(ROAD_API)\n results = json.loads(fetchedData.content)\n\n embed = discord.Embed()\n embed.title = \"SFU Campus Report\"\n\n # We need to use BeautifulSoup to parse the HTML within the JSON.\n if results[CAMPUSES][BUR][ANNOUNCE]:\n announce = BeautifulSoup(results[CAMPUSES][BUR][ANNOUNCE],\n \"html.parser\").get_text()\n roads = results[CAMPUSES][BUR][ROADS][STATUS]\n burnAnnounce = (\"**__Roads__**:\\n{}\\n\\n**__Announcements__**:\"\n \"\\n{}\".format(roads, announce))\n else:\n burnAnnounce = \"No updates.\"\n\n if results[CAMPUSES][SUR][ANNOUNCE]:\n surreyAnnounce = BeautifulSoup(results[CAMPUSES][SUR][ANNOUNCE],\n \"html.parser\").get_text()\n else:\n surreyAnnounce = \"No updates.\"\n\n if results[CAMPUSES][VAN][ANNOUNCE]:\n vanAnnounce = BeautifulSoup(results[CAMPUSES][VAN][ANNOUNCE],\n \"html.parser\").get_text()\n else:\n vanAnnounce = \"No updates.\"\n\n embed.add_field(name=\"Burnaby\", value=burnAnnounce)\n embed.add_field(name=\"Vancouver\", value=vanAnnounce)\n embed.add_field(name=\"Surrey\", value=surreyAnnounce)\n\n lastUpdated = 
datetime.datetime.fromtimestamp(\n results[\"lastUpdated\"] / 1000).strftime(\"%Y-%m-%d %H:%M:%S\")\n embed.set_footer(\n text=\"This report was last updated on {}\".format(lastUpdated))\n await ctx.send(embed=embed)\n","sub_path":"sfu/roads.py","file_name":"roads.py","file_ext":"py","file_size_in_byte":5646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"251637713","text":"#!/usr/bin/env python3\n\nimport sys\nimport argparse\nimport os\nimport shutil\nimport subprocess\nimport re\nimport ctypes\nimport datetime\n\n# _ _ _ _ _\n# | | | | | (_) |\n# | | | | |_ _| |___\n# | | | | __| | / __|\n# | |_| | |_| | \\__ \\\n# \\___/ \\__|_|_|___/\n#\n\n# Stolen from meson\ndef Popen_safe(args, write=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs):\n import locale\n encoding = locale.getpreferredencoding()\n if sys.version_info < (3, 6) or not sys.stdout.encoding or encoding.upper() != 'UTF-8':\n return Popen_safe_legacy(args, write=write, stdout=stdout, stderr=stderr, **kwargs)\n p = subprocess.Popen(args, universal_newlines=True, close_fds=False,\n stdout=stdout, stderr=stderr, **kwargs)\n o, e = p.communicate(write)\n return p, o, e\n\n# Also stolen from meson\ndef Popen_safe_legacy(args, write=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, **kwargs):\n p = subprocess.Popen(args, universal_newlines=False,\n stdout=stdout, stderr=stderr, **kwargs)\n if write is not None:\n write = write.encode('utf-8')\n o, e = p.communicate(write)\n if o is not None:\n if sys.stdout.encoding:\n o = o.decode(encoding=sys.stdout.encoding, errors='replace').replace('\\r\\n', '\\n')\n else:\n o = o.decode(errors='replace').replace('\\r\\n', '\\n')\n if e is not None:\n if sys.stderr.encoding:\n e = e.decode(encoding=sys.stderr.encoding, errors='replace').replace('\\r\\n', '\\n')\n else:\n e = e.decode(errors='replace').replace('\\r\\n', '\\n')\n return p, o, e\n\n\ndef cmake_version(cmexe):\n try:\n p, out = Popen_safe([cmexe, '--version'])[0:2]\n if p.returncode != 0:\n return False, 'Invalid cmake exe \"{}\"'.format(cmexe)\n except (FileNotFoundError, PermissionError):\n return False, 'Invalid cmake exe \"{}\"'.format(cmexe)\n\n return True, re.sub(r'\\s*cmake version\\s*', '', out.split('\\n')[0]).strip()\n\n# _____ ___ ___ _ _____ _\n# / __ \\| \\/ | | | / ___| | |\n# | / \\/| . . | __ _| | _____ \\ `--. ___| |_ _ _ _ __\n# | | | |\\/| |/ _` | |/ / _ \\ `--. 
\\/ _ \\ __| | | | '_ \\\n# | \\__/\\| | | | (_| | <  __/ /\\__/ / __/ |_| |_| | |_) |\n# \\____/\\_| |_/\\__,_|_|\\_\\___| \\____/ \\___|\\__|\\__,_| .__/\n# | |\n# |_|\n\ndef new_build(path):\n    if os.path.exists(path):\n        shutil.rmtree(path)\n\n    os.makedirs(path)\n    os.chdir(path)\n\ndef setup(cmvers, compiler):\n    print('Setting up CMake for improved performance')\n    with open('CMakeCache.txt', 'w') as fp:\n        fp.write('CMAKE_PLATFORM_INFO_INITIALIZED:INTERNAL=1\\n')\n\n    os.makedirs('CMakeFiles')\n    os.chdir('CMakeFiles')\n    os.makedirs(cmvers)\n    os.chdir(cmvers)\n\n    with open('CMakeCCompiler.cmake', 'w') as fp:\n        fp.write('''# Fake CMake file to skip the boring and slow stuff\nset(CMAKE_C_COMPILER \"{}\")\nset(CMAKE_C_COMPILER_ID \"GNU\")\nset(CMAKE_COMPILER_IS_GNUCC 1)\nset(CMAKE_C_COMPILER_LOADED 1)\nset(CMAKE_C_COMPILER_WORKS TRUE)\nset(CMAKE_C_ABI_COMPILED TRUE)\nset(CMAKE_SIZEOF_VOID_P \"{}\")\n'''.format(compiler, ctypes.sizeof(ctypes.c_voidp)))\n\n    with open('CMakeCXXCompiler.cmake', 'w') as fp:\n        fp.write('''# Fake CMake file to skip the boring and slow stuff\nset(CMAKE_CXX_COMPILER \"{}\")\nset(CMAKE_CXX_COMPILER_ID \"GNU\")\nset(CMAKE_COMPILER_IS_GNUCXX 1)\nset(CMAKE_CXX_COMPILER_LOADED 1)\nset(CMAKE_CXX_COMPILER_WORKS TRUE)\nset(CMAKE_CXX_ABI_COMPILED TRUE)\nset(CMAKE_SIZEOF_VOID_P \"{}\")\n'''.format(compiler, ctypes.sizeof(ctypes.c_voidp)))\n\n    os.chdir('..')\n    os.chdir('..')\n\n# ______ _ _ _ _ ___ _ _______ _____ _____ _____ _____\n# | ___ \\ | | | \\ | | / _ \\ | \\ | | _ \\ |_ _| ___/ ___|_ _|\n# | |_/ / | | | \\| | / /_\\ \\| \\| | | | | | | | |__ \\ `--. | |\n# | /| | | | . ` | | _ || . ` | | | | | | | __| `--. \\ | |\n# | |\\ \\| |_| | |\\ | | | | || |\\ | |/ / | | | |___/\\__/ / | |\n# \\_| \\_|\\___/\\_| \\_/ \\_| |_/\\_| \\_/___/ \\_/ \\____/\\____/ \\_/\n#\n\nclass CMakeTraceLine:\n    class_reg = re.compile(r'^.*/')\n    def __init__(self, file, line, func, args):\n        self.file = file\n        self.line = line\n        self.func = func.lower()\n        self.args = args\n\n    def __repr__(self):\n        s = '{0}:{1} -- {2}({3})'\n        return s.format(CMakeTraceLine.class_reg.sub('', self.file), self.line, self.func, self.args)\n\ndef _lex_trace(trace):\n    # The trace format is: '<file>(<line>):  <func>( <args> )\\n'\n
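    # For example, a (hypothetical) trace line such as\n    #     /tmp/proj/CMakeLists.txt(7):  set(FOO bar )\n    # is lexed into CMakeTraceLine(file='/tmp/proj/CMakeLists.txt', line='7',\n    # func='set', args=['FOO', 'bar']).\n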
    reg_tline = re.compile(r'\\s*(.*\\.(cmake|txt))\\(([0-9]+)\\):\\s*(\\w+)\\(([\\s\\S]*?) ?\\)\\s*\\n', re.MULTILINE)\n    reg_other = re.compile(r'[^\\n]*\\n')\n    reg_genexp = re.compile(r'\\$<.*>')\n    loc = 0\n    while loc < len(trace):\n        mo_file_line = reg_tline.match(trace, loc)\n        if not mo_file_line:\n            skip_match = reg_other.match(trace, loc)\n            if not skip_match:\n                print(trace[loc:])\n                raise RuntimeError('Failed to parse CMake trace')  # string exceptions are invalid in Python 3\n\n            loc = skip_match.end()\n            continue\n\n        loc = mo_file_line.end()\n\n        file = mo_file_line.group(1)\n        line = mo_file_line.group(3)\n        func = mo_file_line.group(4)\n        args = mo_file_line.group(5).split(' ')\n        args = list(map(lambda x: x.strip(), args))\n        args = list(map(lambda x: reg_genexp.sub('', x), args)) # Remove generator expressions\n\n        yield CMakeTraceLine(file, line, func, args)\n\ndef run_cmake(cmexe, tofind):\n    ts1 = datetime.datetime.now()\n    p, out, err = Popen_safe([cmexe, '--trace-expand', '-DNAME={}'.format(tofind), '..'])\n    rc = p.returncode\n    ts2 = datetime.datetime.now()\n\n    if rc != 0:\n        print('CMake returned {}'.format(rc))\n        print(out)\n        print(err)\n\n    lexer = _lex_trace(err)\n    lineCount = 0\n    for i in lexer:\n        print(i)\n        lineCount += 1\n\n    ts3 = datetime.datetime.now()\n    delta1 = ts2 - ts1\n    delta2 = ts3 - ts2\n    print('CMake took: {}ms'.format(int(delta1.total_seconds() * 1000)))\n    print('Lexer took: {}ms'.format(int(delta2.total_seconds() * 1000)))\n    print('Num trace lines: {}'.format(lineCount))\n\n# ___ ___ ___ _____ _ _\n# | \\/ | / _ \\|_ _| \\ | |\n# | . . |/ /_\\ \\ | | | \\| |\n# | |\\/| || _ | | | | . ` |\n# | | | || | | |_| |_| |\\ |\n# \\_| |_/\\_| |_/\\___/\\_| \\_/\n#\n\ndef main():\n    parser = argparse.ArgumentParser(prog='run')\n    parser.add_argument('-c', '--cmake', default='cmake', type=str, help='CMake command / executable')\n    parser.add_argument('-b', '--build', default='build', type=str, help='CMake build directory')\n    parser.add_argument('-p', '--pkg', default='ZLIB', type=str, help='The package to find')\n    parser.add_argument('-S', '--no-setup', action='store_true', help='Disable the custom setup to speed up CMake')\n    parser.add_argument('-C', '--no-cmake', action='store_true', help='Do not run CMake')\n    args = parser.parse_args()\n\n    abspath = os.path.abspath(__file__)\n    dname = os.path.dirname(abspath)\n    os.chdir(dname)\n\n    found, vers = cmake_version(args.cmake)\n    if not found:\n        print('CMake not found. 
Error:\\n{}'.format(vers))\n        return 1\n\n    print('Using CMake \"{}\" version {}'.format(args.cmake, vers))\n\n    new_build(args.build)\n\n    if not args.no_setup:\n        setup(vers, os.path.join(dname, 'dummyGCC.sh'))\n\n    if not args.no_cmake:\n        run_cmake(args.cmake, args.pkg)\n\n    return 0\n\nif __name__ == '__main__':\n    sys.exit(main())","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"390933783","text":"\"\"\"\nCreate a function named max_num() that takes a list of numbers named nums as a parameter.\n\nThe function should return the largest number in nums\n\"\"\"\n\ndef max_num_no_loop(nums):\n    return max(nums)\n\nprint(max_num_no_loop([50, -10, 0, 75, 20]))\n\n# no need for a loop\n# will try a loop though since that is what the lesson is asking for\n\n# declare your function `max_num` with input `lst`\ndef max_num(lst):\n\n    # set variable `biggest_number` which is equal to index 0 of lst\n    biggest_number = lst[0]\n\n    # do for each `nums` in the range, where the range is the length of lst\n    # ah this is the key part I was missing, you have to include the range to tell the function to check each of the entries in the list and not just stop when it reaches the first one!!!\n    # so in order to iterate over the entire list you have to tell it to: do this x times, where x is the length of the list provided!\n    for nums in range(len(lst)):\n\n        # if index `nums` of lst, is greater than, the variable of biggest number, which is set to `lst[0]` to begin with\n        if lst[nums] > biggest_number:\n\n            # then update the variable `biggest number` to equal `lst[nums]`\n            biggest_number = lst[nums]\n    \n    # then return `biggest number`\n    return biggest_number\n\nprint(max_num([50, -10, 0, 75, 20]))\nprint(max_num([100, 20, 34, 150]))","sub_path":"python/second-challenges/max-num.py","file_name":"max-num.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"605263028","text":"#\nfrom mesh import Mesh\nimport reading_module\nimport mesh_tracking\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nprint('\\n' * 100)\n\nmesh_list = []\nmesh_tracking_list = []\nNx_list = []\nerror_list_L1 = []\nerror_list_L2 = []\nerror_list_inf = []\n\nfor mesh_index in range(0,6):\n    i_tmp = 2**(mesh_index+2)\n    j_tmp = 2**(mesh_index+1)\n    mesh_filename = \"p242_T_directe_\" + str(i_tmp) + \"x\" + str(j_tmp) + \".txt\"\n    mesh_tmp = reading_module.read_mesh(mesh_filename,mesh_index)\n    mesh_list.append(mesh_tmp)\n\nfor mesh_index in range(0,5):\n\n    mesh_tracking_list.append(mesh_tracking.track_meshes(mesh_list[mesh_index],mesh_list[mesh_index+1]))\n    \n    mesh_tracking_list[mesh_index].err_calc(mesh_list)\n    \n    Nx_list.append(1.0/(float(mesh_list[mesh_index+1].i)))\n    error_list_L1.append(mesh_tracking_list[mesh_index].Error_L1)\n    error_list_L2.append(mesh_tracking_list[mesh_index].Error_L2)\n    error_list_inf.append(mesh_tracking_list[mesh_index].Error_inf)\n\n    print(\"\\nMeshes %d and %d: \" %(mesh_index+1,mesh_index+2))\n    print(\"L1 norm: \\t\\t%.4f\" %(mesh_tracking_list[mesh_index].Error_L1))\n    print(\"L2 norm: \\t\\t%.4f\" %(mesh_tracking_list[mesh_index].Error_L2))\n    print(\"Infinity norm: \\t%.4f\" %(mesh_tracking_list[mesh_index].Error_inf))\n    \nregression_L1 = np.polyfit(np.log(Nx_list),np.log(error_list_L1),1)\nregression_expression_L1 = np.poly1d(regression_L1)\n\n
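# Note: each fitted slope, regression_expression_*.coeffs[0], estimates the\n# observed order of accuracy p: E ~ C*(1/Nx)**p implies ln(E) = p*ln(1/Nx) + ln(C),\n# so the leading np.polyfit coefficient is that slope p.\n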
regression_L2 = np.polyfit(np.log(Nx_list),np.log(error_list_L2),1)\nregression_expression_L2 = np.poly1d(regression_L2)\n\nregression_inf = np.polyfit(np.log(Nx_list),np.log(error_list_inf),1)\nregression_expression_inf = np.poly1d(regression_inf)\n\nprint(\"\\nThe slope giving the order of accuracy of the method in the L1 norm is:\")\nprint(\"%0.4f \\n\" %(regression_expression_L1.coeffs[0]))\n\nprint(\"\\nThe slope giving the order of accuracy of the method in the L2 norm is:\")\nprint(\"%0.4f \\n\" %(regression_expression_L2.coeffs[0]))\n\nprint(\"\\nThe slope giving the order of accuracy of the method in the infinity norm is:\")\nprint(\"%0.4f \\n\" %(regression_expression_inf.coeffs[0]))\n\nplt.figure(1)\nplt.scatter(np.log(Nx_list),np.log(error_list_L1),s=8,color=\"red\",marker = \"D\",label = 'L1 norm')\nplt.scatter(np.log(Nx_list),np.log(error_list_L2),s=8,color=\"blue\",marker = \"D\",label = 'L2 norm')\nplt.scatter(np.log(Nx_list),np.log(error_list_inf),s=8,color=\"green\",marker = \"D\",label = r'$\\infty$ norm')\n\nplt.plot(np.log(Nx_list),regression_expression_L1(np.log(Nx_list)),\"--\",color=\"red\")\nplt.plot(np.log(Nx_list),regression_expression_L2(np.log(Nx_list)),\"--\",color=\"blue\")\nplt.plot(np.log(Nx_list),regression_expression_inf(np.log(Nx_list)),\"--\",color=\"green\")\n\nplt.xlabel(\"ln(1/Nx)\")\nplt.ylabel(\"ln(E)\")\nplt.title(\"ln(E) vs ln(1/Nx)\")\nplt.legend()\nplt.grid()\nplt.show()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"124884729","text":"\nimport site, shutil, os, sys\n\n#print(site.getusersitepackages()) # https://stackoverflow.com/questions/122327/how-do-i-find-the-location-of-my-python-site-packages-directory\n\ns = site.getusersitepackages()\nwhere = input (\"Where would you like to install Pyabr? 
\")\n\nshutil.copyfile(s+\"/pyabr/pyabr\",where+\"/pyabr.zip\")\nos.mkdir(where+\"/pyabr-install\")\nshutil.unpack_archive(where+\"/pyabr.zip\",where+\"/pyabr-install\",\"zip\")\nos.system(\"cd \"+where+\"/pyabr-install && \\\"\"+sys.executable+\"\\\" install.py\")\nif os.path.isdir(where+\"/Pyabr\"): shutil.rmtree(where+\"/Pyabr\")\nshutil.copytree(where+\"/pyabr-install/stor\",where+\"/Pyabr\")","sub_path":"pypi_install_script/pyabr-0.0.3-py3-none-any/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"518573948","text":"# Copyright 2020 The XLS Authors\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n\"\"\"Tests for xls.ir.python.function.\"\"\"\n\nfrom xls.ir.python import bits\nfrom xls.ir.python import function_builder\nfrom xls.ir.python import package\nfrom xls.ir.python import value\nfrom absl.testing import absltest\n\n\ndef build_function(name='function_name'):\n pkg = package.Package('pname')\n builder = function_builder.FunctionBuilder(name, pkg)\n builder.add_param('x', pkg.get_bits_type(32))\n builder.add_literal_value(value.Value(bits.UBits(7, 8)))\n return builder.build()\n\n\nclass FunctionTest(absltest.TestCase):\n\n def test_methods(self):\n fn = build_function('function_name')\n\n self.assertIn('function_name', fn.dump_ir())\n self.assertEqual(fn.get_param_count(), 1)\n self.assertEqual(fn.get_param_bit_count(0), 32)\n self.assertEqual('function_name', fn.name)\n\n\nif __name__ == '__main__':\n absltest.main()\n","sub_path":"xls/ir/python/function_test.py","file_name":"function_test.py","file_ext":"py","file_size_in_byte":1414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"28395804","text":"#!/usr/bin/env python\n\n# Copyright 2020 Deductiv Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Python 2 and 3 compatible\n# s3ep_search.py\n# Push Splunk search results to AWS S3 - Search Command\n#\n# Author: J.R. 
Murray \n# Version: 1.1.3 (2020-11-11)\n\nfrom __future__ import print_function\nfrom builtins import str\nfrom future import standard_library\nstandard_library.install_aliases()\nimport logging\nimport sys, os, platform\nimport time, datetime\nimport random\nimport re\nfrom deductiv_helpers import setup_logger, eprint\n\n# Add lib folders to import path\nsys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), 'lib'))\nsys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'lib'))\n# pylint: disable=import-error\nfrom splunk.clilib import cli_common as cli\nimport splunk.entity as entity\nimport splunklib.client as client\nimport splunklib.results as results\nfrom splunklib.searchcommands import ReportingCommand, dispatch, Configuration, Option, validators\nimport event_file\nimport boto3\n\n# https://github.com/HurricaneLabs/splunksecrets/blob/master/splunksecrets.py\nfrom splunksecrets import decrypt\n\ndef str2bool(v):\n\treturn str(v).lower() in (\"yes\", \"y\", \"true\", \"t\", \"1\") or v == 1\n\n# Define class and type for Splunk command\n@Configuration()\nclass s3ep(ReportingCommand):\n\tdoc='''\n\t**Syntax:**\n\tsearch | s3ep bucket=<bucket_name> outputfile=<file_name> outputformat=[json|raw|kv|csv|tsv|pipe]\n\n\t**Description**\n\tPush Splunk events to AWS S3 over JSON or raw text.\n\t'''\n\n\t#Define Parameters\n\tcredential = Option(\n\t\tdoc='''\n\t\t**Syntax:** **credential=***<credential_name>*\n\t\t**Description:** The name of the AWS credential given by the user \n\t\t**Default:** The credential defined in hep.conf, aws stanza''',\n\t\trequire=False)\n\n\tbucket = Option(\n\t\tdoc='''\n\t\t**Syntax:** **bucket=***<bucket_name>*\n\t\t**Description:** The name of the destination S3 bucket\n\t\t**Default:** The bucket name defined in hep.conf, aws stanza''',\n\t\trequire=False)\n\n\toutputfile = Option(\n\t\tdoc='''\n\t\t**Syntax:** **outputfile=***<file_name>*\n\t\t**Description:** The name of the file to be written to the S3 bucket\n\t\t**Default:** The name of the user plus the timestamp and the output format, e.g. 
admin_1588000000.log\n\t\t\tjson=.json, csv=.csv, tsv=.tsv, pipe=.log, kv=.log, raw=.log''',\n\t\trequire=False)\n\n\toutputformat = Option(\n\t\tdoc='''\n\t\t**Syntax:** **outputformat=***[json|raw|kv|csv|tsv|pipe]*\n\t\t**Description:** The format written for the output events/search results\n\t\t**Default:** *csv*''',\n\t\trequire=False) \n\n\tfields = Option(\n\t\tdoc='''\n\t\t**Syntax:** **fields=***\"field1, field2, field3\"*\n\t\t**Description:** Limit the fields to be written to the S3 file\n\t\t**Default:** All (Unspecified)''',\n\t\trequire=False, validate=validators.List()) \n\n\tcompression = Option(\n\t\tdoc='''\n\t\t**Syntax:** **compression=***[true|false]*\n\t\t**Description:** Option to compress the output file into .gz format before writing to S3\n\t\t**Default:** False, or True if .gz is in the filename''',\n\t\trequire=False, validate=validators.Boolean())\n\n\t# Validators found @ https://github.com/splunk/splunk-sdk-python/blob/master/splunklib/searchcommands/validators.py\n\t\n\tdef __getitem__(self, key):\n\t\treturn getattr(self,key)\n\t\n\tdef map(self, events):\n\t\tfor e in events:\n\t\t\tyield(e)\n\n\t#define main function\n\tdef reduce(self, events):\n\n\t\ttry:\n\t\t\tcfg = cli.getConfStanza('hep','settings')\n\t\texcept BaseException as e:\n\t\t\traise Exception(\"Could not read configuration: \" + repr(e))\n\t\t\n\t\t# Facility info - prepended to log lines\n\t\tfacility = os.path.basename(__file__)\n\t\tfacility = os.path.splitext(facility)[0]\n\t\ttry:\n\t\t\tlogger = setup_logger(cfg[\"log_level\"], 'hep.log', facility)\n\t\texcept BaseException as e:\n\t\t\traise Exception(\"Could not create logger: \" + repr(e))\n\n\t\t#script = os.path.basename(__file__)\n\n\t\t#logger = setup_logging('hep')\n\t\tlogger.info('S3EP search command initiated')\n\n\t\t# Enumerate proxy settings\n\t\thttp_proxy = os.environ.get('HTTP_PROXY')\n\t\thttps_proxy = os.environ.get('HTTPS_PROXY')\n\t\tproxy_exceptions = os.environ.get('NO_PROXY')\n\n\t\tif http_proxy is not None:\n\t\t\tlogger.debug(\"HTTP proxy: %s\" % http_proxy)\n\t\tif https_proxy is not None:\n\t\t\tlogger.debug(\"HTTPS proxy: %s\" % https_proxy)\n\t\tif proxy_exceptions is not None:\n\t\t\tlogger.debug(\"Proxy Exceptions: %s\" % proxy_exceptions)\n\t\n\t\t# Enumerate settings\n\t\t#session_key = self._metadata.searchinfo.session_key\n\t\t#splunkd_uri = self._metadata.searchinfo.splunkd_uri\n\t\tapp = self._metadata.searchinfo.app\n\t\tuser = self._metadata.searchinfo.username\n\t\t#owner = self._metadata.searchinfo.owner\n\t\tdispatch = self._metadata.searchinfo.dispatch_dir\n\n\t\ttry:\n\t\t\taws_cfg = cli.getConfStanza('hep','aws')\n\t\t\tlogger.debug(str(aws_cfg))\n\t\texcept BaseException as e:\n\t\t\tlogger.critical(\"Error reading app configuration. 
No target servers: \" + repr(e))\n\t\t\texit(1)\n\t\t\n\t\t# Check to see if we have credentials or if use_arn is specified\n\t\t# Check first for credential being specified\n\t\tif self.credential is not None or len(aws_cfg['default_credential']) > 0:\n\t\t\t# A credential was given explicitly or a default is specified.\n\t\t\tcredentials = {}\n\t\t\tlogger.debug(\"Default credential: %s\", str(aws_cfg['default_credential']))\n\t\t\tfor key, value in list(aws_cfg.items()):\n\t\t\t\tif value is not None and len(value) > 0:\n\t\t\t\t\t#logger.debug(\"Key string: \" + key)\n\t\t\t\t\t#logger.debug(\"Key value: \" + value)\n\t\t\t\t\tif key[:10] == 'credential':\n\t\t\t\t\t\tlogger.debug(\"Parsing %s (%s)\", key, value)\n\t\t\t\t\t\tdefault = False\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\talias, username, password_encrypted = value.split(':')\n\t\t\t\t\t\t\tif aws_cfg['default_credential'] in [alias, key]:\n\t\t\t\t\t\t\t\tdefault = True\n\t\t\t\t\t\t\tcredentials[alias] = {\n\t\t\t\t\t\t\t\t'username':\t\t\t\tusername,\n\t\t\t\t\t\t\t\t'password_encrypted': \tpassword_encrypted,\n\t\t\t\t\t\t\t\t'is_default':\t\t\tdefault\n\t\t\t\t\t\t\t}\n\n\t\t\t\t\t\texcept BaseException as e:\n\t\t\t\t\t\t\t# Invalid record\n\t\t\t\t\t\t\tlogger.warning(\"Discarded credential %s (%s): %s\", key, value, repr(e))\n\n\n\t\t\t#logger.debug(\"Creds: \" + str(credentials))\n\t\t\ttry:\n\t\t\t\t# If a credential was specified in the search arguments\n\t\t\t\tif self.credential is not None:\n\t\t\t\t\t# Look for the credential alias specified\n\t\t\t\t\tself.credential = credentials[self.credential]\n\t\t\t\t\tlogger.debug(\"Using specified credential\")\n\t\t\t\telse:\n\t\t\t\t\t# Use the default credential\n\t\t\t\t\tfor cred, cred_dict in list(credentials.items()):\n\t\t\t\t\t\tif cred_dict['is_default']:\n\t\t\t\t\t\t\tself.credential = cred_dict\n\t\t\t\tlogger.debug(self.credential)\n\n\t\t\t\t# Check for encryption\n\t\t\t\tif self.credential['password_encrypted'][:1] == '$':\n\t\t\t\t\t# Decrypt the password\n\t\t\t\t\t# Read the splunk.secret file\n\t\t\t\t\twith open(os.path.join(os.getenv('SPLUNK_HOME'), 'etc', 'auth', 'splunk.secret'), 'r') as ssfh:\n\t\t\t\t\t\tsplunk_secret = ssfh.readline()\n\t\t\t\t\t#logger.debug(splunk_secret)\n\n\t\t\t\t\t# Call the decrypt function from splunksecrets.py\n\t\t\t\t\tself.credential['password'] = decrypt(splunk_secret, self.credential['password_encrypted'])\n\t\t\t\telse:\n\t\t\t\t\t# Not encrypted in the config\n\t\t\t\t\tself.credential['password'] = self.credential['password_encrypted']\n\n\t\t\t\taws_access_key = self.credential['username']\n\t\t\t\taws_secret_key = self.credential['password']\n\t\t\t\t#logger.debug(credential['password'])\n\t\t\texcept BaseException as e: \n\t\t\t\tlogger.exception(\"Could not find or decrypt the specified credential: \" + repr(e))\n\t\t\t\tprint(\"Could not find or decrypt the specified credential\")\n\t\t\t\texit(230494)\n\t\t\t\n\t\telif str2bool(aws_cfg['use_arn']):\n\t\t\tlogger.debug(\"Using ARN to connect\")\n\t\t\n\t\tif self.bucket is None:\n\t\t\tif 'default_s3_bucket' in list(aws_cfg.keys()):\n\t\t\t\tt = aws_cfg['default_s3_bucket']\n\t\t\t\tif t is not None and len(t) > 0:\n\t\t\t\t\tself.bucket = t\n\t\t\t\telse:\n\t\t\t\t\tlogger.critical(\"No bucket specified\")\n\t\t\t\t\texit(4)\n\t\t\telse:\n\t\t\t\tlogger.critical(\"No bucket specified\")\n\t\t\t\texit(5)\n\t\t\n\t\tfile_extensions = {\n\t\t\t'raw': '.log',\n\t\t\t'kv': '.log',\n\t\t\t'pipe': '.log',\n\t\t\t'csv': '.csv',\n\t\t\t'tsv': '.tsv',\n\t\t\t'json': 
'.json'\n\t\t}\n\n\t\tif self.outputformat is None:\n\t\t\tself.outputformat = 'csv'\n\n\t\tif self.outputfile is None:\n\t\t\t# Boto is special. We need repr to give it the encoding it expects to match the hashing.\n\t\t\tnow = str(int(time.time()))\n\t\t\tself.outputfile = repr(app + '_' + user + '_' + now + file_extensions[self.outputformat]).strip(\"'\")\n\n\t\tif self.compression:\n\t\t\tlogger.debug('Compression: %s', self.compression)\n\t\telse:\n\t\t\ttry:\n\t\t\t\t# conf values are strings, so coerce to a real boolean\n\t\t\t\tself.compression = str2bool(aws_cfg.get('compression'))\n\t\t\texcept:\n\t\t\t\tself.compression = False\n\t\t\n\t\tstaging_filename = 'eventpush_staging.txt'\n\t\tlocal_output_file = os.path.join(dispatch, staging_filename)\n\n\t\t# Append .gz to the output file if compression=true\n\t\tif not self.compression and len(self.outputfile) > 3:\n\t\t\t# We have a .gz extension when compression was not specified. Enable compression.\n\t\t\tif self.outputfile[-3:] == '.gz':\n\t\t\t\tself.compression = True\n\t\telif self.compression and len(self.outputfile) > 3:\n\t\t\tif self.outputfile[-3:] != '.gz':\n\t\t\t\tself.outputfile = self.outputfile + '.gz'\n\n\t\tif self.compression:\n\t\t\tlocal_output_file = local_output_file + '.gz'\n\t\t\n\t\tevent_counter = 0\n\t\t# Write the output file to disk in the dispatch folder\n\t\tlogger.debug(\"Writing events to file %s in %s format. Compression=%s\\n\\tfields=%s\", local_output_file, self.outputformat, self.compression, self.fields)\n\t\tfor event in event_file.write_events_to_file(events, self.fields, local_output_file, self.outputformat, self.compression):\n\t\t\tyield event\n\t\t\tevent_counter += 1\n\n\t\tnow = str(int(time.time()))\n\t\tnowft = datetime.datetime.now().strftime(\"%F_%H%M%S\")\n\t\ttoday = datetime.datetime.now().strftime(\"%F\")\n\t\tself.outputfile = self.outputfile.replace(\"__now__\", now)\n\t\tself.outputfile = self.outputfile.replace(\"__nowft__\", nowft)\n\t\tself.outputfile = self.outputfile.replace(\"__today__\", today)\n\t\t\n\t\tlogger.debug(\"Staging file: %s\" % local_output_file)\n\n\t\tuse_arn = aws_cfg['use_arn']\n\t\trandom_number = str(random.randint(10000, 100000))\n\n\t\tif self.credential is not None:\n\t\t\t\n\t\t\t# Use the credential to connect to S3\n\t\t\ttry:\n\t\t\t\ts3 = boto3.client(\n\t\t\t\t\t's3',\n\t\t\t\t\taws_access_key_id=aws_access_key,\n\t\t\t\t\taws_secret_access_key=aws_secret_key)\n\t\t\t\tlogger.debug(\"Connected using OAuth credential\")\n\t\t\texcept BaseException as e:\n\t\t\t\tlogger.critical(\"Could not connect to S3 using OAuth keys: \" + repr(e))\n\t\t\t\tprint(\"Could not connect to S3 using OAuth keys: \" + repr(e))\n\t\t\t\texit(6)\n\t\t\n\t\telif str2bool(use_arn):\n\t\t\t# Get the ARN from the configuration\n\t\t\t# If a specific ARN is specified, use that\n\n\t\t\t# Otherwise, use the current/caller identity from the EC2 instance\n\t\t\t# \n\t\t\t# Use the ARN to connect to S3\n\t\t\ttry:\n\t\t\t\t\n\t\t\t\taccount_arn_current = boto3.client('sts').get_caller_identity().get('Arn')\n\t\t\t\t# arn:aws:sts::800000000000:assumed-role/SplunkInstance_ReadOnly/...\n\t\t\t\tm = re.search(r'arn:aws:sts::(\\d+):[^\\/]+\\/([^\\/]+)', account_arn_current)\n\t\t\t\taws_account = m.group(1)\n\t\t\t\taws_role = m.group(2)\n\n\t\t\t\tsts_client = boto3.client('sts')\n\t\t\t\trole_arn = \"arn:aws:iam::\" + aws_account + \":role/\" + aws_role\n\t\t\t\tassumed_role_object = sts_client.assume_role(\n\t\t\t\t\tRoleArn=role_arn,\n\t\t\t\t\tRoleSessionName=\"AssumeRoleSession\" + random_number\n\t\t\t\t)\n\n
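\t\t\t\t# Note: assume_role returns temporary credentials -- an access key, a\n\t\t\t\t# secret key, and a session token -- and all three must be passed to\n\t\t\t\t# boto3.client('s3') below for the temporary session to be accepted.\n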
\t\t\t\tcredentials = assumed_role_object['Credentials']\n\t\t\t\ts3 = boto3.client(\n\t\t\t\t\t's3',\n\t\t\t\t\taws_access_key_id=credentials['AccessKeyId'],\n\t\t\t\t\taws_secret_access_key=credentials['SecretAccessKey'],\n\t\t\t\t\taws_session_token=credentials['SessionToken'],\n\t\t\t\t)\n\t\t\t\tlogger.debug(\"Connected using assumed role %s\", role_arn)\n\t\t\texcept BaseException as e:\n\t\t\t\tlogger.critical(\"Could not connect to S3. Failed to assume role: \" + repr(e))\n\t\t\t\tprint(\"Could not connect to S3. Failed to assume role: \" + repr(e))\n\t\t\t\texit(7)\n\t\telse:\n\t\t\tlogger.critical(\"ARN not configured and credential not specified.\")\n\t\t\tprint(\"ARN not configured and credential not specified.\")\n\t\t\texit(8)\n\t\t\n\t\t# Upload file to s3\n\t\ttry:\n\t\t\twith open(local_output_file, \"rb\") as f:\n\t\t\t\ts3.upload_fileobj(f, self.bucket, self.outputfile)\n\t\t\ts3 = None\n\t\t\tsts_client = None\n\t\t\tlogger.info(\"Successfully pushed events to s3. app=%s count=%s bucket=%s file=%s user=%s\" % (app, event_counter, self.bucket, self.outputfile, user))\n\t\t\tos.remove(local_output_file)\n\t\texcept s3.exceptions.NoSuchBucket as e:\n\t\t\tlogger.critical(\"Error: No such bucket\")\n\t\t\tprint(\"Error: No such bucket\")\n\t\t\texit(123833)\n\t\texcept BaseException as e:\n\t\t\tlogger.critical(\"Could not upload file to S3: \" + repr(e))\n\t\t\tprint(\"Could not upload file to S3: \" + repr(e))\n\t\t\texit(9)\n\t\t\n\t\t\n\ndispatch(s3ep, sys.argv, sys.stdin, sys.stdout, __name__)\n\n\n","sub_path":"bin/s3ep_search.py","file_name":"s3ep_search.py","file_ext":"py","file_size_in_byte":12909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"622989885","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 23 22:30:21 2021\r\n\r\n@author: DELL\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport random as rnd\r\nimport os\r\n\r\n# visualization\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\n\r\npath= \"D:/Titanic Classification/\"\r\nos.listdir(path)\r\ndf_train= pd.read_csv(path+ 'train.csv')\r\ndf_test= pd.read_csv(path+ 'test.csv')\r\ncombine = [df_train, df_test]\r\ndf_train.columns\r\n###########################################\r\ndf_train.info()\r\n'''\r\nWe observe that Name does not affect the target.\r\nWe observe that Ticket does not affect the target.\r\n\r\nSo we will drop these features.\r\n'''\r\ndf_train=df_train.drop(['Name','Ticket'],axis=1)\r\ndf_test=df_test.drop(['Name','Ticket'],axis=1)\r\ndf_test.shape\r\ndf_train.shape\r\n###########################################\r\n# Which features contain blank, null or empty values?\r\ndf_train.info()\r\n'''\r\nWe observe that Age has missing values (891 - 714 = 177),\r\nso we will impute the mean value.\r\nWe observe that Cabin contains 891 - 204 = 687 missing values.\r\n'''\r\ndf_train=df_train.drop(['Cabin'],axis=1)\r\ndf_test=df_test.drop(['Cabin'],axis=1) \r\n\r\nfrom numpy import nan\r\nmean_Age=df_train['Age'].mean()\r\n\r\nvalues = {'Age':mean_Age}\r\ndf_train=df_train.fillna(value=values)\r\nvalues = {'Embarked':'C'}\r\ndf_train=df_train.fillna(value=values)\r\n\r\n\r\nmean_Age=df_test['Age'].mean()\r\nvalues = {'Age':mean_Age}\r\ndf_test=df_test.fillna(value=values)\r\n\r\nmean_Fare=df_test['Fare'].mean()\r\nvalues = {'Fare':mean_Fare}\r\ndf_test=df_test.fillna(value=values)\r\n\r\ndf_test.info()\r\ndf_train.info()\r\n# there are no missing values\r\n
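# Optional sanity check: confirm the imputations above left no NaNs, e.g.\r\n# print(df_train.isnull().sum()) and print(df_test.isnull().sum())\r\n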
##############################################\r\n# Which features are categorical?\r\n# Which features are numerical?\r\ndf_train.describe()# only numerical features \r\ndf_train.describe(include=['O'])# categorical feature types\r\n'''\r\nSex is nominal data: it has two categories (male, female) with no rank or\r\norder. Other examples of categorical data: Gender (Male/Female/Other),\r\nAge Groups (Young/Adult/Old), etc.\r\n### One-Hot encoding ######\r\n'''\r\nDataDummies = pd.get_dummies(df_train['Sex'])\r\ndf_train=df_train.drop(['Sex'],axis=1)\r\n\r\ndf_train=pd.concat([df_train, DataDummies],axis=1)\r\n############\r\nDataDummies = pd.get_dummies(df_test['Sex'])\r\ndf_test=df_test.drop(['Sex'],axis=1)\r\n\r\ndf_test=pd.concat([df_test, DataDummies],axis=1)\r\n#################################################################\r\n'''\r\n# Age is ordinal data: a kind of categorical data with a set order\r\n# '''\r\n# ### train\r\n# # Let us create Age bands and determine correlations with Survived\r\n# df_train['AgeBand'] = pd.cut(df_train['Age'], 4)\r\n# df_train[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False).mean().sort_values(by='AgeBand', ascending=True)\r\n\r\n# # Let us replace Age with ordinals based on these bands.\r\n \r\n# df_train.loc[ df_train['Age'] <= 20, 'Age'] = 0\r\n# df_train.loc[(df_train['Age'] > 20) & (df_train['Age'] <= 40), 'Age'] = 1\r\n# df_train.loc[(df_train['Age'] > 40) & (df_train['Age'] <= 60), 'Age'] = 2\r\n# df_train.loc[ df_train['Age'] > 60, 'Age'] = 3\r\n# compare= pd.concat([df_train['Age'], df_train['AgeBand']],axis=1)\r\n# df_train= df_train.drop(['AgeBand'],axis=1)\r\n# df_train.head(10)\r\n\r\n#####################################\r\n\r\n### test\r\n# Let us create Age bands and determine correlations with Survived\r\n# df_test['AgeBand'] = pd.cut(df_test['Age'], 4)\r\n\r\n# # Let us replace Age with ordinals based on these bands.\r\n \r\n# df_test.loc[ df_test['Age'] <= 20, 'Age'] = 0\r\n# df_test.loc[(df_test['Age'] > 20) & (df_test['Age'] <= 40), 'Age'] = 1\r\n# df_test.loc[(df_test['Age'] > 40) & (df_test['Age'] <= 60), 'Age'] = 2\r\n# df_test.loc[ df_test['Age'] > 60, 'Age'] = 3\r\n# compare= pd.concat([df_test['Age'], df_test['AgeBand']],axis=1)\r\n# df_test= df_test.drop(['AgeBand'],axis=1)\r\n# df_test.head(10)\r\n#################################################################\r\n# replace values by dictionary\r\n\r\n# df_train.describe(include=['O'])# categorical feature types\r\n# df_train= df_train.drop([61])# Embarked in 61 index= 29.6991\r\n# df_train[['Embarked', 'Survived']].groupby(['Embarked'], as_index=False).mean()\r\n\r\n\r\n# Embarked_Dict = { 'S':0, 'Q':1, 'C':2 } \r\n# df_train['Embarked'] = df_train.Embarked.map(Embarked_Dict)\r\n# df_test['Embarked'] = df_test.Embarked.map(Embarked_Dict)\r\n# df_train.dropna(inplace=True)\r\n\r\n# df_test.info()\r\n# df_train.info()\r\n############################ handle Embarked by Target Encoding ################################\r\nencodings = df_train.groupby('Embarked')['Survived'].mean().reset_index()\r\ndf_train = df_train.merge(encodings, how='left', on='Embarked')\r\ndf_train.drop('Embarked', axis=1, inplace=True)\r\ndf_train.columns\r\n#################################### split the data and scaling\r\nX = df_train.drop('Survived_x',axis=1)\r\ny = df_train['Survived_x']\r\nX.shape\r\ny.shape\r\n\r\n##### scaling data\r\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\r\n\r\n#ss = StandardScaler()\r\n#X = ss.fit_transform(X)\r\nminmax = MinMaxScaler()\r\nX = minmax.fit_transform(X)\r\n##############\r\n
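# Caveat: the scaler above was fit on the full X before splitting; fitting it\r\n# on X_train only and then transforming X_test would avoid test-set leakage.\r\n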
### Perform train and test split\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=128)\r\nX_train.shape\r\ny_train.shape\r\n################################# Logistic Regression Classifier ##################\r\nfrom sklearn.linear_model import LogisticRegression\r\n\r\n\r\nlrClassifier = LogisticRegression()\r\nlrClassifier.fit(X_train,y_train)\r\n### Prediction on test data\r\nprediction = lrClassifier.predict(X_test)\r\nprediction[:10] # predict for the first ten rows\r\nprint(y_test[:10])\r\n## Measure accuracy of the classifier\r\nfrom sklearn.metrics import accuracy_score\r\n\r\nacc_log= accuracy_score(y_true=y_test, y_pred=prediction)\r\n\r\n########################### Support Vector Machines ##########################\r\nfrom sklearn.svm import SVC, LinearSVC\r\n\r\nsvc = SVC()\r\nsvc.fit(X_train,y_train)\r\nprediction = svc.predict(X_test)\r\nprediction[:10] # predict for the first ten rows\r\nprint(y_test[:10])\r\n## Measure accuracy of the classifier\r\n\r\nacc_svc= accuracy_score(y_true=y_test, y_pred=prediction)\r\n\r\n############################ Random Forest #################################\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nrandom_forest = RandomForestClassifier(n_estimators=100)\r\nrandom_forest.fit(X_train, y_train)\r\nprediction = random_forest.predict(X_test)\r\nprediction[:10] # predict for the first ten rows\r\nprint(y_test[:10])\r\n## Measure accuracy of the classifier\r\n\r\nacc_random_forest= accuracy_score(y_true=y_test, y_pred=prediction)\r\n######################### Decision Tree #####################################\r\nfrom sklearn.tree import DecisionTreeClassifier\r\n\r\ndecision_tree = DecisionTreeClassifier()\r\ndecision_tree.fit(X_train, y_train)\r\nprediction = decision_tree.predict(X_test)\r\nprediction[:10] # predict for the first ten rows\r\nprint(y_test[:10])\r\n## Measure accuracy of the classifier\r\n\r\nacc_decision_tree= accuracy_score(y_true=y_test, y_pred=prediction)\r\n########################## KNN ##########################################\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nknn = KNeighborsClassifier(n_neighbors = 3)\r\nknn.fit(X_train, y_train)\r\nprediction = knn.predict(X_test)\r\nprediction[:10] # predict for the first ten rows\r\nprint(y_test[:10])\r\n## Measure accuracy of the classifier\r\n\r\nacc_knn= accuracy_score(y_true=y_test, y_pred=prediction)\r\n############################ Gaussian Naive Bayes ########################\r\nfrom sklearn.naive_bayes import GaussianNB\r\n\r\nNB = GaussianNB()\r\nNB.fit(X_train, y_train)\r\nprediction = NB.predict(X_test)\r\nprediction[:10] # predict for the first ten rows\r\nprint(y_test[:10])\r\n## Measure accuracy of the classifier\r\n\r\nacc_NB=accuracy_score(y_true=y_test, y_pred=prediction)\r\n###################### Perceptron #######################################\r\nfrom sklearn.linear_model import Perceptron\r\n\r\nperceptron = Perceptron()\r\nperceptron.fit(X_train, y_train)\r\nprediction = perceptron.predict(X_test)\r\n## Measure accuracy of the classifier\r\n\r\nacc_perceptron= accuracy_score(y_true=y_test, y_pred=prediction)\r\n######################## SGD ##########################################\r\nfrom sklearn.linear_model import SGDClassifier\r\nsgd = SGDClassifier()\r\nsgd.fit(X_train, y_train)\r\nprediction = sgd.predict(X_test)\r\n## Measure accuracy of the classifier\r\n\r\nacc_sgd=accuracy_score(y_true=y_test, y_pred=prediction)\r\n
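# The eight training blocks above repeat the same fit/predict/score pattern; a\r\n# hypothetical refactor (not in the original script) could loop instead, e.g.\r\n# for name, est in [('LogReg', LogisticRegression()), ('SVC', SVC())]:\r\n#     est.fit(X_train, y_train); print(name, accuracy_score(y_test, est.predict(X_test)))\r\n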
############### Model evaluation ######################################\r\nmodels = pd.DataFrame({\r\n    'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression', \r\n              'Random Forest', 'Naive Bayes', 'Perceptron', \r\n              'Stochastic Gradient Descent', \r\n              'Decision Tree'],\r\n    'Score': [acc_svc, acc_knn, acc_log, \r\n              acc_random_forest, acc_NB, acc_perceptron, \r\n              acc_sgd, acc_decision_tree]})\r\nmodels.sort_values(by='Score', ascending=False)","sub_path":"Titanic Classification/Titanic without feature selection.py","file_name":"Titanic without feature selection.py","file_ext":"py","file_size_in_byte":8995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"198090320","text":"from itertools import cycle\nimport pandas as pd\nimport numpy as np\nfrom sklearn import preprocessing\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score\nfrom sklearn.linear_model import Perceptron\nfrom sklearn.metrics import auc\nfrom matplotlib import pyplot as plt\nfrom sklearn.preprocessing import label_binarize\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import GridSearchCV\nimport itertools\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.metrics import confusion_matrix, roc_curve, classification_report\n\ndef plot_confusion(cm, classes,normalize=False,title='Confusion matrix',cmap=plt.cm.Blues):\n    \"\"\"\n    This function prints and plots the confusion matrix.\n    Normalization can be applied by setting `normalize=True`.\n    \"\"\"\n    # Normalize first so the heatmap colours match the printed cell values\n    if normalize:\n        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n\n    fig = plt.figure(figsize=(10, 8))\n    plt.imshow(cm, interpolation='nearest', cmap=cmap)\n    plt.title(title)\n    plt.colorbar()\n    tick_marks = np.arange(len(classes))\n    plt.xticks(tick_marks, classes)\n    plt.yticks(tick_marks, classes)\n\n    thresh = cm.max() / 2.\n    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n        plt.text(j, i, cm[i, j],\n                 horizontalalignment=\"center\",\n                 color=\"white\" if cm[i, j] > thresh else \"black\")\n\n    plt.tight_layout()\n    plt.ylabel('True label')\n    plt.xlabel('Predicted label')\n    plt.show()\n\ndef OperateGridSearch(X_train,y_train):\n    #Grid Search\n    parameter_space = {\n        'hidden_layer_sizes': [(50,100,50),(100,100,100),(400,400,400)],\n        'max_iter': [1000],\n        'activation': ['logistic'],\n        'solver': ['adam'],\n        # 'alpha': [0.0001, 0.05],\n        'learning_rate': ['constant','invscaling','adaptive'],\n    }\n    clf = GridSearchCV(MLPClassifier(), parameter_space, n_jobs=-1)\n    clf.fit(X_train, y_train)\n    print('Best parameters found:\\n', clf.best_params_)\n    return clf\n\ndef TrainMLP(X_train,y_train):\n    # Best parameters found:\n    # {'activation': 'logistic', 'hidden_layer_sizes': (400, 400, 400), 'max_iter': 1000, 'solver': 'adam'}\n    clf = MLPClassifier(random_state=0, activation='logistic', hidden_layer_sizes=(400, 400), max_iter=1000,\n                        solver='adam', learning_rate='constant')\n    clf.fit(X_train, y_train)\n    return clf\n\ndef TrainPerceptron(X_train,y_train):\n    clf = Perceptron(eta0=0.01, random_state=1, max_iter= 100)\n    clf = CalibratedClassifierCV(clf)\n    clf.fit(X_train, y_train)\n    return clf\n\ndef CalculateMAE(X_train, X_val, y_train, y_val):\n    my_mae = []\n    layer_name = []\n    layers = [50,100,150,200,300,400,500]\n\n    # for 2 Layers\n    for max_layer in layers:\n        model = MLPClassifier(random_state=0, activation='logistic', 
hidden_layer_sizes=(max_layer,max_layer), max_iter=1000, solver= 'adam',learning_rate = 'constant')\n        model.fit(X_train, y_train)\n        preds_val = model.predict(X_val)\n        mae = mean_absolute_error(y_val.argmax(axis=1), preds_val.argmax(axis=1))\n        my_mae.append(mae)\n        layer_name.append(\"\"+str(max_layer)+\",\"+str(max_layer))\n\n    #for 3 Layers\n    for max_layer in layers:\n        model = MLPClassifier(random_state=0, activation='logistic', hidden_layer_sizes=(max_layer,max_layer,max_layer), max_iter=1000, solver= 'adam',learning_rate = 'constant')\n        model.fit(X_train, y_train)\n        preds_val = model.predict(X_val)\n        mae = mean_absolute_error(y_val.argmax(axis=1), preds_val.argmax(axis=1))\n        my_mae.append(mae)\n        layer_name.append(\"\" + str(max_layer) + \",\" + str(max_layer)+\",\"+str(max_layer))\n\n    for max_layer in layers:\n        model = MLPClassifier(random_state=0, activation='logistic', hidden_layer_sizes=(max_layer,max_layer,max_layer,max_layer), max_iter=1000, solver= 'adam',learning_rate = 'constant')\n        model.fit(X_train, y_train)\n        preds_val = model.predict(X_val)\n        mae = mean_absolute_error(y_val.argmax(axis=1), preds_val.argmax(axis=1))\n        my_mae.append(mae)\n        layer_name.append(\"\" + str(max_layer) + \",\" + str(max_layer)+\",\"+str(max_layer)+\",\"+str(max_layer))\n    print(my_mae)\n\n    plt.plot(layer_name,my_mae)\n    plt.title('Mean Absolute Error of Test set')\n    plt.xticks(rotation=45)\n    plt.show()\n\ndef FindConfusion(y_test,clf_predict,title):\n    cm = confusion_matrix(y_test.argmax(axis=1), clf_predict.argmax(axis=1))\n    plot_confusion(cm, classes=[\"Low\", \"Medium\", \"High\"],\n                   title=title)  # disp.figure_.suptitle(\"Confusion Matrix\")\n    # print(\"Confusion matrix:\\n%s\" % disp.confusion_matrix))\n    plt.show()\n\ndef CrossValidate(X_train,y_train):\n    models = []\n    models.append(('Perceptron', Perceptron(eta0=0.1, random_state=0, max_iter=100)))\n    models.append(('MLP (50,100,50)',\n                   MLPClassifier(random_state=0, activation='logistic', hidden_layer_sizes=(50, 100, 50), max_iter=1000,\n                                 solver='adam', learning_rate='adaptive')))\n    models.append(('MLP (100,100,100)',\n                   MLPClassifier(random_state=0, activation='logistic', hidden_layer_sizes=(100, 100, 100),\n                                 max_iter=1000, solver='adam', learning_rate='constant')))\n    models.append(('MLP (400,400)',\n                   MLPClassifier(random_state=0, activation='logistic', hidden_layer_sizes=(400, 400), max_iter=1000,\n                                 solver='adam', learning_rate='constant')))\n\n    # evaluate each model in turn\n    results = []\n    names = []\n    for name, model in models:\n        kfold = StratifiedKFold(n_splits=10, shuffle=True, random_state=1)  # shuffle=True is required for random_state to take effect\n        cv_results = cross_val_score(model, X_train, y_train.argmax(axis=1), cv=kfold, scoring='accuracy')\n        results.append(cv_results)\n        names.append(name)\n        print('%s: %f (%f)' % (name, cv_results.mean(), cv_results.std()))\n    # Compare Algorithms\n    plt.boxplot(results, labels=names)\n    plt.title('10-fold cross-validation on VideoGame dataset')\n    plt.show()\n\ndef ROCPlot(y_test, y_score, n_classes):\n    # ============================================================================\n    # ROC Curve Setup\n    # ============================================================================\n    # Plot linewidth.\n    lw = 2\n\n    # Compute ROC curve and ROC area for each class\n    fpr = dict()\n    tpr = dict()\n    thresholds = dict()\n    roc_auc = dict()\n    for i in range(n_classes):\n        fpr[i], tpr[i], thresholds[i] = roc_curve(y_test[:, i], y_score[:, i])\n        roc_auc[i] = auc(fpr[i], tpr[i])\n\n
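    # Note: micro-averaging below pools every (class, sample) decision into a\n    # single curve by flattening the binarized arrays with ravel(); macro-averaging\n    # instead interpolates the per-class curves onto a shared FPR grid and\n    # averages their TPRs, weighting each class equally.\n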
roc_curve(y_test.ravel(), y_score.ravel())\n roc_auc[\"micro\"] = auc(fpr[\"micro\"], tpr[\"micro\"])\n\n # Compute macro-average ROC curve and ROC area\n # First aggregate all false positive rates\n all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))\n\n # Then interpolate all ROC curves at this points\n mean_tpr = np.zeros_like(all_fpr)\n for i in range(n_classes):\n mean_tpr += np.interp(all_fpr, fpr[i], tpr[i])\n\n # Finally average it and compute AUC\n mean_tpr /= n_classes\n\n fpr[\"macro\"] = all_fpr\n tpr[\"macro\"] = mean_tpr\n roc_auc[\"macro\"] = auc(fpr[\"macro\"], tpr[\"macro\"])\n\n # ============================================================================\n # Plot all ROC curves\n # ============================================================================\n plt.figure(1)\n plt.plot(fpr[\"micro\"], tpr[\"micro\"],\n label='micro-average ROC curve (area = {0:0.2f})'\n ''.format(roc_auc[\"micro\"]),\n color='deeppink', linestyle=':', linewidth=4)\n\n plt.plot(fpr[\"macro\"], tpr[\"macro\"],\n label='macro-average ROC curve (area = {0:0.2f})'\n ''.format(roc_auc[\"macro\"]),\n color='navy', linestyle=':', linewidth=4)\n\n colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])\n for i, color in zip(range(n_classes), colors):\n plt.plot(fpr[i], tpr[i], color=color, lw=lw,\n label='ROC curve of class {0} (area = {1:0.2f})'\n ''.format(i, roc_auc[i]))\n\n plt.plot([0, 1], [0, 1], 'k--', lw=lw)\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('MLP Video Games Data ROC')\n plt.legend(loc=\"lower right\")\n plt.show()\n\n # Zoom in view of the upper left corner.\n plt.figure(2)\n plt.xlim(0, 0.2)\n plt.ylim(0.8, 1)\n plt.plot(fpr[\"micro\"], tpr[\"micro\"],\n label='micro-average ROC curve (area = {0:0.2f})'\n ''.format(roc_auc[\"micro\"]),\n color='deeppink', linestyle=':', linewidth=4)\n\n plt.plot(fpr[\"macro\"], tpr[\"macro\"],\n label='macro-average ROC curve (area = {0:0.2f})'\n ''.format(roc_auc[\"macro\"]),\n color='navy', linestyle=':', linewidth=4)\n\n colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])\n for i, color in zip(range(n_classes), colors):\n plt.plot(fpr[i], tpr[i], color=color, lw=lw,\n label='ROC curve of class {0} (area = {1:0.2f})'\n ''.format(i, roc_auc[i]))\n\n plt.plot([0, 1], [0, 1], 'k--', lw=lw)\n plt.xlabel('False Positive Rate')\n plt.ylabel('True Positive Rate')\n plt.title('Zoomed in ROC')\n plt.legend(loc=\"lower right\")\n plt.show()\n\ndef ClassificationReport(clf,X_train,y_train,X_test, y_test,clf_predict):\n # Accuracy factors\n print('acc for training data: {:.3f}'.format(clf.score(X_train, y_train)))\n print('acc for test data: {:.3f}'.format(clf.score(X_test, y_test)))\n print('MLP Classification report:\\n\\n', classification_report(y_test, clf_predict))\n\ndef main():\n # crimedata=pd.read_csv(r'~/Workspace/ML_CW/CrimeDT.csv')\n crimedata=pd.read_csv('Preprocessed_VideoGames.csv')\n crimedata = crimedata.drop(crimedata.columns[0],axis=1)\n\n categorise = {'Low': 0, 'Medium': 1, 'High': 2}\n crimedata[\"GameTier\"] = crimedata[\"GameTier\"].map(categorise)\n\n start = crimedata.columns.get_loc('Platform')\n end = crimedata.columns.get_loc('User_normalised_by_year')\n label = crimedata.columns.get_loc('GameTier')\n\n X = crimedata.values[:,start:end]\n enc = preprocessing.OrdinalEncoder()\n enc.fit(X)\n X = enc.transform(X)\n print (X)\n\n # Extracting target/ class labels\n y = crimedata.values[:,label].astype(float)\n print 
(y)\n\n y = label_binarize(y, classes=[0, 1, 2])\n n_classes = y.shape[1]\n\n ######################################################################################################\n\n #In case that validation dataset is needed\n # X_tr, X_test, y_tr, y_test = train_test_split(X, y, random_state = 14, test_size = 0.20)\n # X_train, X_val, y_train, y_val = train_test_split(X_tr, y_tr, random_state = 14, test_size = 0.20)\n\n # Uncomment this to implement Mean Absolute Value\n # CalculateMAE(X_train, X_val, y_train, y_val)\n\n # Split dataset in to Train:Test - 80:20\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 14, test_size = 0.25)\n\n #Uncomment this to implement Grid Search\n # clf = OperateGridSearch(X_train, y_train)\n\n #Training Perceptron\n # clf = TrainPerceptron(X_train,y_train)\n\n #Training Multilayer Perceptron\n clf = TrainMLP(X_train,y_train)\n\n # Instead of targets, store output as prediction probabilities\n y_score = clf.predict_proba(X_test)\n\n clf_predict = clf.predict(X_test)\n clf_predict_on_train = clf.predict(X_train)\n\n #Generate Classification Report\n ClassificationReport(clf, X_train, y_train, X_test, y_test, clf_predict)\n\n #Generate Confusion Matrix\n FindConfusion(y_test,clf_predict,title=\"Test Set Confusion matrix\")\n FindConfusion(y_train,clf_predict_on_train,title=\"Training Set Confusion matrix\")\n\n\n #Implement 10-fold CrossValidation\n CrossValidate(X_train,y_train)\n\n #Generate ROC Curve\n ROCPlot(y_test, y_score, n_classes)\n\nif __name__ == \"__main__\":\n main()","sub_path":"VideoGames/VideoGameTierPredicter.py","file_name":"VideoGameTierPredicter.py","file_ext":"py","file_size_in_byte":12230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"400927821","text":"\"\"\"\nAbstract base class for several objects used by manim. In particular, both\n:class:`~.Scene` and :class:`~.Mobject` inherit from Container.\n\"\"\"\n\n\n__all__ = [\"Container\"]\n\n\nfrom abc import ABC, abstractmethod\n\nfrom . import logger\n\n\nclass Container(ABC):\n \"\"\"Abstract base class for several objects used by manim. In particular, both\n :class:`~.Scene` and :class:`~.Mobject` inherit from Container.\n\n Parameters\n ----------\n kwargs : Any\n\n \"\"\"\n\n def __init__(self, **kwargs):\n if kwargs:\n logger.debug(\"Container received extra kwargs: %s\", kwargs)\n\n if hasattr(self, \"CONFIG\"):\n logger.error(\n \"CONFIG has been removed from ManimCommunity. 
Please use keyword arguments instead.\"\n )\n\n @abstractmethod\n def add(self, *items):\n \"\"\"Abstract method to add items to Container.\n\n Parameters\n ----------\n items : Any\n Objects to be added.\n \"\"\"\n\n @abstractmethod\n def remove(self, *items):\n \"\"\"Abstract method to remove items from Container.\n\n Parameters\n ----------\n items : Any\n Objects to be removed.\n \"\"\"\n","sub_path":"manim/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":1190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"414205572","text":"import pygame\n\n\n\nclass Controller:\n def __init__(self, number):\n self.buttonMap = { 0:\"x\", 1:\"a\", 2:\"b\", 3:\"y\", 4:\"l\", 5:\"r\", 8:\"select\", 9:\"start\"} \n pygame.init()\n ## find controllers\n pygame.joystick.init()\n self.joystick = pygame.joystick.Joystick(number)\n self.joystick.init()\n self.clock = pygame.time.Clock()\n self.done = False\n\n def getInput(self):\n # EVENT PROCESSING STEP\n pygame.event.get()\n ## axis control ##\n for i in range( 2 ):\n axis = self.joystick.get_axis(i)\n if axis >= 0.9 or axis == -1.0:\n if axis > 0 and i == 0:\n return \"right\"\n if axis < 0 and i == 0:\n return \"left\"\n if axis > 0 and i == 1:\n return \"down\"\n if axis < 0 and i == 1:\n return \"up\"\n \n ## button control ##\n for i in range(10):\n button = self.joystick.get_button(i)\n if button == 1:\n return self.buttonMap[i]\n return None\n","sub_path":"files/engine/controllerIO.py","file_name":"controllerIO.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"365087144","text":"\"\"\"Augment pandas DataFrame with methods for machine learning\"\"\"\n__version__ = '0.2.7'\nimport os\nimport re\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.sdist import sdist\n\nURL = 'https://github.com/KIC/pandas-ml-quant'\nNAME = 'pandas-ml-utils'\n\n\nclass SDist(sdist):\n\n def fix_github_links(self, lines):\n # is: https://github.com/KIC/pandas-ml-quant/pandas-ml-common/tree/0.2.0/./examples/\n # should be: https://github.com/KIC/pandas-ml-quant/tree/0.2.0/pandas-ml-common/./examples/\n def fix_line(line):\n fixed_images = re.sub(r'(^\\[ghi\\d+]:\\s+)', f'\\\\1{URL}/raw/{__version__}/{NAME}/', line)\n fixed_location = re.sub(r'(^\\[ghl\\d+]:\\s+)', f'\\\\1{URL}/tree/{__version__}/{NAME}/', fixed_images)\n fixed_files = re.sub(r'(^\\[ghf\\d+]:\\s+)', f'\\\\1{URL}/blob/{__version__}/{NAME}/', fixed_location)\n return fixed_files\n\n return [fix_line(line) for line in lines]\n\n def make_release_tree(self, base_dir, files):\n # create the regular distribution files\n super().make_release_tree(base_dir, files)\n\n # but then fix the github links\n readme_file = os.path.join(base_dir, 'Readme.md')\n readme_lines = open(readme_file).readlines()\n\n with open(readme_file, 'w') as f:\n f.writelines(self.fix_github_links(readme_lines))\n\n\nsetup(\n name=NAME,\n version=__version__,\n author='KIC',\n author_email='',\n packages=find_packages(),\n scripts=[],\n url=f'{URL}/{NAME}',\n license='MIT',\n description=__doc__,\n long_description=open('Readme.md').read(),\n long_description_content_type='text/markdown',\n install_requires=open(\"requirements.frozen.txt\").read().splitlines() + [f\"pandas-ml-common=={__version__}\"],\n extras_require={\n \"dev\": open(\"dev-requirements.frozen.txt\").read().splitlines(),\n },\n include_package_data=True,\n classifiers=[\n # Choose 
either \"3 - Alpha\", \"4 - Beta\" or \"5 - Production/Stable\" as the current state of your package\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: Developers',\n 'Topic :: Software Development :: Build Tools',\n 'License :: OSI Approved :: MIT License',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.8',\n ],\n keywords=['pandas', 'ml', 'util', 'quant'],\n cmdclass={\n 'sdist': SDist\n },\n)\n","sub_path":"pandas-ml-utils/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"359998576","text":"print(\"DATA ANALYSIS 1\")\n#Beyond the Coding: Files verses Data structures (lists)\n\n#\tFiles are used to store the data so the data remains when\n#\tthe program isn't running. A data structure is where the data\n#\tlives when the program is running. When using data from files\n#\tit must be copied into a file. When the program is done running\n#\tthe information must be copied back into the file.\n\n#Beyond the Coding: \n#\tKnowing how the data in your file is organizedis essential. The \n#\talgoritms used to format the data into something that is useable \n#\tis based on knowing how it is stored. \n\n#Big Skill: Reading from text file.\ndata = open(\"dataAnalysis1.txt\",\"r\"); \ndataString = data.read()\ndataList = dataString.split(\"\\n\")\n\n#Beyond the Coding: \n#\tThe order of the data could be important\n#\tif any of the analysis requires changing \n#\tthe order you need to make a copy of the \n#\tinformation\n#\nprint(dataList)\n\n#Big Skill: Looping through a list using counted loop. \nfor i in range(0, len(dataList),1):\n\t#Big Skill: Removing Elements\n\tdataList[i] = dataList[i].replace(\",\",\"\")\n\t#Big Skill: Casting\n\tdataList[i] = float(dataList[i])\n\nprint(dataList)","sub_path":"Transistion Exercise/dataAnalysis1.py","file_name":"dataAnalysis1.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"475675498","text":"def solution(number, k):\n number = list(number)\n picked = []\n\n picked.append(number[0])\n for i in range(1, len(number)):\n # k를 다 소진하지 않고, 비교할 대상이 있고, 비교할 대상이 더 작다면\n while(k > 0 and picked and number[i] > picked[-1]):\n picked.pop()\n k -= 1\n # k를 다 소진했다면 남은 숫자들과 합친다.\n if(k == 0):\n picked += number[i:]\n return ''.join(picked)\n picked.append(number[i])\n return ''.join(picked[:-k])\n\n\n# 새로 배운 점\n'''\n1. 슬라이싱과 max로 인해 시간초과가 났다. 가장 앞에서부터 돌면서 자신보다 작은 애가 뒤에 있다면 걔를 없애고 k를 하나 줄인다. 
That way it runs in O(n).\n'''\n","sub_path":"two/큰 수 만들기.py","file_name":"큰 수 만들기.py","file_ext":"py","file_size_in_byte":794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"566641121","text":"#!/usr/bin/env python\n# Pset 6 credit.py (more comfortable)\n\n# This program checks if a credit\n# card number entered by a user is\n# a valid credit card number by\n# using Luhn's Algorithm to verify\n# the checksum, and it identifies if\n# it's an American Express, Mastercard,\n# Visa credit card, or if the card is\n# invalid.\nfrom cs50 import get_string\n\n\ndef main():\n # Prompts user for card number\n cc_number = get_string(\"Number: \")\n checksum(cc_number)\n\n\ndef checksum(num):\n # Checks if number has right number of digits\n mult_by_2, rest_of_digits = 0, 0\n\n if not num.isdigit():\n print(\"INVALID\")\n return False\n\n else:\n length = len(num)\n\n if length == 13 or length == 15 or length == 16:\n # Multiplies every other number by 2\n for digit in range(-2, (-length - 1), -2):\n x = int(num[digit]) * 2\n\n # Adds both digits of two-digit numbers to the total\n if x > 9:\n mult_by_2 += x // 10\n mult_by_2 += x % 10\n\n else:\n mult_by_2 += x\n\n # Adds digits that were not multiplied by two\n for digit in range(-1, (-length - 1), -2):\n rest_of_digits += int(num[digit])\n\n # Computes total and modulo divides by ten\n total = mult_by_2 + rest_of_digits\n\n if total % 10 == 0:\n # Checks if Visa card\n if length == 13 or length == 16:\n if num[0] == \"4\":\n print(\"VISA\")\n return True\n\n # Checks if American Express\n if length == 15:\n if num[0] == \"3\" and (num[1] == \"4\" or num[1] == \"7\"):\n print(\"AMEX\")\n return True\n\n # Checks if Mastercard\n if length == 16:\n if num[0] == \"5\" and (num[1] == \"1\" or num[1] == \"2\" or num[1] == \"3\" or\n num[1] == \"4\" or num[1] == \"5\"):\n print(\"MASTERCARD\")\n return True\n\n print(\"INVALID\")\n return False\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"pset6/credit/credit.py","file_name":"credit.py","file_ext":"py","file_size_in_byte":2246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"603905980","text":"import datetime\n\nimport pandas as pd\nimport numpy as np\nfrom OptionStrategyLib.OptionReplication.synthetic_option import SytheticOption\nfrom PricingLibrary.Options import EuropeanOption\nfrom back_test.model.base_account import BaseAccount\nfrom back_test.model.base_instrument import BaseInstrument\nfrom back_test.model.constant import Util, OptionType, LongShort, ExecuteType\nfrom back_test.model.trade import Trade\nfrom data_access.get_data import get_dzqh_cf_daily, get_dzqh_cf_c1_daily, \\\n get_dzqh_cf_c1_minute, get_index_mktdata\nfrom OptionStrategyLib.VolatilityModel.historical_volatility import historical_volatility_model as Histvol\n\nstart_date = datetime.date(2018, 4, 1)\nend_date = datetime.date(2018, 6, 1)\nhist_date = start_date - datetime.timedelta(days=40)\ndf_future_c1 = get_dzqh_cf_c1_minute(start_date, end_date, 'if')\ndf_future_c1_daily = get_dzqh_cf_c1_daily(hist_date, end_date, 'if')\ndf_futures_all_daily = get_dzqh_cf_daily(start_date, end_date, 'if') # daily data of all future contracts\ndf_index = get_index_mktdata(start_date, end_date, 'index_300sh') # daily data of underlying index\ndf_index = df_index[df_index[Util.DT_DATE].isin(Util.DZQH_CF_DATA_MISSING_DATES)==False].reset_index(drop=True)\ndf_vol_1m = Histvol.hist_vol(df_future_c1_daily)\n# df_parkinson_1m = 
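The "make the biggest number" record above uses a monotonic-stack greedy: pop smaller stack tops while removals remain. A compact restatement of the same technique, with the problem's usual sample cases as sanity checks (the expected strings are my own hand-traces, not from the source):

```python
def biggest_number(number: str, k: int) -> str:
    picked = []
    for digit in number:
        # Pop smaller digits off the stack while removals remain
        while k > 0 and picked and digit > picked[-1]:
            picked.pop()
            k -= 1
        picked.append(digit)
    # If k removals were never used up, trim from the tail
    return ''.join(picked[:len(picked) - k])

assert biggest_number("1924", 2) == "94"
assert biggest_number("1231234", 3) == "3234"
```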
Histvol.parkinson_number(df_future_c1_daily)\n# df_garman_klass = Histvol.garman_klass(df_future_c1_daily)\n# df_hist_vol = df_vol_1m.join(df_parkinson_1m, how='left')\n# df_hist_vol = df_hist_vol.join(df_garman_klass, how='left')\n\ndf_future_c1_daily = df_future_c1_daily[df_future_c1_daily[Util.DT_DATE]>=start_date].reset_index(drop=True)\nsynthetic_option = SytheticOption(df_c1_minute=df_future_c1,\n df_c1_daily=df_future_c1_daily,\n df_futures_all_daily=df_futures_all_daily,\n df_index_daily=df_index)\nsynthetic_option.init()\nunderlying = BaseInstrument(df_data=df_index)\nunderlying.init()\naccount = BaseAccount(2*Util.BILLION, leverage=10.0, rf=0.0)\ntrading_desk = Trade()\n\n#####################################################################\n# \"\"\" Init position \"\"\"\nstrike = synthetic_option.underlying_index_state_daily[Util.AMT_CLOSE]\ndt_maturity = synthetic_option.eval_date + datetime.timedelta(days=30)\nvol = 0.2\nOption = EuropeanOption(strike, dt_maturity, OptionType.PUT)\ndelta = synthetic_option.get_black_delta(Option, vol)\nid_future = synthetic_option.current_state[Util.ID_INSTRUMENT]\nsynthetic_unit = synthetic_option.get_synthetic_unit(delta)\nif synthetic_unit > 0:\n long_short = LongShort.LONG\nelse:\n long_short = LongShort.SHORT\n\n# \"\"\" Open the long position in the underlying spot at the first day's daily close \"\"\"\nunderlying_unit = np.floor(Util.BILLION/underlying.mktprice_close())\norder_underlying = account.create_trade_order(underlying, LongShort.LONG, underlying_unit)\nexecution_record = underlying.execute_order(order_underlying, slippage=0, execute_type=ExecuteType.EXECUTE_ALL_UNITS)\naccount.add_record(execution_record, underlying)\nunderlying.next()\n# \"\"\" Open the initial synthetic option position at the first day's VWAP \"\"\"\norder = account.create_trade_order(synthetic_option,\n long_short,\n synthetic_unit)\nexecution_record = synthetic_option.execute_order_by_VWAP(order, slippage=0, execute_type=ExecuteType.EXECUTE_ALL_UNITS)\naccount.add_record(execution_record, synthetic_option)\n#####################################################################\nwhile synthetic_option.has_next() and synthetic_option.eval_date < dt_maturity:\n\n if id_future != synthetic_option.current_state[Util.ID_INSTRUMENT]:\n open_long_short = account.trade_book.loc[id_future, Util.TRADE_LONG_SHORT]\n hold_unit = account.trade_book.loc[id_future, Util.TRADE_UNIT]\n spot = synthetic_option.current_daily_state[Util.AMT_CLOSE]\n delta = synthetic_option.get_black_delta(Option, vol, spot)\n synthetic_unit = synthetic_option.get_synthetic_unit(delta)\n id_c2 = synthetic_option.current_state[Util.ID_INSTRUMENT]\n close_execution_record, open_execution_record \\\n = synthetic_option.shift_contract_by_VWAP(id_c1=id_future,\n id_c2=id_c2,\n hold_unit=hold_unit,\n open_unit=synthetic_unit,\n hold_long_short=open_long_short,\n slippage=0,\n execute_type=ExecuteType.EXECUTE_ALL_UNITS)\n account.add_record(close_execution_record, synthetic_option)\n synthetic_option._id_instrument = id_c2\n account.add_record(open_execution_record, synthetic_option)\n id_future = id_c2\n account.daily_accounting(synthetic_option.eval_date) # end-of-day settlement at that day's close\n print(synthetic_option.eval_date, account.account.loc[synthetic_option.eval_date, Util.PORTFOLIO_NPV], underlying.eval_date)\n underlying.next()\n\n if synthetic_option.eval_date != synthetic_option.get_next_state_date():\n date = synthetic_option.eval_date\n account.daily_accounting(date) # end-of-day settlement at that day's close\n print(date, account.account.loc[date, Util.PORTFOLIO_NPV], underlying.eval_date)\n underlying.next()\n synthetic_option.next()\n\n if 
synthetic_option.eval_datetime.minute % 10 != 0:\n synthetic_option.next()\n continue\n\n delta = synthetic_option.get_black_delta(Option, vol)\n rebalance_unit = synthetic_option.get_synthetic_option_rebalancing_unit(delta)\n if rebalance_unit > 0:\n long_short = LongShort.LONG\n elif rebalance_unit < 0:\n long_short = LongShort.SHORT\n else:\n synthetic_option.next()\n continue\n order = account.create_trade_order(synthetic_option,\n long_short,\n rebalance_unit)\n execution_record = synthetic_option.execute_order(order, slippage=0, execute_type=ExecuteType.EXECUTE_ALL_UNITS)\n account.add_record(execution_record, synthetic_option)\n\nclose_out_orders = account.creat_close_out_order()\nfor order in close_out_orders:\n execution_record = account.dict_holding[order.id_instrument].execute_order(order, slippage=0, execute_type=ExecuteType.EXECUTE_ALL_UNITS)\n account.add_record(execution_record, account.dict_holding[order.id_instrument])\naccount.daily_accounting(synthetic_option.eval_date)\nprint(synthetic_option.eval_date, account.account.loc[synthetic_option.eval_date, Util.PORTFOLIO_NPV], underlying.eval_date)\ndf_records = pd.DataFrame(account.list_records)\ndf_records.to_csv('trade_records.csv')\ntotal_pnl = df_records[Util.TRADE_REALIZED_PNL].sum()\nfinal_npv = (2*Util.BILLION + total_pnl) / (2*Util.BILLION)\nprint('calculate final npv from adding up realized pnl ; ', final_npv)\n","sub_path":"OptionStrategyLib/OptionReplication/synthetic_option_test.py","file_name":"synthetic_option_test.py","file_ext":"py","file_size_in_byte":7035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"307990704","text":"# Copyright (C) 2020 Cancer Care Associates\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n\n# http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nfrom pymedphys._imports import plt\nfrom pymedphys._imports import streamlit as st\n\nfrom pymedphys import _losslessjpeg as lljpeg\n\n# from pymedphys._wlutz import findbb, findfield, imginterp, iview, reporting\nfrom pymedphys._streamlit.utilities import misc\n\nfrom pymedphys._experimental.streamlit.apps.wlutz import _dbf, _filtering, _frames\n\n\n@st.cache()\ndef read_image(path):\n return lljpeg.imread(path)\n\n\ndef main():\n st.title(\"Winston-Lutz Arc\")\n\n _, database_directory = misc.get_site_and_directory(\"Database Site\", \"iviewdb\")\n\n st.write(\"## Load iView databases for a given date\")\n refresh_cache = st.button(\"Re-query databases\")\n merged = _dbf.load_and_merge_dbfs(database_directory, refresh_cache)\n\n st.write(\"## Filtering\")\n filtered = _filtering.filter_image_sets(merged)\n filtered.sort_values(\"datetime\", ascending=False, inplace=True)\n\n st.write(filtered)\n\n if len(filtered) == 0:\n st.stop()\n\n st.write(\"## Loading database image frame data\")\n\n try:\n table = _frames.dbf_frame_based_database(\n database_directory, refresh_cache, filtered\n )\n except FileNotFoundError:\n table = _frames.xml_frame_based_database(database_directory, filtered)\n\n st.write(table)\n\n selected_filepath = 
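The replication script above keeps re-querying `get_black_delta` for the put's delta before sizing the futures hedge; that method's body isn't shown. A standard Black-Scholes put-delta sketch of the kind it presumably wraps (flat zero rate and no dividends assumed; the function name is mine, not the library's):

```python
import math

def bs_put_delta(spot: float, strike: float, vol: float, t_years: float, rate: float = 0.0) -> float:
    """Black-Scholes delta of a European put: N(d1) - 1 (assumed form, not the library's code)."""
    d1 = (math.log(spot / strike) + (rate + 0.5 * vol ** 2) * t_years) / (vol * math.sqrt(t_years))
    n_d1 = 0.5 * (1.0 + math.erf(d1 / math.sqrt(2.0)))  # standard normal CDF via erf
    return n_d1 - 1.0

# An at-the-money one-month put at 20% vol sits near delta -0.5
print(bs_put_delta(100.0, 100.0, 0.20, 30 / 365))
```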
st.selectbox(\"Select single filepath\", table[\"filepath\"])\n\n resolved_path = database_directory.joinpath(selected_filepath)\n st.write(resolved_path)\n\n fig, ax = plt.subplots()\n ax.imshow(read_image(resolved_path))\n st.pyplot(fig)\n\n # # st.write(files)\n # sorted_files = sorted(files, key=get_modified_time, reverse=True)\n # image_path = st.selectbox(\"Image to select\", options=sorted_files[0:10])\n\n # st.write(\"## Parameters\")\n\n # width = st.number_input(\"Width (mm)\", 20)\n # length = st.number_input(\"Length (mm)\", 24)\n # edge_lengths = [width, length]\n\n # # initial_rotation = 0\n # bb_diameter = st.number_input(\"BB Diameter (mm)\", 8)\n # penumbra = st.number_input(\"Penumbra (mm)\", 2)\n\n # # files = sorted(IMAGES_DIR.glob(\"*.jpg\"), key=lambda t: -os.stat(t).st_mtime)\n # # most_recent = files[0:5]\n\n # # most_recent\n\n # if st.button(\"Show Image\"):\n # fig = plt.figure()\n # fig.imshow(read_image(image_path))\n # st.pyplot(fig)\n\n # if st.button(\"Calculate\"):\n # img = read_image(image_path)\n # x, y, img = iview.iview_image_transform(img)\n # field = imginterp.create_interpolated_field(x, y, img)\n # initial_centre = findfield.get_centre_of_mass(x, y, img)\n # (field_centre, field_rotation) = findfield.field_centre_and_rotation_refining(\n # field, edge_lengths, penumbra, initial_centre, fixed_rotation=0\n # )\n\n # bb_centre = findbb.optimise_bb_centre(\n # field, bb_diameter, edge_lengths, penumbra, field_centre, field_rotation\n # )\n # fig = reporting.image_analysis_figure(\n # x,\n # y,\n # img,\n # bb_centre,\n # field_centre,\n # field_rotation,\n # bb_diameter,\n # edge_lengths,\n # penumbra,\n # )\n\n # st.write(fig)\n # st.pyplot()\n","sub_path":"lib/pymedphys/_experimental/streamlit/apps/wlutz/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"639290078","text":"import torch\nimport torch.nn as nn\nimport itertools as it\n\nACTIVATIONS = {\"relu\": nn.ReLU, \"lrelu\": nn.LeakyReLU}\nPOOLINGS = {\"avg\": nn.AvgPool2d, \"max\": nn.MaxPool2d}\n\n\nclass ConvClassifier(nn.Module):\n \"\"\"\n A convolutional classifier model based on PyTorch nn.Modules.\n\n The architecture is:\n [(CONV -> ACT)*P -> POOL]*(N/P) -> (FC -> ACT)*M -> FC\n \"\"\"\n\n def __init__(\n self,\n in_size,\n out_classes: int,\n channels: list,\n pool_every: int,\n hidden_dims: list,\n conv_params: dict = {},\n activation_type: str = \"relu\",\n activation_params: dict = {},\n pooling_type: str = \"max\",\n pooling_params: dict = {},\n ):\n \"\"\"\n :param in_size: Size of input images, e.g. 
(C,H,W).\n :param out_classes: Number of classes to output in the final layer.\n :param channels: A list of of length N containing the number of\n (output) channels in each conv layer.\n :param pool_every: P, the number of conv layers before each max-pool.\n :param hidden_dims: List of of length M containing hidden dimensions of\n each Linear layer (not including the output layer).\n :param conv_params: Parameters for convolution layers.\n :param activation_type: Type of activation function; supports either 'relu' or\n 'lrelu' for leaky relu.\n :param activation_params: Parameters passed to activation function.\n :param pooling_type: Type of pooling to apply; supports 'max' for max-pooling or\n 'avg' for average pooling.\n :param pooling_params: Parameters passed to pooling layer.\n \"\"\"\n super().__init__()\n assert channels and hidden_dims\n\n self.in_size = in_size\n self.out_classes = out_classes\n self.channels = channels\n self.pool_every = pool_every\n self.hidden_dims = hidden_dims\n self.conv_params = conv_params\n self.activation_type = activation_type\n self.activation_params = activation_params\n self.pooling_type = pooling_type\n self.pooling_params = pooling_params\n\n if activation_type not in ACTIVATIONS or pooling_type not in POOLINGS:\n raise ValueError(\"Unsupported activation or pooling type\")\n\n self.feature_extractor = self._make_feature_extractor()\n self.classifier = self._make_classifier()\n\n def _make_feature_extractor(self):\n in_channels, in_h, in_w, = tuple(self.in_size)\n\n layers = []\n # TODO: Create the feature extractor part of the model:\n # [(CONV -> ACT)*P -> POOL]*(N/P)\n # Use only dimension-preserving 3x3 convolutions.\n # Apply activation function after each conv, using the activation type and\n # parameters.\n # Apply pooling to reduce dimensions after every P convolutions, using the\n # pooling type and pooling parameters.\n # Note: If N is not divisible by P, then N mod P additional\n # CONV->ACTs should exist at the end, without a POOL after them.\n # ====== YOUR CODE: ======\n\n P = self.pool_every\n N = len(self.channels)\n M = self.hidden_dims\n\n\n def ceil(n):\n res = int(n)\n return res if res == n else res + 1\n\n # update the dimensions along while adding the layers\n curr_h = in_h\n curr_w = in_w\n\n # print(in_channels, in_h, in_w)\n\n conv_act_pool_num = ceil(N / P) - 1 if N % P != 0 else ceil(N / P)\n\n def add_activation_function(layers_in, activation_type, **activation_params):\n if activation_type == 'relu':\n layers_in.append(nn.ReLU(**activation_params))\n else:\n layers_in.append(nn.LeakyReLU(**activation_params))\n return layers_in\n\n def add_pool_function(layers_in, pooling_type, **pooling_params):\n if pooling_type == 'max':\n layers_in.append(nn.MaxPool2d(**pooling_params))\n else:\n layers_in.append(nn.AvgPool2d(**pooling_params))\n return layers_in\n\n def update_size_filter(input_size, dim, last_filter):\n # after filter, size changes:\n # size_out = ((size_in +2*padding - (dilation * (kernel_size - 1)) -1 ) / stride) +1\n\n # dilation = 1 for all our cases, so:\n # size_out = (size_in +2*padding - ( kernel_size-1 ) -1 ) / stride) + 1\n\n if type(last_filter) == torch.nn.modules.pooling.MaxPool2d:\n padding = last_filter.padding\n dilation = last_filter.dilation\n kernel_size = last_filter.kernel_size\n stride = last_filter.stride\n elif type(last_filter) == torch.nn.modules.pooling.AvgPool2d:\n padding = last_filter.padding\n dilation = 1\n kernel_size = last_filter.kernel_size\n stride = last_filter.stride\n 
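The `update_size_filter` helper above encodes the usual convolution/pooling output-size formula quoted in its comments. A quick numeric check of that formula on its own (sizes chosen for illustration):

```python
def conv_out(size_in: int, kernel_size: int, padding: int = 0, stride: int = 1, dilation: int = 1) -> int:
    # size_out = (size_in + 2*padding - dilation*(kernel_size - 1) - 1) // stride + 1
    return (size_in + 2 * padding - dilation * (kernel_size - 1) - 1) // stride + 1

print(conv_out(32, kernel_size=3, padding=1))   # 32 -> 32: a 3x3 conv with padding 1 preserves size
print(conv_out(32, kernel_size=2, stride=2))    # 32 -> 16: a 2x2 stride-2 pool halves it
```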
else:\n padding = last_filter.padding[dim]\n dilation = last_filter.dilation[dim]\n kernel_size = last_filter.kernel_size[dim]\n stride = last_filter.stride[dim]\n\n return int(((input_size + 2 * padding - (dilation * (kernel_size - 1)) - 1) / stride) + 1)\n\n\n in_ch_list = [in_channels]\n conv_ch_list = in_ch_list + self.channels\n\n for i in range(conv_act_pool_num):\n\n # CONV -> ACT\n for j in range(P):\n layers.append(nn.Conv2d(conv_ch_list[i*P + j], conv_ch_list[i*P + (j+1)], **self.conv_params))\n curr_h = update_size_filter(curr_h, 0, layers[-1])\n curr_w = update_size_filter(curr_w, 1, layers[-1])\n\n layers = add_activation_function(layers, self.activation_type, **self.activation_params)\n\n\n # POOL\n layers = add_pool_function(layers, self.pooling_type, **self.pooling_params)\n curr_h = update_size_filter(curr_h, 0, layers[-1])\n curr_w = update_size_filter(curr_w, 1, layers[-1])\n\n\n # check if need conv layer without pool\n if N % P > 0:\n\n init_i = conv_act_pool_num * P\n\n for i in range(N % P):\n layers.append(nn.Conv2d(conv_ch_list[init_i + i], conv_ch_list[init_i + i + 1], **self.conv_params))\n curr_h = update_size_filter(curr_h, 0, layers[-1])\n curr_w = update_size_filter(curr_w, 1, layers[-1])\n # print(f\"layer size {curr_h}\")\n layers = add_activation_function(layers, self.activation_type, **self.activation_params)\n\n self.classified_input_size = int(curr_h), int(curr_w)\n\n # ========================\n seq = nn.Sequential(*layers)\n return seq\n\n def _make_classifier(self):\n layers = []\n # TODO: Create the classifier part of the model:\n # (FC -> ACT)*M -> Linear\n # You'll first need to calculate the number of features going in to\n # the first linear layer.\n # The last Linear layer should have an output dim of out_classes.\n # ====== YOUR CODE: ======\n in_channels, in_h, in_w, = tuple(self.in_size)\n\n input_h, input_w = tuple(self.classified_input_size)\n\n M = self.hidden_dims\n\n def add_activation_function(layers_in, activation_type, **activation_params):\n if activation_type == 'relu':\n layers_in.append(nn.ReLU(**activation_params))\n else:\n layers_in.append(nn.LeakyReLU(**activation_params))\n return layers_in\n\n # add the FCNs\n last_cnn_out_c = self.channels[-1]\n\n # print(f\"input params: w: {input_w}, h : {input_h}, c: {last_cnn_out_c}\")\n last_cnn_params_n = input_w * input_h * last_cnn_out_c\n\n layers.append(nn.Linear(last_cnn_params_n, self.hidden_dims[0]))\n layers = add_activation_function(layers, self.activation_type, **self.activation_params)\n\n for i in range(len(M) - 1):\n layers.append(nn.Linear(self.hidden_dims[i], self.hidden_dims[i + 1]))\n layers = add_activation_function(layers, self.activation_type, **self.activation_params)\n\n # last layer - connect to the output features amount\n layers.append(nn.Linear(self.hidden_dims[-1], self.out_classes))\n # ========================\n seq = nn.Sequential(*layers)\n return seq\n\n def forward(self, x):\n # TODO: Implement the forward pass.\n # Extract features from the input, run the classifier on them and\n # return class scores.\n # ====== YOUR CODE: ======\n features = self.feature_extractor(x)\n features = features.view(features.shape[0], -1)\n classification = self.classifier(features)\n out = classification\n # ========================\n return out\n\n\nclass ResidualBlock(nn.Module):\n \"\"\"\n A general purpose residual block.\n \"\"\"\n\n def __init__(\n self,\n in_channels: int,\n channels: list,\n kernel_sizes: list,\n batchnorm=False,\n dropout=0.0,\n 
activation_type: str = \"relu\",\n activation_params: dict = {},\n **kwargs,\n ):\n \"\"\"\n :param in_channels: Number of input channels to the first convolution.\n :param channels: List of number of output channels for each\n convolution in the block. The length determines the number of\n convolutions.\n :param kernel_sizes: List of kernel sizes (spatial). Length should\n be the same as channels. Values should be odd numbers.\n :param batchnorm: True/False whether to apply BatchNorm between\n convolutions.\n :param dropout: Amount (p) of Dropout to apply between convolutions.\n Zero means don't apply dropout.\n :param activation_type: Type of activation function; supports either 'relu' or\n 'lrelu' for leaky relu.\n :param activation_params: Parameters passed to activation function.\n \"\"\"\n super().__init__()\n assert channels and kernel_sizes\n assert len(channels) == len(kernel_sizes)\n assert all(map(lambda x: x % 2 == 1, kernel_sizes))\n\n if activation_type not in ACTIVATIONS:\n raise ValueError(\"Unsupported activation type\")\n\n self.main_path, self.shortcut_path = None, None\n\n # TODO: Implement a generic residual block.\n # Use the given arguments to create two nn.Sequentials:\n # - main_path, which should contain the convolution, dropout,\n # batchnorm, relu sequences (in this order).\n # Should end with a final conv as in the diagram.\n # - shortcut_path which should represent the skip-connection and\n # may contain a 1x1 conv.\n # Notes:\n # - Use convolutions which preserve the spatial extent of the input.\n # - Use bias in the main_path conv layers, and no bias in the skips.\n # - For simplicity of implementation, assume kernel sizes are odd.\n # - Don't create layers which you don't use! This will prevent\n # correct comparison in the test.\n # ====== YOUR CODE: ======\n\n # main path\n layers = []\n\n # each convolution layer is followed by\n # dropout (optional)\n # batch normalization (optional)\n # relu\n\n in_ch_list = [in_channels]\n conv_ch_list = in_ch_list + channels\n\n for i in range(len(conv_ch_list) - 1):\n\n # this should preserve the size\n\n padding = int((kernel_sizes[i] - 1) / 2) # calculated\n stride = 1\n dilation = 1\n layers.append(nn.Conv2d(in_channels=conv_ch_list[i],\n out_channels=conv_ch_list[i + 1],\n kernel_size=kernel_sizes[i],\n padding=padding,\n stride=stride,\n dilation=dilation,\n bias=True))\n\n if i < len(conv_ch_list) - 2:\n # for all layers except the last layer\n if dropout > 0:\n layers.append(nn.Dropout2d(p=dropout))\n\n if batchnorm:\n layers.append(nn.BatchNorm2d(num_features=conv_ch_list[i + 1]))\n\n\n if activation_type == 'relu':\n layers.append(nn.ReLU(**activation_params))\n else:\n layers.append(nn.LeakyReLU(**activation_params))\n\n self.main_path = nn.Sequential(*layers)\n\n # skip path layer\n\n skip_layers = []\n\n if in_channels == channels[-1]:\n\n # have to apply identity\n pass\n # skip_layers.append(nn.Identity())\n\n else:\n conv_identity_layer = nn.Conv2d(in_channels=in_channels,\n out_channels=channels[-1],\n kernel_size=1,\n padding=0,\n stride=1,\n dilation=1,\n bias=False)\n # set weights to 1 to make it identity\n # conv_identity_layer.weight = torch.nn.Parameter(torch.ones_like(conv_identity_layer.weight))\n\n\n\n skip_layers.append(conv_identity_layer)\n\n\n\n self.shortcut_path = nn.Sequential(*skip_layers)\n # ========================\n\n def forward(self, x):\n out = self.main_path(x)\n out += self.shortcut_path(x)\n out = torch.relu(out)\n return out\n\n\nclass ResNetClassifier(ConvClassifier):\n 
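`ResidualBlock` above only inserts a bias-free 1x1 convolution on the shortcut when the input and output channel counts differ, so the skip tensor stays summable with the main path. The same pattern in miniature (a generic sketch, not the homework's exact block):

```python
import torch
import torch.nn as nn

class TinyResidual(nn.Module):
    def __init__(self, in_c: int, out_c: int):
        super().__init__()
        self.main = nn.Conv2d(in_c, out_c, kernel_size=3, padding=1, bias=True)
        # Project the skip with a 1x1 conv only when channel counts differ
        self.skip = nn.Identity() if in_c == out_c else nn.Conv2d(in_c, out_c, kernel_size=1, bias=False)

    def forward(self, x):
        return torch.relu(self.main(x) + self.skip(x))

print(TinyResidual(8, 32)(torch.randn(2, 8, 16, 16)).shape)  # torch.Size([2, 32, 16, 16])
```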
def __init__(\n self,\n in_size,\n out_classes,\n channels,\n pool_every,\n hidden_dims,\n batchnorm=False,\n dropout=0.0,\n **kwargs,\n ):\n \"\"\"\n See arguments of ConvClassifier & ResidualBlock.\n \"\"\"\n self.batchnorm = batchnorm\n self.dropout = dropout\n super().__init__(\n in_size, out_classes, channels, pool_every, hidden_dims, **kwargs\n )\n\n def _make_feature_extractor(self):\n in_channels, in_h, in_w, = tuple(self.in_size)\n\n layers = []\n # TODO: Create the feature extractor part of the model:\n # [-> (CONV -> ACT)*P -> POOL]*(N/P)\n # \\------- SKIP ------/\n # For the ResidualBlocks, use only dimension-preserving 3x3 convolutions.\n # Apply Pooling to reduce dimensions after every P convolutions.\n # Notes:\n # - If N is not divisible by P, then N mod P additional\n # CONV->ACT (with a skip over them) should exist at the end,\n # without a POOL after them.\n # - Use your own ResidualBlock implementation.\n # ====== YOUR CODE: ======\n\n # extract the kwargs\n\n\n def ceil(n):\n res = int(n)\n return res if res == n else res + 1\n\n P = self.pool_every\n N = len(self.channels)\n M = self.hidden_dims\n\n # update the dimensions along while adding the layers\n curr_h = in_h\n curr_w = in_w\n\n\n conv_act_pool_num = ceil(N / P) - 1 if N % P != 0 else ceil(N / P)\n in_ch_list = [in_channels]\n conv_ch_list = in_ch_list + self.channels\n\n # print(f\"conv_ch_list = {conv_ch_list}\")\n\n def add_pool_function(layers_in, pooling_type, **pooling_params):\n if pooling_type == 'max':\n layers_in.append(nn.MaxPool2d(**pooling_params))\n else:\n layers_in.append(nn.AvgPool2d(**pooling_params))\n return layers_in\n\n def update_size_filter(input_size, dim, last_filter):\n # after filter, size changes:\n # size_out = ((size_in +2*padding - (dilation * (kernel_size - 1)) -1 ) / stride) +1\n\n # dilation = 1 for all our cases, so:\n # size_out = (size_in +2*padding - ( kernel_size-1 ) -1 ) / stride) + 1\n\n if type(last_filter) == torch.nn.modules.pooling.MaxPool2d:\n padding = last_filter.padding\n dilation = last_filter.dilation\n kernel_size = last_filter.kernel_size\n stride = last_filter.stride\n elif type(last_filter) == torch.nn.modules.pooling.AvgPool2d:\n padding = last_filter.padding\n dilation = 1\n kernel_size = last_filter.kernel_size\n stride = last_filter.stride\n else:\n padding = last_filter.padding[dim]\n dilation = last_filter.dilation[dim]\n kernel_size = last_filter.kernel_size[dim]\n stride = last_filter.stride[dim]\n\n return int(((input_size + 2 * padding - (dilation * (kernel_size - 1)) - 1) / stride) + 1)\n\n print(\"\\nmain block\")\n if conv_act_pool_num > 0:\n for i in range(conv_act_pool_num):\n init_channel = i * (P - 1)\n #\n # print(f\"init_channel = {init_channel}\")\n # print(f\"in_channels={conv_ch_list[init_channel]}\")\n # print(f\"conv_ch_list[init_channel + 1: init_channel + P + 1] = {conv_ch_list[init_channel + 1: init_channel + P + 1]}\")\n\n layers.append(ResidualBlock(in_channels=conv_ch_list[init_channel],\n channels=conv_ch_list[init_channel + 1: init_channel + P + 1],\n kernel_sizes=[3] * P,\n batchnorm=self.batchnorm,\n dropout=self.dropout,\n activation_type=self.activation_type,\n activation_params=self.activation_params))\n\n layers = add_pool_function(layers, self.pooling_type, **self.pooling_params)\n\n # only pool layer affects the size\n curr_h = update_size_filter(curr_h, 0, layers[-1])\n curr_w = update_size_filter(curr_w, 1, layers[-1])\n\n # check if need Res block without pool\n print(\"\\nleft block\")\n if N % P > 0:\n 
init_channel = (ceil(N / P)-1) * P\n # print(f\"init_channel = {init_channel}\")\n # print(f\"in_channels={conv_ch_list[init_channel]}\")\n # print(f\"conv_ch_list[init_channel + 1:] = {conv_ch_list[init_channel + 1:]}\")\n # without batchnorm and dropout\n layers.append(ResidualBlock(in_channels=conv_ch_list[init_channel],\n channels=conv_ch_list[init_channel + 1:],\n kernel_sizes=[3] * (N % P),\n batchnorm=self.batchnorm,\n dropout=self.dropout,\n activation_type=self.activation_type,\n activation_params=self.activation_params))\n\n self.classified_input_size = int(curr_h), int(curr_w)\n\n # ========================\n seq = nn.Sequential(*layers)\n return seq\n\n\nclass YourCodeNet(ConvClassifier):\n def __init__(self, in_size, out_classes, channels, pool_every, hidden_dims):\n super().__init__(in_size, out_classes, channels, pool_every, hidden_dims)\n\n # TODO: Change whatever you want about the ConvClassifier to try to\n # improve it's results on CIFAR-10.\n # For example, add batchnorm, dropout, skip connections, change conv\n # filter sizes etc.\n # ====== YOUR CODE: ======\n # raise NotImplementedError()\n\n # ========================\n","sub_path":"hw2/solution_alex/hw2/cnn.py","file_name":"cnn.py","file_ext":"py","file_size_in_byte":19946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"203375916","text":"from django.db import models\n\n\nclass Blog(models.Model):\n title = models.CharField(max_length=120)\n author = models.CharField(max_length=120,verbose_name='作者', help_text='作者')\n date_of_publishing = models.DateTimeField(auto_now_add=True)\n content = models.ImageField(null=True, blank=True, verbose_name='图片', upload_to='static/images/')\n price = models.DecimalField(max_digits=10, decimal_places=2, null=True, blank=True)\n\n def __str__(self):\n return f\"{self.title}-{self.date_of_publishing}\"\n\n class Meta:\n ordering = ('-date_of_publishing',)\n","sub_path":"python_02_django/51_django_import_export/app1/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"427942782","text":"from ml.data.data import File_Queue\nfrom ml.neural_network.layer import FlatteningLayer, FullyConnectedLayer\nfrom ml.neural_network.neural_network import NeuralNetwork\nfrom ml.neural_network.train import launch_training_task\nfrom ml.preprocessor import Preprocessor\nimport argparse\nimport os\nimport tensorflow as tf\n\ndef get_records(path):\n with open(path) as f:\n record_list = f.readlines()\n record_list = [record.strip() for record in record_list]\n return record_list\n\nidentity_f = lambda x: x\n\ndef cost_f(y, y_hat):\n return tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_hat))\n\ndef accuracy_f(y, y_hat):\n correct_predictions_vector = tf.equal(tf.argmax(y, 1), tf.argmax(y_hat, 1))\n accuracy = tf.reduce_mean(tf.cast(correct_predictions_vector, tf.float32))\n return accuracy\n\ngradient_f = tf.train.AdamOptimizer(1e-4).minimize\n\ndef prediction_f(y):\n return tf.argmax(y, 1)\n\nclass Preprocessor(Preprocessor):\n def __init__(self, label_to_filter):\n self.label_to_filter = label_to_filter\n\n def __call__(self, features):\n inp = tf.concat([features['mean_rgb'], features['mean_audio']], 0)\n labels = tf.sparse_to_indicator(features[\"labels\"], 4716)\n labels.set_shape([4716])\n single_label = labels[self.label_to_filter]\n single_label = tf.one_hot(indices=tf.cast(single_label, 
tf.int32), depth=2)\n single_label = tf.cast(single_label, tf.float32)\n return inp, single_label\n\ndef make_network(inputs_dim, labels_dim, input_queue):\n layer_1 = FullyConnectedLayer(name='layer_1', activation_f=tf.nn.relu)\n final_layer = FullyConnectedLayer(name='final_layer', activation_f=identity_f)\n layers = [layer_1, final_layer]\n layers_dims = [(1024,), (2,)]\n network = NeuralNetwork(inputs_dim, labels_dim, layers, layers_dims, cost_f, accuracy_f,\n gradient_f, prediction_f, input_queue)\n network.make_computation_graph()\n return network\n\ndef main(label, save_file_name):\n feature_map = {'labels': tf.VarLenFeature(dtype=tf.int64),\n 'video_id': tf.FixedLenFeature(shape=[], dtype=tf.string, default_value=None),\n 'mean_rgb': tf.FixedLenFeature(shape=[1024], dtype=tf.float32, default_value=None),\n 'mean_audio': tf.FixedLenFeature(shape=[128], dtype=tf.float32, default_value=None)}\n inputs_dim = [1152]\n labels_dim = [2]\n preprocess = Preprocessor(label)\n directory = os.path.join(os.getcwd(), os.path.dirname(__file__))\n record_list = get_records(directory + '/record_list.txt')\n data = File_Queue(record_list, feature_map, preprocess,\n inputs_dim=inputs_dim, labels_dim=labels_dim, batch_size=1000)\n make_network(inputs_dim, labels_dim, data)\n result_queue = launch_training_task(data, use_queue=True, iteration_period=1, save_period=5,\n save_file_name=save_file_name)\n return result_queue\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Launch a training task on the YouTube-8M Dataset.')\n parser.add_argument('--label', type=int, help='Specify what label the model will be trained on.')\n parser.add_argument('-o', '--output', help='Location where the trained model will be stored.')\n arguments = parser.parse_args()\n label = arguments.label\n save_file_name = arguments.output\n main(label=label, save_file_name=save_file_name)\n","sub_path":"scripts/youtube8m/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"76155182","text":"\"\"\"\nThis class reads params from a YAML file and creates an object that\ncontains attributes named as the params in the file, accessible through\ngetters:\n\n object.parameter\n\nin addition to classical dictionary access method\n\n object[parameter]\n\nThe structure of attributes is built recursively if they contain a dictionary.\n\n object.attr1.attr2.attr3\n\n\"\"\"\n\nfrom os import getcwd\nfrom pathlib import Path\nfrom pandas import DataFrame\n\nfrom utils.my_dict import MyDict\nfrom display import Display\nfrom yaml import safe_load, YAMLError\n\n\nclass Dictionary(MyDict):\n\n def __init__(self, default_params_filename='params.yaml', **kwargs):\n \"\"\"\n Read the parameters from a default filename\n :return:\n \"\"\"\n super().__init__(**kwargs)\n params = {}\n cwd = Path(getcwd())\n params_path: str = str(cwd.joinpath(default_params_filename))\n\n with open(params_path, 'r') as stream:\n try:\n params = safe_load(stream)\n except YAMLError as exc:\n print(exc)\n\n self.add_dict(self, params)\n\n # Check that I've states and actions to start playing with.\n if not self._action or not self._state:\n raise AssertionError('No states or actions defined in config file.')\n\n # Build a dictionary with a sequential number associated to each action\n setattr(self, '_action_id', MyDict())\n for tup in zip(self._action, range(len(self._action))):\n self._action_id[tup[0]] = tup[1]\n\n # 
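The `Dictionary` class above zips action names with `range(...)` to build an action-to-id map and then the reverse id-to-name map. Stripped of the class machinery, the idiom is just two comprehensions (the action names here are invented for illustration):

```python
actions = ['buy', 'sell', 'hold']  # illustrative, not from any real params.yaml

action_id = {name: idx for idx, name in enumerate(actions)}    # name -> sequential id
action_name = {idx: name for idx, name in enumerate(actions)}  # id -> name

assert action_name[action_id['sell']] == 'sell'
```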
Build the reverse dictionary for the actions dictionary\n setattr(self, '_action_name', MyDict())\n for tup in zip(range(len(self._action)), self._action):\n self._action_name[tup[0]] = tup[1]\n\n # Specific attributes to store number of actions and states.\n setattr(self, '_num_actions', len(self._action))\n\n # Build a list of lists with the names of all possible states.\n setattr(self, '_states_list', list())\n for state in self._state.keys():\n if state[0] == '_':\n self._states_list.append(self._state[state]._names)\n\n # Compute the total number of states as the multiplication of the\n # number of substates in each possible state-stack\n setattr(self, '_num_states', int)\n self._num_states = 1\n for state in self._state.keys():\n self._num_states = self._num_states * len(self._state[state]._names)\n\n # Create a display property to centralize all reporting activity into\n # a single function. That way I can store it all in a single dataframe\n # for later analysis.\n setattr(self, 'display', Display)\n self.display = Display(self)\n\n # Create a DataFrame within the configuration to store all the values\n # that are relevant to later perform data analysis.\n # The YAML file contains the column names in a parameter called\n # table_headers.\n setattr(self, 'results', DataFrame)\n self.results = DataFrame(columns=self._table_headers)\n\n @property\n def save_model(self):\n return self._save_model\n\n @property\n def state(self):\n return self._state\n\n @property\n def states_list(self):\n return self._states_list\n","sub_path":"trader/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":3286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"474497044","text":"# Imports\nimport nltk\nfrom nltk.tokenize import word_tokenize\nfrom nltk import tokenize\nnltk.download('punkt')\nimport pandas as pd\nfrom pandas import DataFrame\nimport numpy as np\nfrom tqdm import tqdm\nfrom collections import Counter\nimport matplotlib.pyplot as plt\nimport os\n\n# Data Preprocessing\ndef split_on_dialogue(data_path):\n \"\"\"\n Returns list with conversations\n Format conversations: [[conversation1], [conversation2], ..., [conversation_n]]\n \"\"\"\n\n with open(data_path) as f:\n lines = f.readlines()\n f.close()\n\n i = 0\n j = 0\n dialogue_i = 0\n convo = []\n conversations = []\n\n for line in lines:\n i += 1\n tokens = word_tokenize(line)\n\n if line[:8] == 'Dialogue':\n dialogue_i = i + 1\n\n if i == dialogue_i + j:\n convo.append(line)\n j += 1\n if len(tokens) == 0:\n conversations.append(convo)\n convo = []\n j = 0\n continue\n return conversations\n\n\ndef split_on_sentences(conversations):\n \"\"\"\n A function that splits the conversations into sentences.\n \"\"\"\n\n sentence_list = []\n\n for conversation in conversations:\n for sentences in conversation:\n token_sen = tokenize.sent_tokenize(sentences)\n for sentence in token_sen:\n if sentence != 'Patient:' and sentence != 'Doctor:':\n sentence_list.append(sentence)\n\n return sentence_list\n\n\ndef save(df, save_preprocessed_dataframe_path, name):\n \"\"\"\n Function that saves the created dataframe as a csv.\n \"\"\"\n\n df.to_csv(save_preprocessed_dataframe_path + name + '.csv', index=False)\n\n\ndef preprocess_to_csv(data_path, save_to):\n \"\"\"\n A function that preprocesses the data (so that it is displayed per sentence),\n and saves it as a .csv file for later use.\n \"\"\"\n\n # Split on dialogue\n conversations = split_on_dialogue(data_path)\n\n # 
Split on sentence\n sentences = split_on_sentences(conversations)\n\n # Make dataframe and drop duplicates\n df_sent = pd.DataFrame(np.array(sentences), columns=['sentences'])\n df_sent.drop_duplicates(keep='first', inplace=True)\n\n # Save\n name = os.path.basename(data_path)\n save(df_sent, save_to, name[:-4])\n\n print(name, 'done')\n\n\n# Implementation\ndef get_predicted_symptoms(prediction):\n \"\"\"\n This function takes in the prediction of a sentence of the pre-trained model\n and returns the symptoms mentioned in that sentence.\n \"\"\"\n\n symptoms = []\n\n # Check if there is a predicted entity\n if len(prediction[0]['entity']) > 0:\n number_of_entities = len(prediction[0]['entity'])\n\n # Loop over predicted entities and get symptoms (here called: disease)\n for i in range(number_of_entities):\n if prediction[0]['entity'][i]['type'] == 'disease':\n symptoms.append(prediction[0]['entity'][i]['mention'])\n\n return symptoms\n\n\ndef get_symptoms_data(df):\n \"\"\"\n This function takes a (preprocessed) dataframe as input, determines the predicted\n symptoms per phrase, and outputs a dictionary with the 5 most frequent symptoms used,\n while also plotting the counts of the words in a barplot.\n\n The steps (1-5) are explained in the report.\n \"\"\"\n\n all_symptoms = []\n skipped_sentences = 0\n\n for ind in tqdm(df.index):\n sentence = df['sentences'][ind]\n\n # Padding is needed because the algorithm is not used to small sentences\n if len(sentence) < 45:\n sentence = sentence + '...'\n\n # Step 1\n try:\n prediction = trainer.predict([sentence])\n except:\n skipped_sentences += 1\n continue\n\n # Step 2\n predicted_symptom = get_predicted_symptoms(prediction)\n predicted_symptom = [x.lower() for x in predicted_symptom]\n predicted_symptom = [x.split(', ')[0] for x in predicted_symptom]\n\n # Step 3\n for symptom in predicted_symptom:\n if symptom != 'coronavirus': # Remove 'coronavirus', because it is not a symptom\n all_symptoms.append(symptom)\n\n df_symptoms = DataFrame(all_symptoms, columns=['symptoms'])\n\n # Step 4\n count_symptoms = Counter()\n\n # Split on 'enter(\\n)' so that grouped words stay 1 symptom\n df_symptoms['symptoms'].str.lower().str.split('\\n').apply(count_symptoms.update)\n\n # Most common symptoms: Step 5\n print('Skipped sentences: ', skipped_sentences)\n top5 = dict(count_symptoms.most_common(5))\n print(top5)\n plt.bar(range(len(top5)), list(top5.values()), align='center')\n plt.xticks(range(len(top5)), list(top5.keys()))\n plt.title('Most common symptoms')\n plt.show()\n\n\n# Evaluation\ndef accuracy(df, trainer):\n \"\"\"\n This function computes the accuracy score, given a dataframe.\n \"\"\"\n number_of_symptoms = 0\n number_of_well_predicted = 0\n\n for ind in tqdm(df.index):\n\n sentence = df['sentences'][ind]\n\n # Padding is needed because the algorithm is not used to small sentences\n if len(sentence) < 60:\n sentence = sentence + '...'\n\n prediction = trainer.predict([sentence])\n\n predicted_symptom = get_predicted_symptoms(prediction)\n predicted_symptom = [x.lower() for x in predicted_symptom]\n predicted_symptom = [x.split(', ')[0] for x in predicted_symptom]\n gt_symptom = df['symptoms'][ind]\n\n # If it's not nan\n if isinstance(gt_symptom, str):\n gt_list = gt_symptom.split(', ')\n\n # Keep track of symptoms\n for symptom in gt_list:\n number_of_symptoms += 1\n\n # Keep track of well predicted symptoms\n if symptom in predicted_symptom:\n number_of_well_predicted += 1\n\n print('Ground truth symptoms: ', number_of_symptoms)\n 
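Steps 4-5 above funnel every predicted symptom through `collections.Counter` and keep the five most frequent. The counting idiom in isolation, with toy strings:

```python
from collections import Counter

symptoms = ['cough', 'fever', 'cough', 'headache', 'fever', 'cough']  # toy data
counts = Counter(symptoms)
print(dict(counts.most_common(2)))  # {'cough': 3, 'fever': 2}
```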
print('Correctly predicted symptoms ', number_of_well_predicted)\n print('accuracy: ', number_of_well_predicted / number_of_symptoms)\n","sub_path":"LSDP2021-Group2-main/RQ1_utils.py","file_name":"RQ1_utils.py","file_ext":"py","file_size_in_byte":6020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"98456784","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass BaseConv(nn.Module):\n def __init__(self, in_channel, out_channel, middle_channels = [], kernel_size = 3, padding = 1):\n super().__init__()\n list_channels = [in_channel]\n for middle_channel in middle_channels:\n list_channels.append(middle_channel)\n list_channels.append(out_channel)\n layers = []\n for in_c, out_c in zip(list_channels[:-1], list_channels[1:]):\n layers.extend([\n nn.Conv2d(in_c, out_c, kernel_size = kernel_size, padding = padding),\n nn.BatchNorm2d(out_c),\n nn.ReLU(inplace = True)\n ])\n self.base_conv = nn.Sequential(*layers)\n def forward(self, x):\n return self.base_conv(x)\n\nclass DownBlock(nn.Module):\n def __init__(self, in_channel, out_channel, middle_channels = [], kernel_size = 3, padding = 1):\n super().__init__()\n self.base_conv = BaseConv(in_channel, out_channel, middle_channels, kernel_size, padding)\n self.downsample = nn.MaxPool2d(2)\n def forward(self, x):\n x = self.downsample(x)\n return self.base_conv(x)\n \nclass UpBlock(nn.Module):\n def __init__(self, in_channel, out_channel, middle_channels = [], kernel_size = 3, padding = 1, use_bilinear = True, skip_method = 'sum'):\n super().__init__()\n self.skip_method = skip_method\n if use_bilinear:\n self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) \n else:\n self.upsample = nn.ConvTranspose2d(in_channel, in_channel, kernel_size = 2, stride = 2)\n if skip_method == 'sum':\n self.base_conv = BaseConv(in_channel, out_channel, middle_channels, kernel_size, padding)\n else:\n self.base_conv = BaseConv(in_channel*2, out_channel, middle_channels, kernel_size, padding)\n def forward(self, x_skip, x):\n x = self.upsample(x)\n h_pad = x_skip.size()[2] - x.size()[2]\n w_pad = x_skip.size()[3] - x.size()[3]\n # F.pad pads the last (width) dimension first, so width offsets come first\n x = F.pad(x, [w_pad // 2, w_pad - w_pad // 2,\n h_pad // 2, h_pad - h_pad // 2])\n x = x_skip + x if self.skip_method == 'sum' else torch.cat([x_skip, x], dim = 1)\n x = self.base_conv(x)\n return x\n \nclass BDPBlock(nn.Module):\n def __init__(self, in_channel, out_channel, middle_channels = [], kernel_size = 3, padding = 1, use_bilinear = False):\n super().__init__()\n if use_bilinear:\n self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True) \n else:\n self.upsample = nn.ConvTranspose2d(in_channel, in_channel, kernel_size = 2, stride = 2)\n self.base_conv = BaseConv(in_channel, out_channel, middle_channels, kernel_size, padding)\n def forward(self, x_skip, x):\n x = self.upsample(x)\n h_pad = x_skip.size()[2] - x.size()[2]\n w_pad = x_skip.size()[3] - x.size()[3]\n x = F.pad(x, [w_pad // 2, w_pad - w_pad // 2,\n h_pad // 2, h_pad - h_pad // 2])\n x = x_skip + x\n x = self.base_conv(x)\n return x\n \nclass MDPBlock(nn.Module):\n def __init__(self, in_channel, out_channel, upsample_channel, kernel_size = 3, padding = 1):\n super().__init__()\n self.upsample = nn.ConvTranspose2d(in_channel, upsample_channel, kernel_size = 2, stride = 2)\n self.base_conv = BaseConv(upsample_channel, out_channel, [], kernel_size, padding)\n def forward(self, x_skip, x, 
skip = True):  # 'skip' is accepted but currently unused\n        x = self.upsample(x)\n        h_pad = x_skip.size()[2] - x.size()[2]\n        w_pad = x_skip.size()[3] - x.size()[3]\n        # F.pad fills the last dimension (width) first\n        x = F.pad(x, [w_pad // 2, w_pad - w_pad // 2,\n                      h_pad // 2, h_pad - h_pad // 2])\n        x = x_skip + x\n        x = self.base_conv(x)\n        return x","sub_path":"models/PyTorch/.ipynb_checkpoints/torch_components-checkpoint.py","file_name":"torch_components-checkpoint.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"72441667","text":"όνομα = input('Enter file:')\nhandle = open(όνομα, 'r')\nπλήθη = dict()\n\nfor γραμμή in handle:\n    λέξεις = γραμμή.split()\n    for λέξη in λέξεις:\n        πλήθη[λέξη] = πλήθη.get(λέξη, 0) + 1\n\nmaxπλήθος = None\nmaxλέξη = None\nfor λέξη, πλήθος in list(πλήθη.items()):\n    if maxπλήθος is None or πλήθος > maxπλήθος:\n        maxλέξη = λέξη\n        maxπλήθος = πλήθος\n\nprint(maxλέξη, maxπλήθος)\n","sub_path":"code3/words.py","file_name":"words.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"61952404","text":"from turtle import Turtle\n\nSTARTING_POS = ((0, 0), (-20, 0), (-40, 0))\nMOVE_DISTANCE = 20\nRIGHT = 0\nUP = 90\nLEFT = 180\nDOWN = 270\n\n\nclass Snake:\n\n    def __init__(self):\n        self.segments = []\n        self.create_snake()\n        self.head = self.segments[0]\n\n    def create_snake(self):\n        for pos in STARTING_POS:\n            self.segments.append(Turtle('square'))\n            self.segments[-1].color('white')\n            self.segments[-1].pu()\n            self.segments[-1].goto(pos)\n\n    def move(self):\n        for idx in range(len(self.segments) - 1, 0, -1):\n            pos = self.segments[idx - 1].pos()\n            self.segments[idx].goto(pos)\n        self.head.fd(MOVE_DISTANCE)\n\n    def up(self):\n        if self.head.heading() != DOWN:\n            self.head.seth(UP)\n\n    def down(self):\n        if self.head.heading() != UP:\n            self.head.seth(DOWN)\n\n    def left(self):\n        if self.head.heading() != RIGHT:\n            self.head.seth(LEFT)\n\n    def right(self):\n        if self.head.heading() != LEFT:\n            self.head.seth(RIGHT)\n","sub_path":"days011-020/day020/steps/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"36102061","text":"import nltk\nnltk.download(\"stopwords\")\nfrom nltk.corpus import stopwords\n\n\ndef remove_stop_words(text):\n    \"\"\"\n    Removes the English list of stopwords (https://gist.github.com/sebleier/554280)\n    from a given text.\n\n    Args:\n        text (str): Text to process.\n\n    Returns:\n        str: Text with removed stopwords.\n    \"\"\"\n    STOPLIST = stopwords.words('english')\n    text = ' '.join([word for word in text.split() if word not in STOPLIST])\n    return text\n\n\ndef tokenize_text(text):\n    \"\"\"\n    Tokenizes and tags a given text.\n\n    Args:\n        text (str): Text to process.\n\n    Returns:\n        List(Tuple[str, str]): List of word-tag pairs.\n    \"\"\"\n    nltk.download('punkt')\n    nltk.download('averaged_perceptron_tagger')\n    tokens = nltk.word_tokenize(text)\n    tagged = nltk.pos_tag(tokens)\n    return tagged\n\n\ndef get_entities(tagged_text):\n    \"\"\"\n    Identifies named entities.\n\n    Args:\n        tagged_text (str): Tagged text to process.\n\n    Returns:\n        nltk.tree.Tree: A hierarchical grouping of entities.\n    \"\"\"\n    nltk.download('maxent_ne_chunker')\n    nltk.download('words')\n    entities = nltk.chunk.ne_chunk(tagged_text)\n    return 
entities\n","sub_path":"workshop/part2_azure_functions/shared_code/nlp_text_processing.py","file_name":"nlp_text_processing.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"630629106","text":"from operator import methodcaller\n\nimport numpy as np\nimport pandas as pd\nimport pandas.util.testing as tm\nimport pytest\n\nfrom ibis import literal as L\n\npytest.importorskip('clickhouse_driver')\npytestmark = pytest.mark.clickhouse\n\n\n@pytest.mark.parametrize(\n ('reduction', 'func_translated'),\n [\n ('sum', 'sum'),\n ('count', 'count'),\n ('mean', 'avg'),\n ('max', 'max'),\n ('min', 'min'),\n ('std', 'stddevSamp'),\n ('var', 'varSamp'),\n ],\n)\ndef test_reduction_where(con, alltypes, translate, reduction, func_translated):\n template = '{0}If(`double_col`, `bigint_col` < 70)'\n expected = template.format(func_translated)\n\n method = getattr(alltypes.double_col, reduction)\n cond = alltypes.bigint_col < 70\n expr = method(where=cond)\n\n assert translate(expr) == expected\n\n\ndef test_std_var_pop(con, alltypes, translate):\n cond = alltypes.bigint_col < 70\n expr1 = alltypes.double_col.std(where=cond, how='pop')\n expr2 = alltypes.double_col.var(where=cond, how='pop')\n\n assert translate(expr1) == 'stddevPopIf(`double_col`, `bigint_col` < 70)'\n assert translate(expr2) == 'varPopIf(`double_col`, `bigint_col` < 70)'\n assert isinstance(con.execute(expr1), np.float)\n assert isinstance(con.execute(expr2), np.float)\n\n\n@pytest.mark.parametrize('reduction', ['sum', 'count', 'max', 'min'])\ndef test_reduction_invalid_where(con, alltypes, reduction):\n condbad_literal = L('T')\n\n with pytest.raises(TypeError):\n fn = methodcaller(reduction, where=condbad_literal)\n fn(alltypes.double_col)\n\n\n@pytest.mark.parametrize(\n ('func', 'pandas_func'),\n [\n (\n lambda t, cond: t.bool_col.count(),\n lambda df, cond: df.bool_col.count(),\n ),\n (\n lambda t, cond: t.bool_col.approx_nunique(),\n lambda df, cond: df.bool_col.nunique(),\n ),\n (\n lambda t, cond: t.double_col.sum(),\n lambda df, cond: df.double_col.sum(),\n ),\n (\n lambda t, cond: t.double_col.mean(),\n lambda df, cond: df.double_col.mean(),\n ),\n (\n lambda t, cond: t.int_col.approx_median(),\n lambda df, cond: np.int32(df.int_col.median()),\n ),\n (\n lambda t, cond: t.double_col.min(),\n lambda df, cond: df.double_col.min(),\n ),\n (\n lambda t, cond: t.double_col.max(),\n lambda df, cond: df.double_col.max(),\n ),\n (\n lambda t, cond: t.double_col.var(),\n lambda df, cond: df.double_col.var(),\n ),\n (\n lambda t, cond: t.double_col.std(),\n lambda df, cond: df.double_col.std(),\n ),\n (\n lambda t, cond: t.double_col.var(how='sample'),\n lambda df, cond: df.double_col.var(ddof=1),\n ),\n (\n lambda t, cond: t.double_col.std(how='pop'),\n lambda df, cond: df.double_col.std(ddof=0),\n ),\n (\n lambda t, cond: t.bool_col.count(where=cond),\n lambda df, cond: df.bool_col[cond].count(),\n ),\n (\n lambda t, cond: t.double_col.sum(where=cond),\n lambda df, cond: df.double_col[cond].sum(),\n ),\n (\n lambda t, cond: t.double_col.mean(where=cond),\n lambda df, cond: df.double_col[cond].mean(),\n ),\n (\n lambda t, cond: t.float_col.approx_median(where=cond),\n lambda df, cond: df.float_col[cond].median(),\n ),\n (\n lambda t, cond: t.double_col.min(where=cond),\n lambda df, cond: df.double_col[cond].min(),\n ),\n (\n lambda t, cond: t.double_col.max(where=cond),\n lambda df, cond: df.double_col[cond].max(),\n ),\n (\n lambda t, cond: 
t.double_col.var(where=cond),\n            lambda df, cond: df.double_col[cond].var(),\n        ),\n        (\n            lambda t, cond: t.double_col.std(where=cond),\n            lambda df, cond: df.double_col[cond].std(),\n        ),\n        (\n            lambda t, cond: t.double_col.var(where=cond, how='sample'),\n            lambda df, cond: df.double_col[cond].var(),\n        ),\n        (\n            lambda t, cond: t.double_col.std(where=cond, how='pop'),\n            lambda df, cond: df.double_col[cond].std(ddof=0),\n        ),\n    ],\n)\ndef test_aggregations(alltypes, df, func, pandas_func, translate):\n    table = alltypes.limit(100)\n    count = table.count().execute()\n    df = df.head(int(count))\n\n    cond = table.string_col.isin(['1', '7'])\n    mask = cond.execute().astype('bool')\n    expr = func(table, cond)\n\n    result = expr.execute()\n    expected = pandas_func(df, mask)\n\n    np.testing.assert_allclose(result, expected)\n\n\n@pytest.mark.parametrize(\n    'op',\n    [\n        methodcaller('sum'),\n        methodcaller('mean'),\n        methodcaller('min'),\n        methodcaller('max'),\n        methodcaller('std'),\n        methodcaller('var'),\n    ],\n)\ndef test_boolean_reduction(alltypes, op, df):\n    result = op(alltypes.bool_col).execute()\n    assert result == op(df.bool_col)\n\n\ndef test_anonymous_aggregate(alltypes, df, translate):\n    t = alltypes\n    expr = t[t.double_col > t.double_col.mean()]\n    result = expr.execute().set_index('id')\n    expected = df[df.double_col > df.double_col.mean()].set_index('id')\n    tm.assert_frame_equal(result, expected, check_like=True)\n\n\ndef test_boolean_summary(alltypes):\n    expr = alltypes.bool_col.summary()\n    result = expr.execute()\n    expected = pd.DataFrame(\n        [[7300, 0, 0, 1, 3650, 0.5, 2]],\n        columns=[\n            'count',\n            'nulls',\n            'min',\n            'max',\n            'sum',\n            'mean',\n            'approx_nunique',\n        ],\n    )\n    tm.assert_frame_equal(\n        result, expected, check_column_type=False, check_dtype=False\n    )\n","sub_path":"ibis/clickhouse/tests/test_aggregations.py","file_name":"test_aggregations.py","file_ext":"py","file_size_in_byte":5940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"575732902","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nimport matplotlib.animation as animation\nimport tushare as ts\nfrom datetime import timedelta,datetime\n\npro = ts.pro_api('b7bbd6907437d1f894145e7487709d27ee220b6c3772205ef05c83fb')\n\ndays = 4\nnow = datetime.now().date()\n\nfd = now-timedelta(days=days)\n\nax = None\n# code = \"000425\"\ncode = '600703'\nfor d in [str(fd+timedelta(days=i)) for i in range(days+1)]:\n    df = ts.get_tick_data(code,date=d,src='tt')\n    a = df.groupby([\"price\"]).agg({\"volume\":\"sum\"})\n    a = a.head(90)\n    # draw every day on one shared Axes (create it on the first pass only)\n    if ax is None:\n        ax = a.plot(figsize=(30,10))\n    else:\n        a.plot(ax=ax)\n    plt.xticks(rotation=30)\n    plt.title(d)\nplt.show()","sub_path":"tushare_example/dynamic_bar.py","file_name":"dynamic_bar.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"46138070","text":"#!/usr/bin/env python3.6\nimport gpio\nimport logging\nimport paho.mqtt.client as mqtt\nprint( \"Test BTN\\n\")\nBTN = 488\ngpio.log.setLevel(logging.INFO)\ngpio.setup(BTN, gpio.IN)\n\nclient = mqtt.Client()\nclient.connect(\"localhost\", 1883, 60)\n\nprev = gpio.read(BTN)\nwhile(True):\n    value = gpio.read(BTN)\n    if value != prev: \n        print(value)\n        client.reconnect()\n        client.publish(\"gpio/488\", str(value))\n    prev = value\n\n\n 
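# Note (an editorial sketch, assuming the standard paho-mqtt API; not part of\n# the original script): instead of calling client.reconnect() on every edge,\n# a long-running publisher can start the client's own network loop once,\n# which keeps the connection alive in a background thread:\n#\n#   client = mqtt.Client()\n#   client.connect(\"localhost\", 1883, 60)\n#   client.loop_start()  # background thread handles keepalive and reconnects\n#   ...\n#   client.publish(\"gpio/488\", str(value))\n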
","sub_path":"Lab1/MQTT/button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"112298771","text":"from sqlalchemy.orm import Session\nfrom models.base import *\n\n\ndef get_all_annual(\n *,\n db:Session,\n):\n positions = db.query(Annual).all()\n return positions\n\ndef create_annual(\n *,\n db : Session,\n in_name:str,\n start_day,\n end_day,\n start_time,\n end_time,\n in_kind:str,\n annual_txt:str):\n\n name_id = db.query(User).filter(User.name == in_name).first().id\n kind_id = db.query(Kind).filter(Kind.kind==in_kind).first().id\n\n new_annual = Annual(\n name_id=name_id,\n start_day = start_day,\n end_day = end_day,\n start_time = start_time,\n end_time = end_time,\n kind_id = kind_id,\n annual_txt = annual_txt\n )\n db.add(new_annual)\n db.commit()\n return new_annual","sub_path":"crud/crud_annual.py","file_name":"crud_annual.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"458518433","text":"# ***chat2share application***\nimport os\nimport socket\nimport sys\nfrom threading import Thread\nimport time\nSPLITOR = \"\"\nBUFFER_SIZE = 4096\n\n\n# Function responsible for receiving data packets from peers\ndef GetChatMessage():\n global name\n global broadcastSocket\n while True:\n try:\n recv_message = broadcastSocket.recv(1024) # receive 1024 bytes from peer\n recv_string_message = str(recv_message.decode('utf-8'))# translating the message into a string\n except:\n break\n if recv_string_message.find(':') != -1: \n # if the message contains a colon, then we have messages of the form: 'peer name: message'\n if(recv_string_message[:recv_string_message.find(':')] != name):\n # print a message from the peer to the other peers console\n print('\\r%s\\n' % recv_string_message, end='') \n\n\n# Function responsible for sending messages to all peers\ndef SendChatMessage ():\n global name\n global sendSocket\n sendSocket.setblocking (False)\n while True: # endless loop\n data = input () # input message by peer\n if data == 'Exit()':\n # if someone from the peers wants to exit the program\n sendSocket.close()\n broadcastSocket.close()\n break\n elif data != '' and data != 'Exit()':\n # if the message is not empty and there is no exit message\n send_message = name + ':' + data # form a message in a readable format\n sendSocket.sendto (send_message.encode ('utf-8'), ('255.255.255.255', 8080)) # send message to all peers in the subnet\n else:\n # if the user did not enter a message (i.e. 
tried to send an empty message)\n            print ('Write your message first!')\n\n\n# Chatting function responsible for connecting to chat room\ndef chatting ():\n    global broadcastSocket\n    # socket for receiving messages from peers\n    broadcastSocket = socket.socket (socket.AF_INET, socket.SOCK_DGRAM) # initialize a socket to work with IPv4 addresses using UDP protocol\n    broadcastSocket.setsockopt (socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # SO_REUSEADDR : indicates that several applications can listen to the socket at once\n    broadcastSocket.setsockopt (socket.SOL_SOCKET, socket.SO_BROADCAST, 1) # SO_BROADCAST : indicates that the packets will be broadcast\n    broadcastSocket.bind (('0.0.0.0', 8080)) # bind to address '0.0.0.0' to listen on all interfaces\n    \n    global sendSocket\n    # socket for sending messages to peers\n    sendSocket = socket.socket (socket.AF_INET, socket.SOCK_DGRAM) # initialize the socket to work with IPv4 addresses using the UDP protocol\n    sendSocket.setsockopt (socket.SOL_SOCKET, socket.SO_BROADCAST, 1) # SO_BROADCAST : indicates that the packets will be broadcast\n\n    # greeting when the client enters the chat room\n    print (\"***********************************************\")\n    print (\"* Welcome <\" +name+ \"> to our chat2share! *\")\n    print (\"* To Quit, Enter message: Exit() *\")\n    print (\"* Enjoy Chatting! *\")\n    print (\"***********************************************\")\n\n    global recvThread\n    recvThread = Thread (target = GetChatMessage) # thread to receive messages from peers\n\n    global sendMsgThread\n    sendMsgThread = Thread (target = SendChatMessage) # thread to send messages from peers\n\n    recvThread.start () # start a thread to receive messages from peers\n    sendMsgThread.start () # start a thread to send messages to all peers\n    \n    recvThread.join () # block the thread in which the call is made until recvThread completes\n    sendMsgThread.join () # block the thread in which the call is made until sendMsgThread is completed\n\n\nclass ClientThread(Thread):\n    def __init__(self,ip,port,sock):\n        Thread.__init__(self)\n        self.ip = ip\n        self.port = port\n        self.sock = sock\n    # send files to the connected client over its socket\n    def run(self):\n        cl_name = self.sock.recv(16).decode()\n        print(\"Sending to\",cl_name)\n        while True:\n            filename = input(\"Enter filename :\")\n            # tell the client the requested file does not exist, then prompt for another name\n            if not os.path.isfile(filename):\n                self.sock.send(('0').encode())\n                print(\"File does not exist\")\n            else:\n                self.sock.send(('1').encode())\n                # Get file size\n                filesize = os.path.getsize(filename)\n                # Send name and size of file\n                self.sock.send(f\"{filename}{SPLITOR}{filesize}\".encode())\n                # Sending the file\n                with open(filename, \"rb\") as f:\n                    while (filesize):\n                        bytes_read = f.read(BUFFER_SIZE)\n                        if not bytes_read:\n                            break\n                        self.sock.sendall(bytes_read)\n                        filesize -= len(bytes_read)\n                    f.close()\n                # Continue to send more files\n                ans = input('\\nDo you want to continue(y/n) :') \n                self.sock.send(ans.encode())\n                if ans == 'y': \n                    continue\n                else: \n                    break\n        # close client socket\n        self.sock.close()\n\n\ndef sending():\n    HOST = \"127.0.0.1\"\n    PORT = 5555\n    # create the server socket (TCP)\n    s = socket.socket()\n    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n    # bind the socket to address\n    s.bind((HOST, PORT))\n    # threads = []\n    print(\"Server is Listening on :\", PORT)\n    cli = int(input(\"No. 
of clients: \"))\n count = 0\n while True:\n count += 1\n # print(count)\n if(count > cli):\n break\n s.listen(5)\n print (\"Waiting for incoming connections...\")\n (conn, (ip,port)) = s.accept()\n # print ('Got connection from ', (ip,port))\n newthread = ClientThread(ip,port,conn)\n newthread.start()\n newthread.join()\n # threads.append(newthread)\n # for t in threads:\n # t.join()\n s.close() \n\n\ndef receiving():\n HOST = \"127.0.0.1\"\n PORT = 5555\n\n # create the client socket\n s = socket.socket()\n print(\"Connecting to :\", PORT)\n try:\n s.connect((HOST, PORT))\n print(\"Successfully Connected.\")\n s.send(name.encode())\n # Get the filename that exists\n try:\n while True:\n exists = s.recv(16).decode()\n if exists != '0':\n received_filename = s.recv(BUFFER_SIZE).decode()\n filename, filesize = received_filename.split(SPLITOR)\n # Extract only filename\n filename = os.path.basename(filename)\n # convert filesize to integer\n filesize = int(filesize)\n # start receiving the file \n with open(filename, \"wb\") as f:\n while (filesize):\n bytes_read = s.recv(BUFFER_SIZE)\n if not bytes_read: \n break\n f.write(bytes_read)\n filesize -= len(bytes_read)\n f.close()\n print(\"File stored :\",filename)\n ans = s.recv(2).decode()\n if ans == 'y': \n continue\n else: \n break\n # close the socket\n s.close()\n except:\n print(\"**Current connection failed**\")\n while(1):\n temp = input(\"Do you wish to connect to other Sender (y/n): \")\n if(temp=='y'):\n receiving()\n break\n elif(temp=='n'):\n break\n else:\n print(\"Invalid input\")\n\n except:\n print(\"**No one is Sending**\")\n\n\ndef main():\n global name\n name = '' # username\n while True:\n if not name:\n # if name is empty\n name = input ('Your Name: ')\n if not name:\n # if name is empty\n print ('Please enter a non-empty name!')\n else:\n # if the name is entered, then exit the loop\n break\n print (\"***********************************************\") # delimiter\n while(True):\n print(\"Choose your option\")\n print(\"1. Chatting\")\n print(\"2. Sending\")\n print(\"3. Receiving\")\n print(\"4. 
Exit\")\n check = input(\"Enter Choice: \")\n if(check == '1'):\n chatting()\n elif(check=='2'):\n sending()\n elif(check=='3'):\n receiving()\n elif(check=='4'):\n break\n else:\n print(\"***Wrong Choice***\")\nif __name__ == '__main__':\n main()","sub_path":"peer2/chat2share.py","file_name":"chat2share.py","file_ext":"py","file_size_in_byte":8930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"457226740","text":"import numpy as np\n\n\ndef get_state_embeddings(data_dict, dirname):\n region_names = data_dict['region_names']\n region_weights = data_dict['region_weights']\n\n squeezed = region_weights.ndim == 1\n if squeezed:\n region_weights = region_weights[:, np.newaxis]\n\n n_subsets = region_weights.shape[1]\n state_names = sorted({r[:2] for r in region_names})\n state_lookup = {n: i for i, n in enumerate(state_names)}\n\n transform = np.zeros(\n (len(state_names), len(region_names), n_subsets))\n for r_i, (r, w) in enumerate(zip(region_names, region_weights)):\n transform[state_lookup[r[:2]], r_i, :] = w\n\n state_weights = transform.sum(axis=1)\n transform /= state_weights[:, np.newaxis, :]\n\n ret = {'state_names': state_names, 'state_weights': state_weights}\n for k in data_dict:\n if k.startswith('emb_'):\n v = data_dict[k]\n if squeezed:\n v = v[:, :, np.newaxis]\n ret[k] = np.einsum('grs, rfs -> gfs', transform, v)\n if squeezed:\n ret[k] = ret[k][:, :, 0]\n elif k in {'region_names', 'region_weights'}:\n pass\n else:\n ret[k] = data_dict[k]\n return ret\n","sub_path":"pummeler/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"286480977","text":"\n\nfrom xai.brain.wordbase.nouns._beech import _BEECH\n\n#calss header\nclass _BEECHES(_BEECH, ):\n\tdef __init__(self,): \n\t\t_BEECH.__init__(self)\n\t\tself.name = \"BEECHES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"beech\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_beeches.py","file_name":"_beeches.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"496167510","text":"import datetime\nfrom distutils.version import LooseVersion\nimport math\nimport os\nimport os.path as osp\nimport shutil\n\nimport numpy as np\nimport pytz\nimport torch\nimport torchvision\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nfrom utils.losses import WeightedCrossEntropyLoss\nfrom sklearn.metrics import accuracy_score, f1_score\nfrom utils.evaluators import fp_fn_image_csi_muti, fp_fn_image_csi_muti_seg, torch_csi_muti\nfrom utils.units import dbz_mm, denorm, torch_denorm\nfrom utils.visualizers import rainfall_shade\nfrom tensorboardX import SummaryWriter\nfrom datetime import datetime\nfrom global_config import global_config\n\n# torch.autograd.set_detect_anomaly(True)\n\nclass Trainer(object):\n\n def __init__(\n self,\n config,\n model,\n optimizer,\n data_loader,\n save_dir,\n max_iterations=4,\n interval_validate=100,\n interval_checkpoint=1500\n ):\n self.config = config\n self.model = model\n self.optim = optimizer\n self.data_loader = data_loader\n self.save_dir = save_dir + datetime.now().strftime(\"_%m%d%H%M\")\n self.max_iterations = max_iterations\n self.interval_validate = interval_validate\n self.interval_checkpoint = interval_checkpoint\n\n if not os.path.exists(self.save_dir):\n 
os.makedirs(self.save_dir)\n\n self.epoch = 1\n self.iteration = 0\n self.pbar_i = tqdm(range(1, max_iterations + 1))\n\n self.mse_loss = torch.nn.MSELoss().to(config['DEVICE'])\n self.mae_loss = torch.nn.L1Loss().to(config['DEVICE'])\n # self.cat_loss = WeightedCrossEntropyLoss()\n # self.cat_weight = torch.tensor([1, 20, 50, 100]).float().to(config['DEVICE'])\n\n self.train_loss = 0\n self.val_loss = 0\n self.best_val_loss = np.inf\n self.metrics_name = ['csi_0', 'csi_1', 'csi_2', 'csi_3']\n self.train_metrics_value = np.zeros(len(self.metrics_name))\n self.val_metrics_value = np.zeros(len(self.metrics_name))\n\n self.writer = SummaryWriter(os.path.join(self.save_dir, 'train_logs'))\n \n def validate(self):\n\n self.model.eval()\n n_val_batch = self.data_loader.n_val_batch()\n n_val = 20\n self.val_loss = 0\n self.val_metrics_value[:] = 0\n for ib_val, b_val in enumerate(np.random.choice(n_val_batch, n_val)):\n\n self.pbar_i.set_description(\"Validating at batch %d / %d\" % (ib_val, n_val))\n val_data, val_label = self.data_loader.get_val(b_val)\n with torch.no_grad():\n output = self.model(val_data)\n \n loss = self.mse_loss(output, val_label) + self.mae_loss(output, val_label)\n \n self.val_loss += loss.data.item() / len(val_data)\n # lbl_pred = output\n # lbl_true = val_label\n lbl_pred = output.detach().cpu().numpy()\n lbl_true = val_label.cpu().numpy()\n# print('val', lbl_pred.shape, lbl_true.shape)\n # csis, w_csi = torch_csi_muti(torch_denorm(lbl_pred), torch_denorm(lbl_true))\n csis, w_csi = fp_fn_image_csi_muti(denorm(lbl_pred), denorm(lbl_true))\n self.val_metrics_value += csis\n\n self.train_loss /= self.interval_validate\n self.train_metrics_value /= self.interval_validate\n self.val_loss /= n_val\n self.val_metrics_value /= n_val\n self.writer.add_scalars('loss', {\n 'train': self.train_loss,\n 'valid': self.val_loss\n }, self.epoch)\n for i in range(len(self.metrics_name)):\n self.writer.add_scalars(self.metrics_name[i], {\n 'train': self.train_metrics_value[i],\n 'valid': self.val_metrics_value[i]\n }, self.epoch)\n\n# print('img', lbl_pred[0].shape, lbl_true[0].shape)\n # lbl_pred = lbl_pred.detach().cpu().numpy()\n # lbl_true = lbl_true.cpu().numpy()\n if self.config['DIM'] == '3D':\n lbl_pred = lbl_pred[:, :, -1]\n lbl_true = lbl_true[:, :, -1]\n elif self.config['DIM'] == '2D':\n lbl_pred = lbl_pred[:, -1]\n lbl_true = lbl_true[:, -1]\n elif self.config['DIM'] == 'RR':\n lbl_pred = lbl_pred[:, -1, None]\n lbl_true = lbl_true[:, -1, None]\n self.writer.add_image('result/pred',\n rainfall_shade(denorm(lbl_pred[0, 0])).swapaxes(0,2), \n self.epoch)\n self.writer.add_image('result/true',\n rainfall_shade(denorm(lbl_true[0, 0])).swapaxes(0,2), \n self.epoch)\n\n if self.val_loss <= self.best_val_loss:\n try:\n torch.save(self.model.module.state_dict(), os.path.join(self.save_dir, \n 'model_best.pth'))\n except:\n torch.save(self.model.state_dict(), os.path.join(self.save_dir, \n 'model_best.pth'))\n self.best_val_loss = self.val_loss\n with open(os.path.join(self.save_dir, \"best.txt\"), \"w\") as file:\n file.write(str(self.epoch))\n \n self.train_loss = 0\n self.train_metrics_value[:] = 0\n\n def add_epoch(self):\n\n self.epoch += 1\n if self.epoch % self.interval_validate == 0:\n self.validate()\n if self.epoch % self.interval_checkpoint == 0:\n torch.save(self.model.state_dict(), os.path.join(self.save_dir, \n 'model_{}.pth'.format(self.epoch)))\n\n def train_iteration(self):\n\n n_train_batch = self.data_loader.n_train_batch()\n pbar_b = tqdm(range(n_train_batch))\n 
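# one call = one epoch: sweep every training batch, then self.add_epoch()\n        # below triggers periodic validation and checkpointing at the configured intervals\n        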
for b in pbar_b:\n            self.model.train()\n            pbar_b.set_description('Training at batch %d / %d' % (b, n_train_batch))\n            train_data, train_label = self.data_loader.get_train(b)\n            self.optim.zero_grad()\n            output = self.model(train_data)\n            \n            loss = self.mse_loss(output, train_label) + self.mae_loss(output, train_label)\n            loss.backward()\n\n            self.optim.step()\n            self.train_loss += loss.data.item() / len(train_data)\n\n            # lbl_pred = output\n            # lbl_true = train_label\n            lbl_pred = output.detach().cpu().numpy()\n            lbl_true = train_label.cpu().numpy()\n#             print('train', lbl_pred.shape, lbl_true.shape)\n            # csis, w_csi = torch_csi_muti(torch_denorm(lbl_pred), torch_denorm(lbl_true))\n            csis, w_csi = fp_fn_image_csi_muti(denorm(lbl_pred), denorm(lbl_true))\n            self.train_metrics_value += csis\n        self.add_epoch()\n\n    def train(self):\n        for i in range(self.max_iterations):\n            self.train_iteration()\n            self.pbar_i.update(1)\n        self.pbar_i.close()\n        self.writer.close()\n        try:\n            torch.save(self.model.module.state_dict(), os.path.join(self.save_dir, 'model_last.pth'))\n        except:\n            torch.save(self.model.state_dict(), os.path.join(self.save_dir, 'model_last.pth'))\n        ","sub_path":"utils/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":7167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"281484976","text":"import pytest\nfrom django.contrib.auth.models import User\n\nSTUDENT_USERNAME = \"student\"\nSTUDENT_EMAIL = \"student@example.com\"\nMENTOR_USERNAME = \"mentor\"\nMENTOR_EMAIL = \"mentor@example.com\"\n\n@pytest.fixture\ndef student():\n    student = User.objects.create(username=STUDENT_USERNAME, email=STUDENT_EMAIL)\n    student.profile.approved = True\n    student.profile.save()\n    return student\n\n@pytest.fixture\ndef mentor():\n    mentor = User.objects.create(username=MENTOR_USERNAME, email=MENTOR_EMAIL)\n    mentor.profile.is_mentor = True\n    mentor.profile.approved = True\n    mentor.profile.save()\n    return mentor\n\n@pytest.mark.django_db\ndef test_new_user_has_default_student_profile():\n    user = User.objects.create(username=STUDENT_USERNAME, email=STUDENT_EMAIL)\n    assert user.profile\n    assert user.profile.is_student\n    assert not user.profile.is_mentor\n","sub_path":"profiles/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"459005567","text":"crossword_data = {\n    \"data\": [\n        {\n            \"clue\": \"First letter of the Greek alphabet\",\n            \"answer\": \"alpha\",\n            \"position\": 1,\n            \"orientation\": \"across\",\n            \"startx\": 1,\n            \"starty\": 1\n        },\n        {\n            \"clue\": \"Not a one ___ motor, but a three ___ motor\",\n            \"answer\": \"phase\",\n            \"position\": 3,\n            \"orientation\": \"across\",\n            \"startx\": 7,\n            \"starty\": 1\n        },\n        {\n            \"clue\": \"Created from a separation of charge\",\n            \"answer\": \"capacitance\",\n            \"position\": 5,\n            \"orientation\": \"across\",\n            \"startx\": 1,\n            \"starty\": 3\n        },\n        {\n            \"clue\": \"The speeds of engines without any acceleration\",\n            \"answer\": \"idlespeeds\",\n            \"position\": 8,\n            \"orientation\": \"across\",\n            \"startx\": 1,\n            \"starty\": 5\n        },\n        {\n            \"clue\": \"Complex resistances\",\n            \"answer\": \"impedances\",\n            \"position\": 10,\n            \"orientation\": \"across\",\n            \"startx\": 2,\n            \"starty\": 7\n        },\n        {\n            \"clue\": \"This device is used to step-up, step-down, and/or isolate\",\n            \"answer\": \"transformer\",\n            \"position\": 13,\n            \"orientation\": \"across\",\n            \"startx\": 1,\n            \"starty\": 9\n        },\n        {\n            
\"clue\": \"Type of ray emitted frm the sun\",\n \"answer\": \"gamma\",\n \"position\": 16,\n \"orientation\": \"across\",\n \"startx\": 1,\n \"starty\": 11\n },\n {\n \"clue\": \"C programming language operator\",\n \"answer\": \"cysan\",\n \"position\": 17,\n \"orientation\": \"across\",\n \"startx\": 7,\n \"starty\": 11\n },\n {\n \"clue\": \"Defines the alpha-numeric characters that are typically associated with text used in programming\",\n \"answer\": \"ascii\",\n \"position\": 1,\n \"orientation\": \"down\",\n \"startx\": 1,\n \"starty\": 1\n },\n {\n \"clue\": \"Generally, if you go over 1kV per cm this happens\",\n \"answer\": \"arc\",\n \"position\": 2,\n \"orientation\": \"down\",\n \"startx\": 5,\n \"starty\": 1\n },\n {\n \"clue\": \"Control system strategy that tries to replicate the human through process (abbr.)\",\n \"answer\": \"ann\",\n \"position\": 4,\n \"orientation\": \"down\",\n \"startx\": 9,\n \"starty\": 1\n },\n {\n \"clue\": \"Greek variable that usually describes rotor positon\",\n \"answer\": \"theta\",\n \"position\": 6,\n \"orientation\": \"down\",\n \"startx\": 7,\n \"starty\": 3\n },\n {\n \"clue\": \"Electromagnetic (abbr.)\",\n \"answer\": \"em\",\n \"position\": 7,\n \"orientation\": \"down\",\n \"startx\": 11,\n \"starty\": 3\n },\n {\n \"clue\": \"No. 13 across does this to a voltage\",\n \"answer\": \"steps\",\n \"position\": 9,\n \"orientation\": \"down\",\n \"startx\": 5,\n \"starty\": 5\n },\n {\n \"clue\": \"Emits a lout wailing sound\",\n \"answer\": \"siren\",\n \"position\": 11,\n \"orientation\": \"down\",\n \"startx\": 11,\n \"starty\": 7\n },\n {\n \"clue\": \"Information technology (abbr.)\",\n \"answer\": \"it\",\n \"position\": 12,\n \"orientation\": \"down\",\n \"startx\": 1,\n \"starty\": 8\n },\n {\n \"clue\": \"Asynchronous transfer mode (abbr.)\",\n \"answer\": \"atm\",\n \"position\": 14,\n \"orientation\": \"down\",\n \"startx\": 3,\n \"starty\": 9\n },\n {\n \"clue\": \"Offset current control (abbr.)\",\n \"answer\": \"occ\",\n \"position\": 15,\n \"orientation\": \"down\",\n \"startx\": 7,\n \"starty\": 9\n }\n ]\n}","sub_path":"Backend/src/crossword.py","file_name":"crossword.py","file_ext":"py","file_size_in_byte":4189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"48947233","text":"from base.base_model import Base\nfrom base.util import _gen_uuid, coerce_bson_id\n\n\nclass Comment(Base):\n def __init__(self, **kwargs):\n super(Comment, self).__init__()\n\n self._id = _gen_uuid()\n self.user_id = None\n self.comment = None\n self.obj_id = None\n self.coll_name = None\n\n for k, v in kwargs.iteritems():\n setattr(self, k, v)\n\n @property\n def user(self):\n from base import users\n return users.get(self.user_id)\n\n @staticmethod\n def unserialize(dic):\n return Comment(**dic)\n\n @staticmethod\n def coll_name():\n return \"comment\"","sub_path":"comments/comment/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"310017776","text":"from collections import deque\nfrom PIL import Image\nimport numpy as np\n\nCHAR_BITS = 8\nPRINT_LEN = 10\nimg = np.array(Image.open(\"BinaryWordSearch.png\"))\ngrid = [[int(cell[0] == 255) for cell in row] for row in img]\n\n\ndef to_char(bits: deque) -> str:\n n = 0\n for bit in reversed(bits):\n n <<= 1\n n += bit\n return chr(n)\n\n\nfor i in range(len(grid)):\n s = '\\n----------\\n'\n cur = 
deque(maxlen=CHAR_BITS)\n    for j in range(len(grid[i])):\n        cur.append(grid[i][j])  # scan the row bit by bit (mirrors the column scan below)\n        ch = to_char(cur)\n        if ch.isascii():\n            s += ch\n        else:\n            if len(s) >= PRINT_LEN:\n                print(s, end='')\n            s = '\\n'\n        if len(s) >= 8:\n            print(s)\n\n\nfor i in range(len(grid)):\n    s = '\\n----------\\n'\n    cur = deque(maxlen=CHAR_BITS)\n    for j in range(len(grid[i])):\n        cur.append(grid[j][i]) # if len > 8, leftmost elem will be automatically popped\n        ch = to_char(cur)\n        if ch.isascii():\n            s += ch\n        else:\n            if len(s) >= PRINT_LEN:\n                print(s, end='')\n            s = '\\n'\n        if len(s) >= 8:\n            print(s)\n\n\n\n","sub_path":"HSCTF2020/solve_binary_word_search.py","file_name":"solve_binary_word_search.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"126933021","text":"\nimport classes\nimport extract\nimport load\nimport transform\n\n\ndef handler(event, context):\n    \"\"\"\n    entry point for Lambda function\n    :param event: the Lambda event\n    :param context: the Lambda context\n    :return: None\n    \"\"\"\n\n    print(f\"'event': {event}\")\n    print(f\"'context': {context}\")\n\n    # -----------------------------------------------------\n    # EXTRACT\n\n    # define ny_dataset\n    ny_dataset = classes.Dataset(\"ny_dataset\")\n    ny_dataset.headers_all = [\"date\", \"cases\", \"deaths\"]\n    ny_dataset.headers_key = ny_dataset.headers_all\n    ny_dataset.match_field = \"date\"\n    ny_dataset.source_url = \"https://raw.githubusercontent.com/nytimes/covid-19-data/master/us.csv\"\n\n    # extract and print ny_dataset\n    ny_dataset.df = extract.extract(ny_dataset.source_url)\n    print(f\"'ny_dataset.df':\\n{ny_dataset.df}\")\n\n    # define jh_dataset\n    jh_dataset = classes.Dataset(\"jh_dataset\")\n    jh_dataset.headers_all = [\n        \"Date\", \"Country/Region\", \"Province/State\", \"Lat\", \"Long\", \"Confirmed\", \"Recovered\", \"Deaths\"\n    ]\n    jh_dataset.headers_key = [\"Date\", \"Country/Region\", \"Recovered\"]\n    jh_dataset.match_field = \"Date\"\n    jh_dataset.source_url = \\\n        \"https://raw.githubusercontent.com/datasets/covid-19/master/data/time-series-19-covid-combined.csv\"\n\n    # extract and print jh_dataset\n    jh_dataset.df = extract.extract(jh_dataset.source_url, jh_dataset.headers_key, \"Country/Region\", \"US\")\n    print(f\"'jh_dataset.df':\\n{jh_dataset.df}\")\n\n    # -----------------------------------------------------\n    # TRANSFORM\n\n    # transform the datasets into CovidStat Instances\n    covid_stats = transform.transform(ny_dataset, jh_dataset)\n\n    # print CovidStats\n    print(*covid_stats, sep=\"\\n\")\n\n    # -----------------------------------------------------\n    # LOAD\n\n    # load CovidStat instances into the CovidStats DynamoDB table\n    load.load_all(classes.CovidStat, covid_stats)\n    load.load_json(covid_stats)\n\n\n# Local Only\nif __name__ == \"__main__\":\n    handler(None, None)\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"140948699","text":"import math\nfrom operator import itemgetter\n\nrecords = []\n\n\ndef readInput(file):\n    codes = []\n    file = open(file, \"r\")\n    for line in file.readlines():\n        codes.append(line.rstrip())\n    print('%d lines imported' % len(codes))\n    file.close()\n    return codes\n\n\nfor code in readInput('day5-input.txt'):\n    valid = True\n    rowMin = 0\n    rowMax = 127\n    colMin = 0\n    colMax = 7\n    seatID = None\n    for index in range(len(code)):\n        # determine the row\n        if index >= 0 and index <= 6:\n            if code[index] == 'F':\n                rowMax = 
math.floor((rowMax - rowMin) / 2) + rowMin\n            elif code[index] == 'B':\n                rowMin = math.ceil((rowMax - rowMin) / 2) + rowMin\n            else:\n                valid = False\n        # determine the column\n        elif index >= 7 and index <= 9:\n            if code[index] == 'L':\n                colMax = math.floor((colMax - colMin) / 2) + colMin\n            elif code[index] == 'R':\n                colMin = math.ceil((colMax - colMin) / 2) + colMin\n            else:\n                valid = False\n        else:\n            valid = False\n    if valid == False:\n        print('Record is invalid')\n    # determine the seat ID\n    seatID = (rowMin * 8) + colMin\n    record = [code, rowMin, colMin, seatID]\n    if valid == True:\n        records.append(record)\n        print(f'{record} added')\n    else:\n        print('Code was invalid.')\n\n\nsortedRecords = sorted(records, key=itemgetter(3))\nmaxSeatID = sortedRecords[-1][3]\nprint(f'\\nThe highest seat ID is {maxSeatID}.\\n')\n","sub_path":"day5-1.py","file_name":"day5-1.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"431159209","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n    Author: 王导导\n    Version: 1.0\n    Date: 2019/02/11\n    Project: patent downloader\n\"\"\"\n\nfrom config import *\nfrom patentdown import *\nfrom patentid import *\nimport os\n\n\nprint('Please choose:\\n1: download by patent number\\n2: batch download by keyword')\nisDown = True\ncheck = True\n\n\n\n\nchoice = input('Enter 1 or 2: ')\nif choice == str(1):\n    number = input('Enter the patent number: ')\n    get_pdf(number)\nelif choice == str(2):\n    keywords = input('Enter the keyword: ')\n    if keywords:\n        page_num = 1\n        while isDown:\n            check = True  # re-arm the per-page prompt for each new page\n            for i in get_id(keywords, page_num):\n                if not os.path.exists('pdf' + os.sep + '{}.pdf'.format(i)):\n                    print(i)\n                    get_pdf(i)\n                else:\n                    print('Patent {} already exists.'.format(i))\n            while check:\n                selection = input(\n                    'Page {} finished. Enter 1 or 2 to choose whether to download the next page:\\n1 for yes, 2 for no: '.format(page_num))\n                if selection == str(1):\n                    page_num += 1\n                    check = False\n                elif selection == str(2):\n                    isDown = False\n                    print('Download cancelled!')\n                    check = False\n                else:\n                    print('Invalid input, please try again!')\n    else:\n        print('The keyword is empty.')\nelse:\n    print('Invalid input, please start over.')\n","sub_path":"old/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"87742945","text":"import sys\n\ndef main(argv):\n    s1 = argv[0]\n    s2 = argv[1]\n    if len(s1) != len(s2):\n        print(\"False\")\n        return\n    s1_sorted = sorted(s1)\n    s2_sorted = sorted(s2)\n\n    answer = True\n    for (c1,c2) in zip(s1_sorted, s2_sorted):\n        if c1 != c2:\n            answer = False\n            break\n    print(answer)\n\nif __name__ == '__main__':\n    main(sys.argv[1:])\n","sub_path":"cracking_the_coding_interview/01.Arrays_and_Strings/IsAnagram.py","file_name":"IsAnagram.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"439408967","text":"\"\"\"\n1.) find all lowercase letters in a string\n2.) validate username:\nonly lowercase, digits and underscore\n3.) 
replace every character in a string with another char\n\"\"\"\n\nimport re \n\np1 = r\"[a-z]\"\n\ns1 =\"aDududaauXudffhfd197272xycy\"\n\nr1=''.join(re.findall(p1,s1))\nprint(r1)\n\n\ndef valid_username(username):\n    p2=r\"^[a-z_\\d]+$\"\n    return bool(re.match(p2,username))\n\nprint(valid_username('sdsdo'))\n\n\ndef is_even(i):\n    return i%2==0\n\nl = list(range(10))\nprint(l)\n\nr = list(filter(is_even,l))\nprint(r)\n\nl = [0,1,None,1,2,3,5]\nprint(list(filter(None,l)))\n\n","sub_path":"regex-example/exp.py","file_name":"exp.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"564058723","text":"import os\nfrom collections import OrderedDict\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision.utils import save_image\nfrom pdb import set_trace\n\n# mnist stuff\nx_dim = 784\nside = int(x_dim ** .5)\nitem_dims = (1, side, side)\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nprint(device)\n\ndef mkdir(path):\n    os.makedirs(path, exist_ok=True)\n\nclass WrappedOp(nn.Module):\n    def __init__(self, fn):\n        super(WrappedOp, self).__init__()\n        self.fn = fn\n\n    def forward(self, *args, **kwargs):\n        return self.fn(*args, **kwargs)\n\ndef fc(in_dim, out_dim, activation=nn.ReLU(), bnorm=False):\n    '''fully-connected layer, consisting of a linear layer and an activation'''\n    return nn.Sequential(OrderedDict([\n        ('linear', nn.Linear(in_dim, out_dim)),\n        *([('bnorm', nn.BatchNorm1d(out_dim))] if bnorm else []),\n        ('activation', activation),\n    ]))\n\ndef leaky(i, o, bnorm=False):\n    return fc(i, o, nn.LeakyReLU(0.2), bnorm)\n\ndef elbo(input, decoded, mean, std):\n    '''compute the evidence lower bound (ELBO)'''\n    eps = 1e-9\n    loss_recon = -(\n        input * ( decoded + eps).log() +\n        (1 - input) * (1 - decoded + eps).log()\n    ).sum(dim=1)\n\n    # regularization loss by KL divergence\n    loss_reg = 0.5 * (\n        (std.pow(2) - 1 + mean.pow(2)) -\n        (std + eps).log()\n    ).sum(dim=1)\n\n    return (loss_recon + loss_reg).mean(dim=0)\n\ndef get_mask():\n    mask = np.zeros((28, 28), dtype='float32')\n    for i in range(28):\n        for j in range(28):\n            if (i + j) % 2 == 0:\n                mask[i, j] = 1\n\n    mask = mask.reshape(1, 28*28)\n    mask = torch.from_numpy(mask)\n\n    return mask\n\n@torch.no_grad()\ndef save_elbo_plot(train_curve, val_curve, filename):\n    plt.figure(figsize=(12, 6))\n    plt.plot(train_curve, label='train elbo')\n    plt.plot(val_curve, label='validation elbo')\n    plt.legend()\n    plt.xlabel('epochs')\n    plt.ylabel('ELBO')\n    plt.tight_layout()\n    plt.savefig(filename)\n\n@torch.no_grad()\ndef save_bpd_plot(train_curve, val_curve, filename):\n    plt.figure(figsize=(12, 6))\n    plt.plot(train_curve, label='train bpd')\n    plt.plot(val_curve, label='validation bpd')\n    plt.legend()\n    plt.xlabel('epochs')\n    plt.ylabel('bpd')\n    plt.tight_layout()\n    plt.savefig(filename)\n\n@torch.no_grad()\ndef interpolate_gan(path, generator, latent_dim, epoch, size=9):\n    z1 = torch.randn(latent_dim)\n    z2 = torch.randn(latent_dim)\n    zs = torch.tensor(np.linspace(z1, z2, size))\n    imgs = generator(zs).view(-1, *item_dims)\n    save_image(imgs, f'{path}/interpolated_{epoch:03}.png', nrow=size, normalize=True)\n","sub_path":"assignment_3/code/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"121097243","text":"from Operator import Operator\n\nclass Converter:\n\n    
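\"\"\"Two-stack infix-to-prefix converter; assumes single-character\n    operands, and operators supplied as objects exposing symbol,\n    precedence and a binary function (see the Operator usage below).\"\"\"\n\n    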
def __init__(self, operators, string):\n self.__operators = operators\n self.__string = string\n\n def __is_operator(self, symbol):\n for operator in self.__operators:\n if operator.symbol == symbol:\n return True\n return False\n\n def __get_priority(self, symbol):\n for operator in self.__operators:\n if operator.symbol == symbol:\n return operator.precedence\n return 0\n\n def __get_function(self, symbol):\n for operator in self.__operators:\n if operator.symbol == symbol:\n return operator.function\n \n def infix_to_prefix(self):\n operators_stack = []\n operands_stack = []\n\n for character in self.__string:\n\n if character == '(':\n operators_stack.append(character)\n \n elif character == ')':\n while len(operators_stack) != 0 and operators_stack[-1] != '(':\n op1 = operands_stack[-1]\n operands_stack.pop(-1)\n\n op2 = operands_stack[-1]\n operands_stack.pop(-1)\n\n op = operators_stack[-1]\n operators_stack.pop(-1)\n\n comb = op + op2 + op1\n\n operands_stack.append(comb)\n\n operators_stack.pop(-1)\n \n elif not self.__is_operator(character):\n operands_stack.append(character)\n \n else:\n while len(operators_stack) != 0 and self.__get_priority(character) <= self.__get_priority(operators_stack[-1]):\n\n op1 = operands_stack[-1]\n operands_stack.pop(-1)\n\n op2 = operands_stack[-1]\n operands_stack.pop(-1)\n\n op = operators_stack[-1]\n operators_stack.pop(-1)\n\n comb = op + op2 + op1\n\n operands_stack.append(comb)\n \n operators_stack.append(character)\n\n while len(operators_stack) != 0:\n op1 = operands_stack[-1]\n operands_stack.pop(-1)\n\n op2 = operands_stack[-1]\n operands_stack.pop(-1)\n\n op = operators_stack[-1]\n operators_stack.pop(-1)\n\n comb = op + op2 + op1\n operands_stack.append(comb)\n\n return operands_stack[-1]\n\n\n def evaluate_expression(self, variable_dict):\n prefix = self.infix_to_prefix()\n self.__string = prefix\n self.__index = 0\n return self.__evaluate_expression(variable_dict)\n \n def __evaluate_expression(self, variable_dict):\n if self.__is_operator(self.__string[self.__index]):\n funct = self.__get_function(self.__string[self.__index])\n self.__index += 1\n op1 = self.__evaluate_expression(variable_dict)\n self.__index += 1\n op2 = self.__evaluate_expression(variable_dict)\n\n temp = funct(op1, op2)\n \n return temp\n else:\n temp = variable_dict[self.__string[self.__index]]\n return temp","sub_path":"FLAT/Polish Notation/InfixToPrefix.py","file_name":"InfixToPrefix.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"221855493","text":"from flask import Flask, flash, redirect, render_template, request, session, abort, url_for\r\nimport os\r\nimport string\r\nimport re\r\nimport math\r\nimport collections\r\nimport pandas\r\nfrom nltk.corpus import stopwords\r\nfrom nltk import PorterStemmer\r\nfrom bs4 import BeautifulSoup\r\nimport rank\r\n\r\ndocument_text = dict()\r\nindex = dict()\r\nq_index = dict()\r\ncount = 0\r\nc = 0\r\ndoc_len = dict()\r\nq_len = 0\r\nweight = dict()\r\nq_weight = dict()\r\ncos_sim = dict()\r\nps = PorterStemmer()\r\nfinal_urls = dict()\r\npr_scores = dict()\r\nstopwords = set(stopwords.words('english'))\r\nfile_path_docs = \"C:/Users/harsh/PycharmProjects/IR Project/pages/\"\r\nfile_path_urls = \"C:/Users/harsh/PycharmProjects/IR Project/\"\r\n\r\n\r\ndef remove_punctuation(content):\r\n content = content.strip()\r\n content = content.lower()\r\n content = re.sub('\\d', '%d', content)\r\n for x in 
string.punctuation:\r\n        if x in content:\r\n            content = content.replace(x, \" \")\r\n    return content\r\n\r\n\r\ndef stemming(content):\r\n    ps = PorterStemmer()\r\n    words = content.split(\" \")\r\n    for word in words:\r\n        content = content.replace(word, ps.stem(word))\r\n    return content\r\n\r\n\r\ndef clean_soup(content):\r\n    for script in content([\"script\", \"style\"]):\r\n        script.extract()\r\n    return content.get_text()\r\n\r\n\r\ndef pre_process(content,file):\r\n    content = clean_soup(content)\r\n    content = remove_punctuation(content)\r\n    content = content.replace(\"\\n\", \" \")\r\n    content = content.replace(\"\\t\", \" \")\r\n    document_text[file] = content\r\n\r\n\r\ndef build_index(content, file):\r\n    global index\r\n    global document_text\r\n    global stopwords\r\n    for doc_id, text in document_text.items():\r\n        words = text.split(\" \")\r\n        for word in words:\r\n            if word not in stopwords:\r\n                if word not in index.keys():\r\n                    index[word] = {}\r\n                    index[word][doc_id] = 1\r\n                elif word in index and doc_id not in index[word].keys():\r\n                    index[word][doc_id] = 1\r\n                else:\r\n                    index[word][doc_id] += 1\r\n\r\n\r\ndef build_q_index(query):\r\n    global q_index\r\n    global stopwords\r\n    words = query.split(\" \")\r\n    for word in words:\r\n        if word not in stopwords:\r\n            if word not in q_index.keys():\r\n                q_index[word] = 1\r\n            elif word in q_index:\r\n                q_index[word] = 1\r\n            else:\r\n                q_index[word] += 1\r\n\r\n\r\ndef process_query(query):\r\n    query = query.replace(\"\\n\", \"\")\r\n    query = remove_punctuation(query)\r\n    return query\r\n\r\n\r\ndef calculate_weight():\r\n    global document_text\r\n    global index\r\n    global weight\r\n    global doc_len\r\n    for word in index.keys():\r\n        for doc_id in index[word].keys():\r\n            if word not in weight.keys():\r\n                weight[word] = {}\r\n            tf = (index[word][doc_id])\r\n            idf = math.log((len(document_text) / len(index[word])), 2)\r\n            weight[word][doc_id] = tf * idf\r\n            if doc_id not in doc_len.keys():\r\n                doc_len[doc_id] = (tf**2) * (idf**2)\r\n            else:\r\n                doc_len[doc_id] += (tf**2) * (idf**2)\r\n\r\n\r\ndef calculate_q_weight():\r\n    global q_index\r\n    global q_weight\r\n    global q_len\r\n    global index\r\n    for word in q_index.keys():\r\n        tf = (q_index[word])\r\n        if word in index.keys() and ((len(document_text) / len(index[word]))!=1):\r\n            idf = math.log((len(document_text) / len(index[word])), 2)\r\n        else:\r\n            idf = 0\r\n        if idf != 0:\r\n            q_len += (tf**2) * (idf**2)\r\n        q_weight[word] = tf * idf\r\n\r\n\r\ndef cosine_sim():\r\n    global weight\r\n    global q_weight\r\n    global index\r\n    global q_index\r\n    global cos_sim\r\n    global q_len\r\n    global doc_len\r\n    for word in q_index.keys():\r\n        if word in index.keys():\r\n            for doc_id in index[word].keys():\r\n                if doc_id not in cos_sim.keys():\r\n                    cos_sim[doc_id] = weight[word][doc_id] * q_weight[word] / \\\r\n                                      (math.sqrt(doc_len[doc_id]) * math.sqrt(q_len))\r\n                else:\r\n                    cos_sim[doc_id] += weight[word][doc_id] * q_weight[word] / \\\r\n                                       (math.sqrt(doc_len[doc_id]) * math.sqrt(q_len))\r\n    return cos_sim\r\n\r\n\r\ndef get_url(key):\r\n    with open(file_path_urls + 'urlList.txt') as f:\r\n        for i, line in enumerate(f):\r\n            if i == int(key)-1:\r\n                return line\r\n\r\n\r\ndef get_pr_query(pr_scores, query):\r\n    q_words = query.split()\r\n    total_scores = dict()\r\n    # accumulate each document's score over every query word\r\n    # (initialise the document's total exactly once)\r\n    for doc in pr_scores.keys():\r\n        total_scores[doc] = 0\r\n        for i in q_words:\r\n            if i in pr_scores[doc].keys():\r\n                total_scores[doc] += pr_scores[doc][i]\r\n    return total_scores\r\n\r\n\r\ndef combine_results(cos_scores, pr_scores):\r\n    combinedScores = dict()\r\n    for k in 
cos_scores.keys():\r\n if k in pr_scores.keys():\r\n combinedScores[k] = 2*(cos_scores[k] * pr_scores[k]) / (cos_scores[k] + pr_scores[k])\r\n return combinedScores\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n@app.route(\"/search\")\r\ndef search():\r\n global c\r\n global pr_scores\r\n for file in os.listdir(file_path_docs):\r\n print(\"File Number: \" + file)\r\n f = open(file_path_docs + file, 'rb')\r\n content = BeautifulSoup(f, 'html.parser')\r\n pr_scores[file] = rank.pr_vocab(content)\r\n pre_process(content, file)\r\n return render_template('test.html')\r\n\r\n\r\n@app.route(\"/results\", methods=['POST'])\r\ndef results():\r\n global c\r\n global pr_scores\r\n if request.method == 'POST':\r\n global final_urls\r\n cos_results = dict()\r\n final_urls_dup = dict()\r\n pr_results = dict()\r\n query = request.form['query']\r\n query = process_query(query)\r\n build_q_index(query)\r\n calculate_weight()\r\n calculate_q_weight()\r\n cos_results = cosine_sim()\r\n cos_results = dict(collections.Counter(cos_sim).most_common(c))\r\n pr_results = get_pr_query(pr_scores, query)\r\n pr_results = dict(collections.Counter(pr_results).most_common(c))\r\n final_scores = combine_results(cos_results, pr_results)\r\n final_scores = dict(collections.Counter(final_scores).most_common(c))\r\n for key, value in final_scores.items():\r\n file_name = key.split('.')\r\n final_urls[file_name[0]] = get_url(file_name[0])\r\n final_urls_dup[file_name[0]] = get_url(file_name[0])\r\n return render_template('results.html', **locals())\r\n\r\n\r\n@app.route('/myredirect/', methods=['GET'])\r\ndef myredirect(name):\r\n print(\"Reached myredirect method. \" + final_urls[name])\r\n redirect_url = final_urls[name]\r\n return redirect(redirect_url)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(host='0.0.0.0', port=80)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"537501689","text":"##############################################################################\n#\n# This script provides the formal specification of the study data\n# that will be extracted from\n# the OpenSAFELY database.\n#\n# STUDY PURPOSE: to perform regression discontinuity of 2022/23 \n# autumn booster COVID-19 vaccine, before and after 50+ became eligible\n# on October 15, 2022\n#\n# This study definition defines the baseline characteristics (e.g. 
demographics, vax dates).\n# The baseline date is Sep 3, 2022 (6 weeks prior to start of vaccination campaign)\n#\n#################################################################################\n\n\n# IMPORT STATEMENTS ----\n\n# Import code building blocks from cohort extractor package\nfrom cohortextractor import (\n StudyDefinition,\n patients,\n Measure,\n codelist,\n)\n\n# Import codelists from codelist.py (which pulls them from the codelist folder)\nfrom codelists import *\n\n# Specifiy study defeinition\nstudy = StudyDefinition(\n\n # Configure the expectations framework\n default_expectations = {\n \"date\": {\"earliest\": \"2020-12-08\", \"latest\": \"2023-02-01\"},\n \"rate\": \"uniform\",\n \"incidence\": 0.5,\n },\n \n # Set index date\n index_date = \"2022-09-03\",\n\n # This line defines the study population\n population = patients.satisfying(\n \"\"\"\n registered\n AND\n (age >= 44 AND age <= 55)\n AND\n (sex = \"M\" OR sex = \"F\")\n AND\n NOT has_died\n AND\n has_follow_up\n \"\"\",\n registered = patients.registered_as_of(\n \"index_date\",\n ),\n has_died = patients.died_from_any_cause(\n on_or_before = \"index_date\",\n returning = \"binary_flag\",\n ),\n has_follow_up=patients.registered_with_one_practice_between(\n \"index_date - 90 days\", \"index_date\"\n ),\n ),\n\n # Age (continuous)\n age = patients.age_as_of(\n \"index_date\", \n return_expectations = {\n \"rate\": \"universal\",\n \"int\": {\"distribution\": \"normal\", \"mean\" : 50, \"stddev\": 3},\n \"incidence\" : 0.001\n },\n ),\n\n # Date of birth month/year\n dob = patients.date_of_birth(\n \"YYYY-MM\",\n return_expectations={\n \"date\": {\"earliest\": \"1962-01-01\", \"latest\": \"1982-01-01\"},\n \"rate\": \"uniform\",\n \"incidence\" : .999\n },\n ),\n\n # Sex\n sex = patients.sex(\n return_expectations={\n \"rate\": \"universal\",\n \"category\": {\"ratios\": {\"M\": 0.49, \"F\": 0.51}},\n }\n ),\n\n # Date of death\n dod = patients.died_from_any_cause(\n returning=\"date_of_death\",\n date_format=\"YYYY-MM-DD\",\n return_expectations={\n \"date\": {\"earliest\": \"2022-09-03\", \"latest\": \"2023-02-01\"},\n \"rate\": \"uniform\",\n \"incidence\" : .1\n },\n ),\n\n ###########################################################\n # Demographics - for confirming that population is \n # consistent over time (before/after discontinuity)\n ###########################################################\n \n # IMD - quintile\n imd = patients.categorised_as(\n {\n \"0\": \"DEFAULT\",\n \"1\": \"\"\"index_of_multiple_deprivation >=1 AND index_of_multiple_deprivation < 32844*1/5\"\"\",\n \"2\": \"\"\"index_of_multiple_deprivation >= 32844*1/5 AND index_of_multiple_deprivation < 32844*2/5\"\"\",\n \"3\": \"\"\"index_of_multiple_deprivation >= 32844*2/5 AND index_of_multiple_deprivation < 32844*3/5\"\"\",\n \"4\": \"\"\"index_of_multiple_deprivation >= 32844*3/5 AND index_of_multiple_deprivation < 32844*4/5\"\"\",\n \"5\": \"\"\"index_of_multiple_deprivation >= 32844*4/5 AND index_of_multiple_deprivation < 32844\"\"\",\n },\n index_of_multiple_deprivation=patients.address_as_of(\n \"index_date\",\n returning=\"index_of_multiple_deprivation\",\n round_to_nearest=100,\n ),\n return_expectations={\n \"rate\": \"universal\",\n \"category\": {\n \"ratios\": {\n \"0\": 0.01,\n \"1\": 0.20,\n \"2\": 0.20,\n \"3\": 0.20,\n \"4\": 0.20,\n \"5\": 0.19,\n }\n },\n },\n ),\n\n ### Region\n region = patients.registered_practice_as_of(\n \"index_date\",\n returning = \"nuts1_region_name\",\n return_expectations = {\n \"rate\": 
\"universal\",\n \"category\": {\n \"ratios\": {\n \"North East\": 0.1,\n \"North West\": 0.1,\n \"Yorkshire and The Humber\": 0.1,\n \"East Midlands\": 0.1,\n \"West Midlands\": 0.1,\n \"East\": 0.1,\n \"London\": 0.2,\n \"South East\": 0.1,\n \"South West\": 0.1,\n },\n },\n },\n ),\n\n ### Ethnicity (6 categories)\n ethnicity = patients.categorised_as(\n {\n \"Unknown\": \"DEFAULT\",\n \"White\": \"eth6='1'\",\n \"Mixed\": \"eth6='2'\",\n \"Asian or Asian British\": \"eth6='3'\",\n \"Black or Black British\": \"eth6='4'\",\n \"Other\": \"eth6='5'\",\n },\n eth6 = patients.with_these_clinical_events(\n ethnicity_codes_6,\n returning = \"category\",\n find_last_match_in_period = True,\n include_date_of_match = False,\n return_expectations = {\n \"incidence\": 0.75,\n \"category\": {\n \"ratios\": {\n \"1\": 0.30,\n \"2\": 0.20,\n \"3\": 0.20,\n \"4\": 0.20,\n \"5\": 0.05,\n \"6\": 0.05,\n },\n },\n },\n ),\n return_expectations = {\n \"rate\": \"universal\",\n \"category\": {\n \"ratios\": {\n \"White\": 0.30,\n \"Mixed\": 0.20,\n \"Asian or Asian British\": 0.20,\n \"Black or Black British\": 0.20,\n \"Other\": 0.05,\n \"Unknown\": 0.05,\n },\n },\n },\n ),\n \n ###############################################################################\n # Risk groups prioritised for earlier vaccination\n ###############################################################################\n\n # Healthcare worker at time of vaccination\n # Need to sort out how this is defined\n hscworker = patients.with_healthcare_worker_flag_on_covid_vaccine_record(returning=\"binary_flag\"),\n\n ## From PRIMIS ##\n # Asthma\n asthma = patients.satisfying(\n \"\"\"\n astadm OR\n (ast AND astrxm1 AND astrxm2 AND astrxm3)\n \"\"\",\n # Asthma Admission codes\n astadm=patients.with_these_clinical_events(\n astadm,\n returning=\"binary_flag\",\n on_or_before=\"index_date - 1 day\",\n return_expectations = {\"incidence\": 0.01},\n ),\n # Asthma Diagnosis code\n ast = patients.with_these_clinical_events(\n ast,\n returning=\"binary_flag\",\n on_or_before=\"index_date - 1 day\",\n return_expectations = {\"incidence\": 0.01},\n ),\n # Asthma systemic steroid prescription code in month 1\n astrxm1=patients.with_these_medications(\n astrx,\n returning=\"binary_flag\",\n between=[\"index_date - 30 days\", \"index_date - 1 day\"],\n return_expectations = {\"incidence\": 0.01},\n ),\n # Asthma systemic steroid prescription code in month 2\n astrxm2=patients.with_these_medications(\n astrx,\n returning=\"binary_flag\",\n between=[\"index_date - 60 days\", \"index_date - 31 days\"],\n return_expectations = {\"incidence\": 0.01},\n ),\n # Asthma systemic steroid prescription code in month 3\n astrxm3=patients.with_these_medications(\n astrx,\n returning=\"binary_flag\",\n between=[\"index_date - 90 days\", \"index_date - 61 days\"],\n return_expectations = {\"incidence\": 0.01},\n ),\n return_expectations = {\"incidence\": 0.01},\n ),\n\n # Chronic Respiratory Disease\n chronic_resp_disease = patients.satisfying(\n \"asthma OR resp_cov\",\n resp_cov=patients.with_these_clinical_events(\n resp_cov,\n returning=\"binary_flag\",\n on_or_before=\"index_date - 1 day\",\n return_expectations = {\"incidence\": 0.01},\n ),\n ),\n\n # Chronic Neurological Disease including Learning Disorder\n chronic_neuro_disease=patients.satisfying(\n \"\"\"cns_cov OR learndis \"\"\",\n # Chronic neurological disease\n cns_cov=patients.with_these_clinical_events(\n cns_cov,\n returning=\"binary_flag\",\n on_or_before=\"index_date - 1 day\",\n ),\n # Wider 
Learning Disability\n learndis=patients.with_these_clinical_events(\n learndis,\n returning=\"binary_flag\",\n on_or_before=\"index_date - 1 day\",\n ),\n return_expectations = {\"incidence\": 0.01},\n ),\n\n # Obesity\n sev_obesity = patients.satisfying(\n \"\"\"\n sev_obesity_date > bmi_date OR\n bmi_value1 >= 40\n \"\"\",\n bmi_stage_date=patients.with_these_clinical_events(\n bmi_stage,\n returning=\"date\",\n find_last_match_in_period=True,\n on_or_before=\"index_date - 1 day\",\n date_format=\"YYYY-MM-DD\",\n return_expectations = {\"incidence\": 0.01},\n ),\n sev_obesity_date=patients.with_these_clinical_events(\n sev_obesity,\n returning=\"date\",\n find_last_match_in_period=True,\n ignore_missing_values=True,\n between= [\"bmi_stage_date\", \"index_date - 1 day\"],\n date_format=\"YYYY-MM-DD\",\n return_expectations = {\"incidence\": 0.01},\n ),\n bmi_date=patients.with_these_clinical_events(\n bmi,\n returning=\"date\",\n ignore_missing_values=True,\n find_last_match_in_period=True,\n on_or_before=\"index_date - 1 day\",\n date_format=\"YYYY-MM-DD\",\n return_expectations = {\"incidence\": 0.01},\n ),\n bmi_value1=patients.with_these_clinical_events(\n bmi,\n returning=\"numeric_value\",\n ignore_missing_values=True,\n find_last_match_in_period=True,\n on_or_before=\"index_date - 1 day\",\n return_expectations = {\"incidence\": 0.01},\n ),\n return_expectations = {\"incidence\": 0.01},\n ),\n\n # Diabetes\n diabetes = patients.satisfying(\n \"\"\"\n (dmres_date < diab_date) OR \n (diab_date AND (NOT dmres_date))\n \"\"\",\n diab_date=patients.with_these_clinical_events(\n diab,\n returning=\"date\",\n find_last_match_in_period=True,\n on_or_before=\"index_date - 1 day\",\n date_format=\"YYYY-MM-DD\",\n return_expectations = {\"incidence\": 0.01},\n ),\n # Diabetes resolved\n dmres_date=patients.with_these_clinical_events(\n dmres,\n returning=\"date\",\n find_last_match_in_period=True,\n on_or_before=\"index_date - 1 day\",\n date_format=\"YYYY-MM-DD\",\n return_expectations = {\"incidence\": 0.01},\n ),\n return_expectations = {\"incidence\": 0.01},\n ),\n\n # Severe mental illness\n sev_mental=patients.satisfying(\n \"\"\"\n (smhres_date < sev_mental_date) OR \n (sev_mental_date AND (NOT smhres_date))\n \"\"\",\n # Severe Mental Illness codes\n sev_mental_date=patients.with_these_clinical_events(\n sev_mental,\n returning=\"date\",\n find_last_match_in_period=True,\n on_or_before=\"index_date - 1 day\",\n date_format=\"YYYY-MM-DD\",\n return_expectations = {\"incidence\": 0.01},\n ),\n # Remission codes relating to Severe Mental Illness\n smhres_date=patients.with_these_clinical_events(\n smhres,\n returning=\"date\",\n find_last_match_in_period=True,\n on_or_before=\"index_date - 1 day\",\n date_format=\"YYYY-MM-DD\",\n return_expectations = {\"incidence\": 0.01},\n ),\n return_expectations = {\"incidence\": 0.01},\n ),\n\n # Chronic heart disease codes\n chronic_heart_disease=patients.with_these_clinical_events(\n chd_cov,\n returning=\"binary_flag\",\n on_or_before=\"index_date - 1 day\",\n return_expectations = {\"incidence\": 0.01},\n ),\n\n # Chronic kidney disease\n chronic_kidney_disease=patients.satisfying(\n \"\"\"\n ckd OR\n (ckd15_date AND ckd35_date >= ckd15_date)\n \"\"\",\n # Chronic kidney disease codes - all stages\n ckd15_date=patients.with_these_clinical_events(\n ckd15,\n returning=\"date\",\n find_last_match_in_period=True,\n on_or_before=\"index_date - 1 day\",\n date_format=\"YYYY-MM-DD\",\n return_expectations = {\"incidence\": 0.01},\n ),\n # Chronic 
kidney disease codes-stages 3 - 5\n ckd35_date=patients.with_these_clinical_events(\n ckd35,\n returning=\"date\",\n find_last_match_in_period=True,\n on_or_before=\"index_date - 1 day\",\n date_format=\"YYYY-MM-DD\",\n return_expectations = {\"incidence\": 0.01},\n ),\n # Chronic kidney disease diagnostic codes\n ckd=patients.with_these_clinical_events(\n ckd_cov,\n returning=\"binary_flag\",\n on_or_before=\"index_date - 1 day\",\n return_expectations = {\"incidence\": 0.01},\n ),\n return_expectations = {\"incidence\": 0.01},\n ),\n\n # Chronic Liver disease codes\n chronic_liver_disease=patients.with_these_clinical_events(\n cld,\n returning=\"binary_flag\",\n on_or_before=\"index_date - 1 day\",\n return_expectations = {\"incidence\": 0.01},\n ),\n\n # Immunosupressive conditions (e.g. cancer, transplant) / medications \n immunosuppressed=patients.satisfying(\n \"\"\"\n immrx OR \n immdx OR \n hiv_aids OR \n solid_organ_transplant OR\n cancer_haem OR\n cancer_nonhaem\n \"\"\",\n # Immunosuppression diagnosis codes\n immdx=patients.with_these_clinical_events(\n immdx_cov,\n returning=\"binary_flag\",\n on_or_before=\"index_date - 1 day\",\n ),\n # Immunosuppression medication codes\n immrx=patients.with_these_medications(\n immrx,\n returning=\"binary_flag\",\n between=[\"index_date - 182 days\", \"index_date - 1 day\"],\n ), \n # HIV / AIDS\n hiv_aids = patients.with_these_clinical_events( \n hiv_aids,\n on_or_before=\"index_date - 1 day\",\n returning=\"binary_flag\",\n ), \n # Solid organ transplant\n solid_organ_transplant = patients.with_these_clinical_events( \n solid_organ_transplant,\n on_or_before=\"index_date - 1 day\",\n returning=\"binary_flag\",\n ), \n # Haematological cancer\n cancer_haem = patients.with_these_clinical_events(\n cancer_haem_snomed,\n returning=\"binary_flag\",\n between=[\"index_date - 3 years\", \"index_date - 1 day\"],\n ),\n # Solid cancer\n cancer_nonhaem = patients.with_these_clinical_events( \n cancer_nonhaem_snomed,\n between=[\"index_date - 3 years\", \"index_date - 1 day\"],\n returning=\"binary_flag\",\n ), \n return_expectations = {\"incidence\": 0.01},\n ),\n \n # Asplenia or Dysfunction of the Spleen \n asplenia=patients.with_these_clinical_events(\n spln_cov,\n returning=\"binary_flag\",\n on_or_before=\"index_date - 1 day\",\n return_expectations = {\"incidence\": 0.01},\n ),\n \n # End of life\n endoflife = patients.satisfying(\n \"\"\"\n midazolam OR\n endoflife_coding\n \"\"\",\n midazolam = patients.with_these_medications(\n midazolam,\n returning=\"binary_flag\",\n on_or_before = \"index_date - 1 day\",\n ),\n endoflife_coding = patients.with_these_clinical_events(\n eol,\n returning=\"binary_flag\",\n on_or_before = \"index_date - 1 day\",\n find_last_match_in_period = True,\n ),\n ),\n \n # Housebound\n housebound = patients.satisfying(\n \"\"\"housebound_date\n AND NOT no_longer_housebound\n AND NOT moved_into_care_home\n \"\"\",\n housebound_date=patients.with_these_clinical_events( \n housebound, \n on_or_before=\"index_date - 1 day\",\n find_last_match_in_period = True,\n returning=\"date\",\n date_format=\"YYYY-MM-DD\",\n return_expectations = {\"incidence\": 0.01},\n ), \n no_longer_housebound=patients.with_these_clinical_events( \n no_longer_housebound, \n between=[\"housebound_date\", \"index_date - 1 day\"],\n return_expectations = {\"incidence\": 0.01},\n ),\n moved_into_care_home=patients.with_these_clinical_events(\n carehome,\n between=[\"housebound_date\", \"index_date - 1 day\"],\n return_expectations = 
{\"incidence\": 0.01},\n ),\n return_expectations = {\"incidence\": 0.01},\n ),\n\n # Care home\n carehome = patients.satisfying(\n \"\"\"\n carehome_codes\n OR\n tpp_care_home_type\n \"\"\",\n # Care home from codelists\n carehome_codes = patients.with_these_clinical_events(\n carehome,\n on_or_before = \"index_date\",\n returning = \"binary_flag\",\n return_expectations = {\"incidence\": 0.01},\n ),\n ## Care home from TPP address list (as binary variable)\n tpp_care_home_type=patients.care_home_status_as_of(\n \"index_date\",\n categorised_as={\n 1: \"\"\"\n IsPotentialCareHome\n \"\"\",\n 0: \"DEFAULT\",\n },\n ),\n return_expectations = {\"incidence\": 0.01},\n ),\n\n ###############################################################################\n # COVID VACCINATION (ANY TYPE)\n ###############################################################################\n\n # First dose COVID vaccination\n covid_vax_1_date=patients.with_tpp_vaccination_record(\n target_disease_matches = \"SARS-2 CORONAVIRUS\",\n on_or_after = \"2020-12-08\", \n find_first_match_in_period = True,\n returning=\"date\",\n date_format=\"YYYY-MM-DD\",\n return_expectations={\n \"date\": {\n \"earliest\": \"2020-12-08\", # first vaccine administered on the 8/12\n \"latest\": \"2023-02-01\",\n }\n },\n ),\n\n # Second dose COVID vaccination\n covid_vax_2_date=patients.with_tpp_vaccination_record(\n target_disease_matches=\"SARS-2 CORONAVIRUS\",\n on_or_after = \"covid_vax_1_date + 1 days\",\n find_first_match_in_period = True,\n returning=\"date\",\n date_format=\"YYYY-MM-DD\",\n return_expectations={\n \"date\": {\n \"earliest\": \"2020-12-08\", \n \"latest\": \"2023-02-01\",\n }\n },\n ),\n\n # Third dose (first booster) COVID vaccination\n covid_vax_3_date=patients.with_tpp_vaccination_record(\n target_disease_matches=\"SARS-2 CORONAVIRUS\",\n on_or_after=\"covid_vax_2_date + 1 days\",\n find_first_match_in_period=True,\n returning=\"date\",\n date_format=\"YYYY-MM-DD\",\n return_expectations={\n \"date\": {\n \"earliest\": \"2021-11-01\", \n \"latest\": \"2023-02-01\",\n }\n },\n ),\n\n # Fourth dose (second booster) COVID vaccination\n covid_vax_4_date = patients.with_tpp_vaccination_record(\n target_disease_matches=\"SARS-2 CORONAVIRUS\",\n on_or_after=\"covid_vax_3_date + 1 days\",\n find_first_match_in_period=True,\n returning = \"date\",\n date_format=\"YYYY-MM-DD\",\n return_expectations={\n \"date\": {\n \"earliest\": \"2022-07-01\", \n \"latest\": \"2023-02-01\",\n }\n },\n ),\n\n ###############################################################################\n # FLU VACCINATION in 2022-23\n ###############################################################################\n\n flu_vax_tpp_date=patients.with_tpp_vaccination_record(\n target_disease_matches=\"INFLUENZA\",\n on_or_after=\"2022-07-01\",\n find_first_match_in_period=True,\n returning=\"date\",\n date_format=\"YYYY-MM-DD\",\n return_expectations={ \n \"date\": {\n \"earliest\": \"2022-07-01\", \n \"latest\": \"2023-02-01\",\n },\n },\n ),\n \n flu_vax_med_date=patients.with_these_medications(\n flu_med_codes,\n on_or_after=\"2022-07-01\",\n find_first_match_in_period=True,\n returning=\"date\",\n date_format=\"YYYY-MM-DD\",\n return_expectations={ \n \"date\": {\n \"earliest\": \"2022-07-01\", \n \"latest\": \"2023-02-01\",\n },\n },\n ),\n\n flu_vax_clinical_date=patients.with_these_clinical_events(\n flu_clinical_given_codes,\n ignore_days_where_these_codes_occur=flu_clinical_not_given_codes,\n on_or_after=\"2022-07-01\",\n 
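# keep the earliest matching vaccination record on or after the season start\n        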
find_first_match_in_period=True,\n returning=\"date\",\n date_format=\"YYYY-MM-DD\",\n return_expectations={ \n \"date\": {\n \"earliest\": \"2022-07-01\", \n \"latest\": \"2023-02-01\",\n },\n },\n ),\n)\n","sub_path":"analysis/study_definition_baseline.py","file_name":"study_definition_baseline.py","file_ext":"py","file_size_in_byte":22328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"206582067","text":"# ch16_10.py\r\nimport re\r\n\r\nmsg = 'Please call my secretary using 02-26669999'\r\npattern = r'(\\d{2})-(\\d{8})'\r\nphoneNum = re.search(pattern, msg) # 傳回搜尋結果\r\nareaNum, localNum = phoneNum.groups() # 留意是groups()\r\nprint(f\"區域號碼是: {areaNum}\") # 顯示區域號碼\r\nprint(f\"電話號碼是: {localNum}\") # 顯示電話號碼\r\n\r\n\r\n\r\n","sub_path":"exercise/learn_python_dm2039/ch16/ch16_10.py","file_name":"ch16_10.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"557023205","text":"from os import environ\nfrom flask import Flask\nfrom flask import render_template, jsonify, request, Response\nfrom settings import *\nimport json\n\n\n\n\"\"\" a list of dictionaries to transfer easily to json\"\"\"\nbooks = [{\n 'name': 'Journey to Yaish Life',\n 'price': ' a lot',\n 'isbn': 929292929\n },\n {\n 'name': 'Journey to Yaish Life',\n 'price': ' a lot',\n 'isbn': 6664646464\n }\n ]\n\n\"\"\" root directory, returning a string\"\"\"\n@app.route('/')\ndef home():\n \"\"\"Renders the home page.\"\"\"\n return \"Hello world\"\n\n\ndef valid_book_object(book_object):\n \"\"\" validate that the object we got sent, return the right information\"\"\"\n if ('isbn' in book_object and 'name' in book_object and 'price' in book_object):\n return True\n return False \n\n\n\"\"\" The methods here is set for POST, which mean it accept information from the server\"\"\"\n@app.route('/books', methods=['POST'])\ndef add_book():\n request_data = request.get_json() #with request we take the data sent from the client, get_json decode the json object\n if valid_book_object(request_data):\n new_book = {'name': request_data['name'], \n 'price': request_data['price'],\n 'isbn': request_data['isbn']}\n books.insert(0, new_book) \n response = Response(\"\", 201, mimetype='application/sjon' ) #response class handle the respnse the server sends back after processing the request. 
we set it to 201 to say that the request has been processed successfully\n        response.headers['location'] = '/books/'+str(new_book['isbn']) # by defining location in the header it points the client to the location we desire \n        return response\n    else: \n        invalid_book_object_error = {\n            \"error\": \"Invalid book object passed in the request\",\n            \"help_string\": \"please add a book object following this pattern: {'name':'bookname', 'price':7.99, 'isbn': 33343434}\"\n        }\n        response = Response(json.dumps(invalid_book_object_error), status=400, mimetype='application/json')\n        return response #we can send back error response status codes like 422, 403\n\n\n\"\"\"the route here is set to PUT, which updates information; the client needs to send all parameters of the resource\"\"\"\n@app.route('/books/<int:isbn>', methods=['PUT'])\ndef replace_book(isbn):\n    request_data = request.get_json()\n    update_book = {\n        'name': request_data['name'],\n        'price': request_data['price'],\n        'isbn' : isbn}\n    i = 0\n    for book in books:\n        if book['isbn']==isbn:\n            books[i]=update_book\n        i+=1\n    return Response(\"\", status=204) #204 no content \n\n\n\"\"\" patch is used to update a specific attribute and not the whole object\"\"\"\n@app.route('/books/<int:isbn>', methods=['PATCH'])\ndef update_book(isbn):\n    request_data = request.get_json()\n    for book in books:\n        if book['isbn']== isbn:\n            for key in request_data.keys():\n                if key in book.keys():\n                    book[key] = request_data[key]\n    response = Response(\"\", status=204)\n    response.headers['Location'] = '/books/' + str(isbn)\n    return response\n\n\n\"\"\"delete an object\"\"\"\n@app.route(\"/books/<int:isbn>\", methods=[\"DELETE\"])\ndef delete_book(isbn):\n    for book in books:\n        if book['isbn'] == isbn:\n            books.remove(book) # del(book) only unbinds the loop variable; remove the entry from the list instead\n            response = Response(\"\", status=204)\n            return response\n    invalid_isbn = {\"error\": \"Invalid book object passed in the request\"}\n    response = Response(json.dumps(invalid_isbn), status=404, mimetype='application/json')\n    return response\n\n\n\"\"\"the route here is by default set to get\"\"\"\n@app.route('/books')\ndef get_books():\n    return jsonify({'books':books}) #function in Flask that creates a json object\n\n\n@app.route('/books/<int:isbn>') # here we add an option to put a variable in the address so we can create a search\ndef get_books_by_isbn(isbn):\n    \"\"\" the function gets a book by its isbn\"\"\"\n    books_detail = {}\n    for book in books:\n        if book['isbn']==isbn:\n            books_detail = {\n                'name' : book['name'], \n                'price': book['price']\n            }\n    return jsonify(books_detail)\n\n\nif __name__ == '__main__':\n    HOST = environ.get('SERVER_HOST', 'localhost')\n    try:\n        PORT = int(environ.get('SERVER_PORT', '5555'))\n    except ValueError:\n        PORT = 5555\n    app.run(HOST, PORT, debug=True)\n\n\n","sub_path":"fullstack_template/server/learn.py","file_name":"learn.py","file_ext":"py","file_size_in_byte":4469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"614420377","text":"import sys\nfrom collections import deque\ninput = sys.stdin.readline\n\nn, m = map(int, input().split())\ngraph = []\n\nfor _ in range(n):\n    graph.append(list(map(int, input().rstrip())))\n\nvisited = [[[0] * m for _ in range(n)] for _ in range(2)]\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\ndef bfs(crush, _x, _y):\n    queue = deque()\n    queue.append([crush, _x, _y])\n    visited[crush][_x][_y] = 1\n\n    while queue:\n        crush, x, y = queue.popleft()\n        if x == n - 1 and y == m - 1:\n            return visited[crush][x][y]\n        for i in range(4):\n            nx = x + dx[i]\n            ny = y + dy[i]\n            if nx < 0 or ny < 0 or nx >= n or ny >= m:\n                
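# neighbor lies outside the grid; skip it\n                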
continue\n            if visited[crush][nx][ny] == 0:\n                # also guard layer 1, so an already-reached broken-wall cell is not overwritten with a longer distance\n                if graph[nx][ny] == 1 and crush == 0 and visited[1][nx][ny] == 0:\n                    queue.append([1, nx, ny])\n                    visited[1][nx][ny] = visited[crush][x][y] + 1\n                if graph[nx][ny] == 0:\n                    queue.append([crush, nx, ny])\n                    visited[crush][nx][ny] = visited[crush][x][y] + 1\n    return -1\n\nprint(bfs(0, 0, 0))","sub_path":"python/2206.py","file_name":"2206.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"85698537","text":"import numpy as np\n\nfrom src.rnn import RNN\n\nfrom src.testfuncs import gen_in_out_one_in_subs\n\nfrom tqdm import tqdm\n\nfrom stdParams import *\nimport os\n\nfrom datetime import datetime\n\nimport sys\n\nimport pandas as pd\n\nimport argparse\n\nparser = argparse.ArgumentParser()\n\nparser.add_argument(\"input_type\",\nhelp='''specify one of four input types (homogeneous_identical_binary,\nhomogeneous_independent_gaussian, heterogeneous_identical_binary,\nheterogeneous_independent_gaussian)''',\nchoices=['homogeneous_identical_binary',\n'homogeneous_independent_gaussian',\n'heterogeneous_identical_binary',\n'heterogeneous_independent_gaussian'])\n\nparser.add_argument(\"--T_run_adapt\",\nhelp=\"number of time steps for adaptation\",\ntype=int,\ndefault=10000)\n\nparser.add_argument(\"--T_prerun_sample\",\nhelp=\"number of prerun time steps before recording a sample\",\ntype=int,\ndefault=100)\n\nparser.add_argument(\"--T_run_sample\",\nhelp=\"time steps for recording a sample\",\ntype=int,\ndefault=1000)\n\nparser.add_argument(\"--y_mean_target\",\nhelp=\"target activity\",\ntype=float,\ndefault=0.05)\n\nparser.add_argument(\"--sigm_e\",\nhelp=\"standard deviation of external driving\",\ntype=float,\ndefault=0.5)\n\nparser.add_argument(\"--n_samples\",\nhelp=\"number of runs to average over for each data point.\",\ntype=int,\ndefault=10)\n\nargs = parser.parse_args()\n\ninput_type = ['homogeneous_identical_binary',\n'homogeneous_independent_gaussian',\n'heterogeneous_identical_binary',\n'heterogeneous_independent_gaussian'].index(args.input_type)\n\nN_list = [100,200,300,400,500,700,1000]\nn_N = len(N_list)\n\nn_samples = args.n_samples\n\nsigm_e = args.sigm_e\nT_run_adapt = args.T_run_adapt\nT_prerun_sample = args.T_prerun_sample\nT_run_sample = args.T_run_sample\ny_mean_target = args.y_mean_target\n\n#####################################\n\n# Mean Absolute Error\nMAE_list = []\n\n####################################\n\nfor k in tqdm(range(n_N)):\n\n    N = N_list[k]\n\n    for l in tqdm(range(n_samples)):\n\n        rnn = RNN(N=N,y_mean_target=y_mean_target,y_std_target=1.)\n        \n        rnn.W /= np.abs(np.linalg.eigvals(rnn.W)).max()\n        \n        rnn.eps_a_r = 0.\n\n        ##################\n\n        if input_type == 0:\n\n            rnn.w_in = np.ones((rnn.N,1))\n\n            u_in_adapt,u_out = gen_in_out_one_in_subs(T_run_adapt,1)\n            u_in_adapt *= sigm_e\n\n            adapt = rnn.run_hom_adapt(u_in=u_in_adapt,T_skip_rec=1000,show_progress=False)\n\n            #run test sample\n            u_in_sample,u_out_sample = gen_in_out_one_in_subs(T_run_sample+T_prerun_sample,0)\n            u_in_sample *= sigm_e\n\n            y_res,X_r_res,X_e_res = rnn.run_sample(u_in=u_in_sample,show_progress=False)\n\n\n        elif input_type == 1:\n\n            rnn.w_in = np.ones((rnn.N,1))\n\n            adapt = rnn.run_hom_adapt(u_in=None,\n            sigm_e=sigm_e,T=T_run_adapt,T_skip_rec=1000,show_progress=False)\n\n            #run sample after adaptation, USING THE INPUT STATISTICS OF THE ADAPTATION!!\n            y_res,X_r_res,X_e_res = rnn.run_sample(u_in=None,sigm_e=sigm_e,T=T_run_sample+T_prerun_sample,show_progress=False)\n\n        elif input_type == 2:\n\n            
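# heterogeneous_identical_binary: per-neuron Gaussian input weights, shared binary drive signal\n            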
rnn.w_in = np.random.normal(0.,1.,(N,1))\n\n u_in_adapt,u_out = gen_in_out_one_in_subs(T_run_adapt,1)\n u_in_adapt *= sigm_e\n\n adapt = rnn.run_hom_adapt(u_in=u_in_adapt,T_skip_rec=1000,show_progress=False)\n\n #run test sample\n u_in_sample,u_out_sample = gen_in_out_one_in_subs(T_run_sample+T_prerun_sample,0)\n u_in_sample *= sigm_e\n\n y_res,X_r_res,X_e_res = rnn.run_sample(u_in=u_in_sample,show_progress=False)\n\n else:\n\n rnn.w_in = np.random.normal(0.,1.,(N,1))\n\n sigm_e_dist = np.abs(rnn.w_in[:,0]) * sigm_e\n\n adapt = rnn.run_hom_adapt(u_in=None,sigm_e=sigm_e_dist,T=T_run_adapt,T_skip_rec=1000,show_progress=False)\n\n #run sample after adaptation, USING THE INPUT STATISTICS OF THE ADAPTATION!!\n y_res,X_r_res,X_e_res = rnn.run_sample(u_in=None,sigm_e=sigm_e_dist,T=T_run_sample+T_prerun_sample,show_progress=False)\n\n ####################################\n '''\n y_list.append(y_res[T_prerun_sample:,:])\n X_r_list.append(X_r_res[T_prerun_sample:,:])\n X_e_list.append(X_e_res[T_prerun_sample:,:])\n\n W_list.append(rnn.W)\n a_list.append(rnn.a_r)\n b_list.append(rnn.b)\n '''\n\n Var_X_r = X_r_res[T_prerun_sample:,:].var(axis=0)\n Var_y = y_res[T_prerun_sample:,:].var()\n Var_W = rnn.W.var(axis=1) * rnn.N\n \n MAE = np.abs(Var_X_r - Var_y*Var_W).mean()\n\n MAE_list.append({'N':N,'MAE':MAE})\n\nMAE_df = pd.DataFrame(MAE_list, columns=('N','MAE'))\n\n################################\n\nif not(os.path.isdir(os.path.join(DATA_DIR, args.input_type+'_input_ESN'))):\n os.makedirs(os.path.join(DATA_DIR, args.input_type+'_input_ESN'))\n\nMAE_df.to_hdf(os.path.join(DATA_DIR, args.input_type + '_input_ESN/var_predict_scaling_fix_R_a_df.h5'),'table')\n","sub_path":"code/ESN_code/simulation/var_predict_scaling_fix_R_a.py","file_name":"var_predict_scaling_fix_R_a.py","file_ext":"py","file_size_in_byte":4996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"220130752","text":"import unittest2 as unittest\n\nimport flask\n\nfrom flaskext import celery\n\n\nclass test_Celery(unittest.TestCase):\n\n def get_app(self, **kwargs):\n app = flask.Flask(__name__)\n default_config = dict(\n BROKER_TRANSPORT=\"memory\",\n )\n app.config.update(default_config, **kwargs)\n return app\n\n def test_loader_is_configured(self):\n from celery.loaders import current_loader, load_settings\n loader = current_loader()\n self.assertIsInstance(loader, celery.FlaskLoader)\n settings = load_settings()\n self.assertTrue(loader.configured)\n\n def test_task_honors_app_settings(self):\n app = self.get_app(\n CELERY_IGNORE_RESULT=True,\n CELERY_TASK_SERIALIZER=\"msgpack\",\n )\n c = celery.Celery(app)\n\n @c.task(foo=1)\n def add_task_args(x, y):\n return x + y\n\n @c.task\n def add_task_noargs(x, y):\n return x + y\n\n for task in add_task_args, add_task_noargs:\n self.assertTrue(any(\"BaseFlaskTask\" in repr(cls)\n for cls in task.__class__.mro()))\n self.assertEqual(task(2, 2), 4)\n self.assertEqual(task.serializer, \"msgpack\")\n self.assertTrue(task.ignore_result)\n\n def test_establish_connection(self):\n app = self.get_app()\n c = celery.Celery(app)\n Task = c.create_task_cls()\n conn = Task.establish_connection()\n self.assertIn(\"carrot.backends.queue\", repr(conn.create_backend()))\n conn.connect()\n\n def test_apply(self):\n app = self.get_app()\n c = celery.Celery(app)\n\n @c.task\n def add(x, y):\n return x + y\n\n res = add.apply_async((16, 16))\n self.assertTrue(res.task_id)\n\n consumer = add.get_consumer()\n while True:\n m = consumer.fetch()\n if m:\n 
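# a message for the task has reached the in-memory broker\n                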
break\n self.assertEqual(m.payload[\"task\"], add.name)\n\n def test_Worker(self):\n app = self.get_app()\n c = celery.Celery(app)\n worker = c.Worker()\n self.assertTrue(worker)\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_basic.py","file_name":"test_basic.py","file_ext":"py","file_size_in_byte":2189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"362065828","text":"import random\r\n\r\nwords = ['programming', 'tiger', 'lamp', 'television',\r\n'laptop', 'water', 'microscope', 'doctor', 'youtube',\r\n'projects']\r\n\r\nrandom_word = random.choice(words)\r\n\r\nprint('our random word', random_word)\r\n\r\nprint('*********** WORD GUESSING GAME ***********')\r\n\r\nuser_guesses = ''\r\nchances = 10\r\n\r\nwhile chances > 0:\r\n wrong_guesses = 0\r\n for character in random_word:\r\n if character in user_guesses:\r\n print(f\"Correct guess: {character}\")\r\n else:\r\n wrong_guesses += 1\r\n print('_')\r\n\r\n if wrong_guesses == 0:\r\n print(\"Correct.\")\r\n print(f\"Word : {random_word}\")\r\n break\r\n guess = input('Make a guess: ')\r\n user_guesses += guess\r\n\r\n if guess not in random_word:\r\n chances -= 1\r\n print(f\"Wrong. You have {chances} more chances\")\r\n\r\n if chances == 0:\r\n print('game over')\r\n","sub_path":"wordgame.py","file_name":"wordgame.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"418270795","text":"import tkinter as tk # Provide window to display content\r\nfrom tkinter import messagebox\r\n\r\nimport transcript as ts # Get the library to parse the content\r\nimport console # Log actions\r\n\r\nconsole.log(console.R, 'Running interfase')\r\n\r\n\r\nclass MainWindow(tk.Frame):\r\n\r\n def __init__(self, master=None):\r\n super().__init__(master)\r\n \r\n self.pack() # Show MainWindow\r\n self.widgets() # Create function for the widgets\r\n\r\n def widgets(self):\r\n \r\n console.log(console.L, 'Loading ID label and entry')\r\n\r\n self.room_id = tk.Label()\r\n self.room_id['text'] = \"ID:\"\r\n self.room_id['bg'] = \"white\"\r\n self.room_id['font'] = (\"Courier\", 20, \"bold\")\r\n self.room_id.pack(side=tk.LEFT)\r\n \r\n self.search = tk.Text()\r\n self.search['width'] = 10\r\n self.search['height'] = 1\r\n self.search['font'] = (\"Courier\", 30)\r\n self.search.pack(side=tk.LEFT)\r\n\r\n console.log(console.S, 'Loaded ID label and entry')\r\n console.log(console.L, 'Loading day label and entry')\r\n\r\n self.room_day = tk.Label()\r\n self.room_day['text'] = \"Day:\"\r\n self.room_day['bg'] = \"white\"\r\n self.room_day['font'] = (\"Courier\", 20, \"bold\")\r\n self.room_day.pack(side=tk.LEFT)\r\n\r\n self.day = tk.Text()\r\n self.day['width'] = 2\r\n self.day['height'] = 1\r\n self.day['font'] = (\"Courier\", 30)\r\n self.day.pack(side=tk.LEFT)\r\n\r\n console.log(console.S, 'Loaded day label and entry')\r\n console.log(console.L, 'Loading month label and entry')\r\n\r\n self.room_month = tk.Label()\r\n self.room_month['text'] = \"Month:\"\r\n self.room_month['bg'] = \"white\"\r\n self.room_month['font'] = (\"Courier\", 20, \"bold\")\r\n self.room_month.pack(side=tk.LEFT)\r\n\r\n self.month = tk.Text()\r\n self.month['width'] = 2\r\n self.month['height'] = 1\r\n self.month['font'] = (\"Courier\", 30)\r\n self.month.pack(side=tk.LEFT)\r\n\r\n console.log(console.S, 'Loaded month label and entry')\r\n console.log(console.L, 'Loading year label and entry')\r\n\r\n 
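# the year widgets mirror the day/month pattern above\r\n        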
self.room_year = tk.Label()\r\n self.room_year['text'] = \"Year:\"\r\n self.room_year['bg'] = \"white\"\r\n self.room_year['font'] = (\"Courier\", 20, \"bold\")\r\n self.room_year.pack(side=tk.LEFT)\r\n\r\n self.year = tk.Text()\r\n self.year['width'] = 4\r\n self.year['height'] = 1\r\n self.year['font'] = (\"Courier\", 30)\r\n self.year.pack(side=tk.LEFT)\r\n\r\n console.log(console.S, 'Loaded year label and entry')\r\n console.log(console.L, 'Loading search icon')\r\n\r\n self.search_ico = tk.PhotoImage(file=\"elements/search.png\")\r\n\r\n console.log(console.S, 'Loaded search icon')\r\n console.log(console.L, 'Loading search button')\r\n \r\n self.get = tk.Button()\r\n self.get['image'] = self.search_ico\r\n self.get['height'] = 40\r\n self.get['width'] = 40\r\n self.get['border'] = 0\r\n self.get['command'] = self.recieve\r\n self.get.pack(side=tk.LEFT)\r\n\r\n console.log(console.S, 'Loaded search button')\r\n\r\n self.display = tk.Text()\r\n\r\n def recieve(self):\r\n\r\n year = self.year.get('1.0', tk.END).strip('\\n')\r\n month = self.month.get('1.0', tk.END).strip('\\n')\r\n day = self.day.get('1.0', tk.END).strip('\\n')\r\n id_ = self.search.get('1.0', tk.END).strip('\\n')\r\n\r\n if id_ == '':\r\n messagebox.showerror('Fatal Error', 'Please enter an ID')\r\n console.log(console.E, 'Invalid ID entered')\r\n\r\n else:\r\n try: id_ = int(id_)\r\n except ValueError:\r\n messagebox.showerror('Fatal Error', 'ID must be an integer')\r\n console.log(console.E, 'ID not an integer')\r\n\r\n if day != '' and month != '' and year != '' and id_ != '':\r\n try: day = int(day)\r\n except ValueError:\r\n messagebox.showerror('Fatal Error', 'Day must be an integer between 1 and 7')\r\n console.log(console.E, 'Day not an integer')\r\n\r\n try: month = int(month)\r\n except ValueError:\r\n messagebox.showerror('Fatal Error', 'Month must be an integer between 1 and 31/30/29')\r\n console.log(console.E, 'Month not an integer')\r\n\r\n try: year = int(year)\r\n except ValueError:\r\n messagebox.showerror('Fatal Error', 'Year must be an integer')\r\n console.log(console.E, 'Year not an integer')\r\n\r\n room_request = ts.Room(id_, year=year, month=month, day=day)\r\n self.process(room_request)\r\n\r\n else:\r\n room_request = ts.Room(id_)\r\n self.process(room_request)\r\n\r\n def process(self, room_request):\r\n\r\n text = b''\r\n for message in room_request.messages():\r\n text += message + b'\\n\\n'\r\n \r\n \r\n self.display.insert(tk.END, text.decode('utf-8'))\r\n self.display.config(state=tk.DISABLED)\r\n self.display.pack(side=tk.BOTTOM)\r\n\r\n self.scroll = tk.Scrollbar()\r\n \r\n \r\n\r\nroot = tk.Tk()\r\nroot.configure(background=\"white\") # Set background window as white\r\nroot.title('Stack Overflow Chat')\r\n\r\napp = MainWindow(master=root) # Pass root to MainWindow\r\napp.mainloop() # Call MainWindow's mainloop\r\n","sub_path":"driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":5407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"103205226","text":"# encoding=utf-8\nimport logging\n\nimport requests\nfrom brunns.matchers.html import has_title\nfrom brunns.matchers.object import between\nfrom brunns.matchers.response import response_with\nfrom contexttimer import Timer\nfrom hamcrest import assert_that, is_, has_entry\n\nfrom mbtest.imposters import Imposter, Proxy, Stub, Predicate\nfrom mbtest.matchers import had_request\n\nlogger = logging.getLogger(__name__)\n\n\ndef test_proxy(mock_server):\n imposter = 
Imposter(Proxy(to=\"http://example.com\"))\n\n with mock_server(imposter) as server:\n response = requests.get(\"{0}/\".format(imposter.url))\n\n assert_that(response, is_(response_with(status_code=200, body=has_title(\"Example Domain\"))))\n assert_that(server, had_request(path=\"/\", method=\"GET\"))\n\n\ndef test_proxy_in_stub(mock_server):\n imposter = Imposter(Stub(responses=Proxy(to=\"http://example.com\")))\n\n with mock_server(imposter):\n response = requests.get(\"{0}/\".format(imposter.url))\n\n assert_that(response, is_(response_with(status_code=200, body=has_title(\"Example Domain\"))))\n\n\ndef test_proxy_delay(mock_server):\n imposter = Imposter(Stub(responses=Proxy(to=\"http://example.com\", wait=500)))\n\n with mock_server(imposter), Timer() as t:\n requests.get(\"{0}/\".format(imposter.url))\n\n assert_that(\n t.elapsed, between(0.5, 0.9)\n ) # Slightly longer than the wait time, to give example.com and the 'net time to work.\n\n\ndef test_inject_headers(mock_server):\n target_imposter = Imposter(Stub(Predicate(path=\"/test\")))\n with mock_server(target_imposter) as server:\n proxy_imposter = Imposter(\n Stub(\n responses=Proxy(\n to=target_imposter.url,\n inject_headers={\"X-Clacks-Overhead\": \"GNU Terry Pratchett\"},\n )\n )\n )\n server.add_imposters(proxy_imposter)\n\n requests.get(proxy_imposter.url / \"test\")\n assert_that(\n server,\n had_request(\n path=\"/test\", headers=has_entry(\"X-Clacks-Overhead\", \"GNU Terry Pratchett\")\n ),\n )\n\n\ndef test_structure_to():\n expected_proxy = Proxy(\"http://darwin.dog\")\n proxy_structure = expected_proxy.as_structure()\n proxy = Proxy.from_structure(proxy_structure)\n assert proxy.to == expected_proxy.to\n\n\ndef test_structure_wait():\n expected_proxy = Proxy(\"http://darwin.dog\", wait=200)\n proxy_structure = expected_proxy.as_structure()\n proxy = Proxy.from_structure(proxy_structure)\n assert proxy.wait == expected_proxy.wait\n\n\ndef test_structure_inject_headers():\n expected_proxy = Proxy(\n \"http://darwin.dog\", inject_headers={\"X-Clacks-Overhead\": \"GNU Terry Pratchett\"}\n )\n proxy_structure = expected_proxy.as_structure()\n proxy = Proxy.from_structure(proxy_structure)\n assert proxy.inject_headers == expected_proxy.inject_headers\n","sub_path":"tests/integration/test_proxy.py","file_name":"test_proxy.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"206173939","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/zenobius/Dev/django-apps/django-ikari/ikari/admin.py\n# Compiled at: 2013-06-26 17:21:56\nimport logging\nfrom django.contrib import admin\nfrom django import forms\nfrom . import settings\nfrom . 
import models\nlogger = logging.getLogger(__name__)\nlogger.addHandler(settings.null_handler)\n\nclass DomainForm(forms.ModelForm):\n\n    class Meta:\n        model = models.Domain\n\n    def clean_domain(self):\n        domain = self.cleaned_data['domain']\n        if domain == '':\n            domain = None\n        return domain\n\n\nclass DomainAdmin(admin.ModelAdmin):\n    form = DomainForm\n\n    def anchored_on(instance):\n        return ('{thing} ({thing_type})').format(thing=instance.anchored_on, thing_type=instance.anchored_on.__class__.__name__)\n\n    def domain(instance):\n        if instance.subdomain:\n            return ('{domain}{tld}').format(domain=instance.subdomain, tld=settings.SUBDOMAIN_ROOT)\n        if instance.domain:\n            return instance.domain\n\n    def verify_domain(instance, request, queryset):\n        for domain in queryset:\n            logger.debug('attempting to verify domain:', domain)\n\n    verify_domain.short_description = 'Query the domain txt record for the uuid.'\n\n    def disable_domain(instance, request, queryset):\n        for domain in queryset:\n            domain.is_active = False\n            domain.save()\n\n    disable_domain.short_description = 'Disable the selected domains.'\n\n    def enable_domain(instance, request, queryset):\n        for domain in queryset:\n            domain.is_active = True\n            domain.save()\n\n    enable_domain.short_description = 'Enable the selected domains.'\n\n    def set_domain_as_primary(instance, request, queryset):\n        groups = {}\n        ambiguous_items = []\n        for domain in queryset:\n            # setdefault stores the list in the dict; plain get() returned a fresh list that was discarded\n            group = groups.setdefault(domain.anchored_on, [])\n            group.append(domain)\n\n        for key, value in groups.iteritems():\n            if len(value) > 1:\n                ambiguous_items = ambiguous_items + value\n            else:\n                domain = value[0]\n                domain.anchored_on.anchored_domains.update(is_primary=False)\n                domain.is_primary = True\n                domain.save()\n\n    set_domain_as_primary.short_description = 'Make the selected items the primary domain for their anchored objects.'\n    list_display = (anchored_on, domain, 'is_public', 'is_active', 'is_primary')\n    actions = [verify_domain, disable_domain, enable_domain, set_domain_as_primary]\n\n\nadmin.site.register(models.Domain, DomainAdmin)","sub_path":"pycfiles/django-ikari-0.0.5.tar/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"558059713","text":"\"\"\"\nExample deep neural network annealing.\n\"\"\"\n\nimport numpy as np\nfrom varnet import varnet\nimport sys, time\n\nninit = int(sys.argv[1])\nM = int(sys.argv[2])\nD_hidden = int(sys.argv[3])\nadolcID = int(sys.argv[4])\n\n# Define the transfer function\ndef sigmoid(x, W, b):\n    linpart = np.dot(W, x) + b\n    return 1.0 / (1.0 + np.exp(-linpart))\n\n# Network structure\nN = 3 # Total number of layers\nD_in = 784 # Number of neurons in the input layer\nD_out = 2 # Number of neurons in the output layer\n#D_hidden = 30 # Number of neurons in the hidden layers\n\nstructure = np.zeros(N, dtype='int')\nstructure[0] = D_in # neurons in the input layer\nstructure[N-1] = D_out # neurons in the output layer\nfor i in range(1, N-1):\n    structure[i] = D_hidden # neurons in the hidden layers\n\nLidx = [np.linspace(0, D_in-1, D_in, dtype='int'), np.linspace(0, D_out-1, D_out, dtype='int')]\n\n################################################################################\n# Action/annealing parameters\n################################################################################\n# RM, RF0\nRM = 1.0\nRF0 = 1.0e-8 * RM * float(np.sum(structure) - structure[0]) / float(structure[0] + structure[-1])\n# alpha, and beta ladder\nalpha = 
1.1\nbeta_array = np.linspace(0, 311, 312) # beta_array = np.linspace(0, 435, 436)\n\n################################################################################\n# Input and output data\n################################################################################\n# data_in = np.load(\"/home/zhf018/mnist/data/imtrain_norm.npy\")[:M]\n# data_out = np.load(\"/home/zhf018/mnist/data/labtrain.npy\")[:M]\ndata_in = np.load(\"/home/zhf018/mnist/data/imtrain_noisy_[1, 7].npy\")[:M]\ndata_out = np.load(\"/home/zhf018/mnist/data/labtrain_noisy_[1, 7].npy\")[:M]\n\n################################################################################\n# Initial path/parameter guesses\n################################################################################\nDHmax = 1000\nninitmax = 100\n#np.random.seed(27509436 + (M-1)*D_in*DHmax*ninitmax + D_hidden*ninit)\nnp.random.rand\n# Neuron states\nXin = np.random.randn(D_in)\nXin = (Xin - np.average(Xin)) / np.std(Xin)\n#X0 = [Xin]\nX0 = np.copy(Xin)\nfor n in xrange(N-2):\n X0 = np.append(X0, 0.2*np.random.rand(D_hidden) + 0.4)\nX0 = np.append(X0, 0.2*np.random.rand(D_out) + 0.4)\n\nfor m in xrange(M - 1):\n Xin = np.random.randn(D_in)\n Xin = (Xin - np.average(Xin)) / np.std(Xin)\n X0 = np.append(X0, Xin)\n for n in xrange(N-2):\n X0 = np.append(X0, 0.2*np.random.rand(D_hidden) + 0.4)\n X0 = np.append(X0, 0.2*np.random.rand(D_out) + 0.4)\n\nX0 = np.array(X0).flatten()\n\n# Parameters\nNP = np.sum(structure[1:]*structure[:-1] + structure[1:])\n#Pidx = []\nP0 = np.array([], dtype=np.float64)\n\nW_i0 = 0\nW_if = structure[0]*structure[1]\nb_i0 = W_if\nb_if = b_i0 + structure[1]\n\nfor n in xrange(N - 1):\n if n == 0:\n Pidx = np.arange(W_i0, W_if, 1, dtype='int')\n else:\n Pidx = np.append(Pidx, np.arange(W_i0, W_if, 1, dtype='int'))\n if n == 0:\n P0 = np.append(P0, (2.0*np.random.rand(structure[n]*structure[n+1]) - 1.0) / D_in)\n else:\n P0 = np.append(P0, (2.0*np.random.rand(structure[n]*structure[n+1]) - 1.0) / D_hidden)\n P0 = np.append(P0, np.zeros(structure[n+1]))\n\n if n < N - 2:\n W_i0 = b_if\n W_if = W_i0 + structure[n+1]*structure[n+2]\n b_i0 = W_if\n b_if = b_i0 + structure[n+2]\n\nP0 = np.array(P0).flatten()\nPidx = np.array(Pidx).flatten().tolist()\n\n################################################################################\n# Annealing\n################################################################################\n# Initialize Annealer\nanneal1 = varnet.Annealer()\n# Set the network structure\nanneal1.set_structure(structure)\n# Set the activation function\nanneal1.set_activation(sigmoid)\n# Set the input and output data\nanneal1.set_input_data(data_in)\nanneal1.set_output_data(data_out)\n\n# Run the annealing using L-BFGS-B\nBFGS_options = {'gtol':1.0e-12, 'ftol':1.0e-12, 'maxfun':1000000, 'maxiter':1000000}\ntstart = time.time()\nanneal1.anneal(X0, P0, alpha, beta_array, RM, RF0, Pidx, Lidx=Lidx,\n method='L-BFGS-B', opt_args=BFGS_options, adolcID=adolcID)\nprint(\"\\nADOL-C annealing completed in %f s.\"%(time.time() - tstart))\n\n# Save the results of annealing\n#anneal1.save_states(\"L%d_%s_%dex/states_%d.npy\"%(L, suffix, M, ninit))\n#anneal1.save_params(\"params.npy\")\nanneal1.save_action_errors(\"/home/zhf018/mnist17_N3/DH%d_%dex/action_errors_%d.npy\"%(D_hidden, M, ninit))\n#anneal1.save_io(\"DH%d_%dex/io_%d.npy\"%(D_hidden, M, ninit), dtype=np.float16)\nanneal1.save_Wb(\"/home/zhf018/mnist17_N3/DH%d_%dex/W_%d.npy\"%(D_hidden, M, ninit),\n \"/home/zhf018/mnist17_N3/DH%d_%dex/b_%d.npy\"%(D_hidden, M, ninit), 
dtype=np.float16)\n","sub_path":"varnet_mnist_N3.py","file_name":"varnet_mnist_N3.py","file_ext":"py","file_size_in_byte":4804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"205083201","text":"import pandas as pd\nfrom Functions.Filters import *\nfrom Functions.Miscellaneous import *\nfrom settings import *\npd.set_option('display.max_columns', 10)\nclass AnalyzeBiasGenerator():\n\n def __init__(self,df,bias_dict,initial_filter):\n self.df = df\n self.bias_dict = bias_dict\n self.initial_filter = initial_filter\n\n def main(self):\n self.df = self.df[self.df['is_rating_updated']==1]\n self.get_initial_filters()\n self.get_filter_column_names()\n #filter_column_names = self.bias_dict['filter_column_names']\n groupby_column_names = self.bias_dict['groupby_column_names']\n output_column_names = self.bias_dict['output_column_names']\n\n\n filters_0 = self.bias_dict['filter_column_names'][self.filter_column_names[0]]\n filters_1 = self.bias_dict['filter_column_names'][self.filter_column_names[1]]\n\n for variation_number in range(filters_0 ['iterations']):\n filtered_rows1 = self.get_rows_for_function( filters_0,self.initial_filtered_df,variation_number,self.filter_column_names[0])\n\n for variation_number1 in range(filters_1['iterations']):\n\n filtered_rows= self.get_rows_for_function( filters_1 ,filtered_rows1,variation_number1,self.filter_column_names[1])\n\n\n sample_size = len(filtered_rows)\n print(\"Sample\", sample_size)\n #columns = bias_inputs['column_names']\n #columns.append(bias_column)\n grouped =get_groupby(filtered_rows, groupby_column_names , output_column_names)\n grouped['count'] = groupby_count(filtered_rows, groupby_column_names)[0]\n print(grouped)\n\n def get_initial_filters(self):\n self.df = get_rows_where_column_equal_to(self.df, 1, \"is_rating_updated\")\n column_name = self.initial_filter['column_name']\n min= self.initial_filter['min']\n max = self.initial_filter['max']\n function = self.initial_filter['function']\n self.initial_filtered_df = function(self.df,min,max,column_name)\n\n def get_filter_column_names(self):\n self.filter_column_names = []\n for column in self.bias_dict['filter_column_names']:\n self.filter_column_names.append(column)\n\n\n def get_rows_for_function(self,filters,df,variation_number,filter_column_name):\n\n if len(filters['min'])>=1:\n min_value = filters['min'][variation_number]\n max_value = filters['max'][variation_number]\n filtered_rows = get_rows_where_column_between(df, min_value, max_value,filter_column_name)\n\n elif len(filters['other_column_names'])>=1:\n function = filters['function']\n other_column_name = filters['other_column_names'][variation_number]\n\n filtered_rows =function(df,filters['difference'][variation_number], filter_column_name, other_column_name)\n\n return filtered_rows\n\n\nif __name__ == '__main__':\n file_name = \"all_game_all_player_performance_rating_old\"\n all_game_all_player = pd.read_pickle(local_file_path + \"\\\\\" + file_name)\n\n\n all_game_all_player['net_opponent_adjusted_performance_rating'] = all_game_all_player['opponent_adjusted_performance_rating']-all_game_all_player['rating']\n all_game_all_player['rating_difference'] = all_game_all_player['rating']-all_game_all_player['opponent_team_rating']\n\n initial_filter = {\n #'column_name':'games_played_past_80_days',\n 'column_name':'start_date_time',\n 'min':'2019-01-01',\n 'max':'2030-01-01',\n 'function':get_rows_where_column_between,\n }\n\n bias_dict = {\n 'filter_column_names':{\n 
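# each key names a dataframe column; the value gives the filter function and its bounds\n            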
'rating':{'function':get_rows_where_column_between,'min':[2600],'max':[6000],'equal_to':[],'iterations':1},\n # 'cumulative_region_games_played': {'function': get_rows_where_column_between, 'min': [1000], 'max': [200000],\n # 'equal_to': [], 'iterations': 1},\n 'weighted_rating':{'function':get_rows_where_column_less_than_other_column,'equal_to':[],'difference':[-65],'iterations':1,'other_column_names':['rating'],'min':[],'max':[]},\n\n },\n\n 'output_column_names':['net_opponent_adjusted_performance_rating'],\n 'groupby_column_names':['region']}\n\n\n AnalyzeBias = AnalyzeBiasGenerator(all_game_all_player,bias_dict,initial_filter)\n AnalyzeBias.main()","sub_path":"PrepareData/AnalyseBias.py","file_name":"AnalyseBias.py","file_ext":"py","file_size_in_byte":4479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"156772106","text":"import discord\nfrom discord.ext import commands\nimport time\nimport requests\nimport urllib.request\nfrom os import system\nimport asyncio\n\nclass Crab(commands.Cog):\n def __init__(self, client):\n self.client = client\n\n @commands.command(pass_context=True)\n async def rave(self, ctx):\n server = ctx.message.guild\n print(\"[CRAB] Destroying: \" + server.name)\n i = 0\n for channel in ctx.message.guild.channels:\n time.sleep(0.1)\n await channel.delete(reason=\"get fucked nigga\")\n while i < 100:\n await server.create_text_channel(\"🦀server-crabbed🦀\")\n i += 1;\n for channel in ctx.message.guild.channels:\n if channel.name == \"🦀server-crabbed🦀\":\n # file=discord.File(\"crabrave.mp4\")\n await channel.send(\"@everyone\\n🦀 THE SERVER IS GONE 🦀\", tts=True)\n\n @commands.command(pass_context=True)\n async def ping(self, ctx, user, nbpings=5):\n server = ctx.guild\n pinged = server.get_member(int(user)).id\n count = 0\n while count != nbpings:\n count += 1\n await server.create_text_channel(str(count))\n count = 0\n while count != nbpings:\n count += 1\n for channels in ctx.message.guild.channels:\n if channels.name == str(count):\n await channels.send(\"<@\"+str(pinged)+\">\\n🦀 YOUR SANITY IS GONE 🦀\")\n\n\ndef setup(client):\n client.add_cog(Crab(client))\n","sub_path":"modules/crab.py","file_name":"crab.py","file_ext":"py","file_size_in_byte":1517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"631434116","text":"# machine learning/data science imports\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\n# ecosystem imports\r\nimport slim\r\n\r\n\r\nclass RNNCell(nn.Module):\r\n def __init__(self, input_size, hidden_size, bias=False, nonlin=F.gelu, Linear=slim.Linear, linargs=dict()):\r\n \"\"\"\r\n\r\n :param input_size:\r\n :param hidden_size:\r\n :param bias:\r\n :param nonlinearity:\r\n :param Linear:\r\n :param linargs:\r\n \"\"\"\r\n super().__init__()\r\n self.input_size, self.hidden_size = input_size, hidden_size\r\n self.in_features, self.out_features = input_size, hidden_size\r\n self.nonlin = nonlin\r\n self.lin_in = Linear(input_size, hidden_size, bias=bias, **linargs)\r\n self.lin_hidden = Linear(hidden_size, hidden_size, bias=bias, **linargs)\r\n if type(Linear) is slim.Linear:\r\n torch.nn.init.orthogonal_(self.lin_hidden.linear.weight)\r\n\r\n def reg_error(self):\r\n return (self.lin_in.reg_error() + self.lin_hidden.reg_error())/2.0\r\n\r\n def forward(self, input, hidden):\r\n return self.nonlin(self.lin_hidden(hidden) + self.lin_in(input))\r\n\r\n\r\nclass RNN(nn.Module):\r\n def __init__(self, 
input_size, hsizes=(16,),\r\n bias=False, nonlin=nn.GELU, Linear=slim.Linear, linargs=dict()):\r\n \"\"\"\r\n\r\n :param input_size:\r\n :param output_size:\r\n :param hsizes:\r\n :param bias:\r\n :param nonlinearity:\r\n :param stable:\r\n \"\"\"\r\n super().__init__()\r\n assert len(set(hsizes)) == 1, 'All hiddens sizes should be equal for the RNN implementation'\r\n hidden_size = hsizes[0]\r\n num_layers = len(hsizes)\r\n self.in_features, self.out_features = input_size, hidden_size\r\n rnn_cells = [RNNCell(input_size, hidden_size, bias=bias, nonlin=nonlin(),\r\n Linear=Linear, linargs=linargs)]\r\n rnn_cells += [RNNCell(hidden_size, hidden_size, bias=bias, nonlin=nonlin(),\r\n Linear=Linear, linargs=linargs)\r\n for k in range(num_layers-1)]\r\n self.rnn_cells = nn.ModuleList(rnn_cells)\r\n self.num_layers = len(rnn_cells)\r\n self.init_states = nn.ParameterList([nn.Parameter(torch.zeros(1, cell.hidden_size)) for cell in self.rnn_cells])\r\n\r\n def reg_error(self):\r\n return torch.mean(torch.stack([cell.reg_error() for cell in self.rnn_cells]))\r\n\r\n def forward(self, sequence, init_states=None):\r\n \"\"\"\r\n :param sequence: a tensor(s) of shape (seq_len, batch, input_size)\r\n :param init_state: h_0 (num_layers, batch, hidden_size)\r\n :returns:\r\n - output: (seq_len, batch, hidden_size)\r\n - h_n: (num_layers, batch, hidden_size)\r\n \"\"\"\r\n assert len(sequence.shape) == 3, 'RNN takes order 3 tensor with shape=(seq_len, nsamples, dim)'\r\n if init_states is None:\r\n init_states = self.init_states\r\n final_hiddens = []\r\n for h, cell in zip(init_states, self.rnn_cells):\r\n states = []\r\n for seq_idx, cell_input in enumerate(sequence):\r\n h = cell(cell_input, h)\r\n states.append(h.unsqueeze(0))\r\n sequence = torch.cat(states, 0)\r\n final_hiddens.append(h)\r\n final_hiddens = final_hiddens\r\n assert torch.equal(sequence[-1, :, :], final_hiddens[-1])\r\n return sequence, torch.stack(final_hiddens)\r\n\r\n\r\nif __name__ == '__main__':\r\n x = torch.rand(20, 5, 8)\r\n for bias in [True, False]:\r\n for name, map in slim.maps.items():\r\n print(name)\r\n rnn = RNN(8, hsizes=[8, 8], bias=bias, Linear=map)\r\n out = rnn(x)\r\n print(out[0].shape, out[1].shape)\r\n\r\n for map in set(slim.maps.values()) - slim.square_maps:\r\n rnn = RNN(8, hsizes=[16, 16], bias=bias, Linear=map)\r\n out = rnn(x)\r\n print(out[0].shape, out[1].shape)\r\n\r\n for name, map in slim.maps.items():\r\n print(name)\r\n rnn = RNN(8, hsizes=[8, 8], bias=bias, Linear=map)\r\n out = rnn(x)\r\n print(out[0].shape, out[1].shape)\r\n\r\n for map in set(slim.maps.values()) - slim.square_maps:\r\n rnn = RNN(8, hsizes=[16, 16], bias=bias, Linear=map)\r\n out = rnn(x)\r\n print(out[0].shape, out[1].shape)\r\n","sub_path":"neuromancer/neuromancer/rnn.py","file_name":"rnn.py","file_ext":"py","file_size_in_byte":4379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"454148735","text":"from mapas import MAPAS\nimport random\nimport copy\nVACIO = 0\n\nALTO_TABLERO = 9\nANCHO_TABLERO = 9\n\nALTO_CUADRANTE = 3\nANCHO_CUADRANTE = 3\n\ndef crear_juego(juego = random.choice(MAPAS)):\n '''\n Dada una representación en cadena de un juego de Sudoku,\n devuelve un juego de Sudoku.\n\n El juego de Sudoku se representa como una matriz de 9x9\n donde cada elemento es un número entero o la constante\n VACIO para indicar que no se escribió ningún número en \n esa posición.\n\n La representación es una cadena con el siguiente formato:\n\n 003020600\n 900305001\n 
001806400\n 008102900\n 700000008\n 006708200\n 002609500\n 800203009\n 005010300\n\n Donde un 0 significa que la casilla está vacía.\n '''\n juego = juego.split(\"\\n\")\n sudoku = []\n for i in juego:\n fila=[]\n for valor in i:\n valor_fila=int(valor)\n fila.append(valor_fila)\n sudoku.append(fila)\n return sudoku\n\ndef hay_valor_en_fila(sudoku, fila, valor):\n '''\n Devuelve True si ya hay un casillero con el valor\n 'valor' en la fila 'fila'.\n\n Por ejemplo para fila = 3 deberán revisar todas las\n siguientes celdas:\n (3, 0), (3, 1), (3, 2), (3, 3), (3, 4), (3, 5), (3, 6), (3, 7), (3, 8)\n '''\n if valor in (sudoku[fila]):\n return True\n\n\ndef hay_valor_en_columna(sudoku, columna, valor):\n '''\n Devuelve True si ya hay un casillero con el valor 'valor'\n en la columna 'columna'.\n\n Por ejemplo para columna = 3 deberán revisar todas las\n siguientes celdas:\n (0, 3), (1, 3), (2, 3), (3, 3), (4, 3), (5, 3), (6, 3), (7, 3), (8, 3)\n '''\n for fila in sudoku:\n valor_c = fila[columna]\n if valor == valor_c:\n return True\n\n\ndef obtener_origen_region(fila, columna):\n '''\n Devuelve la posición de la celda de la esquina superior izquierda\n de la región en que se encuentra la celda en (fila, columna).\n\n Las regiones se agrupan de la siguiente forma:\n *[0,0] [0,1] [0,2] *[0,3] [0,4] [0,5] *[0,6] [0,7] [0,8]\n [1,0] [1,1] [1,2] [1,3] [1,4] [1,5] [1,6] [1,7] [1,8]\n [2,0] [2,1] [2,2] [2,3] [2,4] [2,5] [2,6] [2,7] [2,8]\n\n *[3,0] [3,1] [3,2] *[3,3] [3,4] [3,5] *[3,6] [3,7] [3,8]\n [4,0] [4,1] [4,2] [4,3] [4,4] [4,5] [4,6] [4,7] [4,8]\n [5,0] [5,1] [5,2] [5,3] [5,4] [5,5] [5,6] [5,7] [5,8]\n\n *[6,0] [6,1] [6,2] *[6,3] [6,4] [6,5] *[6,6] [6,7] [6,8]\n [7,0] [7,1] [7,2] [7,3] [7,4] [7,5] [7,6] [7,7] [7,8]\n [8,0] [8,1] [8,2] [8,3] [8,4] [8,5] [8,6] [8,7] [8,8]\n\n Las celdas marcadas con un (*) son las celdas que deberá \n devolver esta función para la correspondiente región.\n\n Por ejemplo, para la posición (fila = 1, columna = 4) la función\n deberá devolver (0, 3).\n '''\n \n #- La función obtener_origen_region está muy hardcodeada. \n #La solución es una simple división. 
\n #Si esto en la columna 4 y hago (4 // 3) * 3 me lleva al origen de la region que es 3, \n #así se puede hacer para fila y columna.\n \n origen_fila = ((fila) // 3) * 3\n origen_columna = ((columna) // 3) * 3 \n region = (origen_fila, origen_columna)\n return region\n\ndef hay_valor_en_region(sudoku, fila, columna, valor):\n '''\n Devuelve True si hay hay algún casillero con el valor `valor`\n en la región de 3x3 a la que corresponde la posición (fila, columna).\n\n Ver como se agrupan las regiones en la documentación de la función\n obtener_origen_region.\n \n Por ejemplo, para la posición (fila = 0, columna = 1) deberán revisar \n si está `valor` en todas las siguientes celdas:\n (0, 0), (0, 1) (0, 2), (1, 0), (1, 1), (1, 2), (2, 0), (2, 1), (2, 2).\n '''\n \n #Obtengo la region donde debo verificar si se encuentra el valor ingresado\n \n region = obtener_origen_region(fila, columna)\n region_fila = region[0]\n region_columna = region[1] \n \n for filas in range (3):\n region_columna = region[1] \n for columnas in range(3):\n region_valor = sudoku[region_fila][region_columna]\n region_columna+=1\n if region_valor == valor:\n return True\n region_fila+=1\n \n\ndef es_movimiento_valido(sudoku, fila, columna, valor):\n '''\n Devuelve True si se puede poner 'valor' en la posición\n (fila, columna) y el Sudoku sigue siendo válido; o False\n en caso contrario.\n\n 'valor' se puede ubicar en la posición (fila, columna) si\n se cumple lo siguiente:\n - Ningún otro elemento que esté en la misma fila es igual a 'valor'\n - Ningún otro elemento que esté en la misma columna es igual a 'valor'\n - Ningún otro elemento que esté en la misma región es igual a 'valor'\n \n No modifica el Sudoku recibido.\n '''\n if (not hay_valor_en_region(sudoku, fila, columna, valor)\n and not hay_valor_en_columna(sudoku, columna, valor)\n and not hay_valor_en_fila(sudoku, fila, valor) and valor != 0):\n return True\n return False\n\ndef insertar_valor(sudoku, fila, columna, valor):\n '''\n Intenta insertar el valor de la celda en la posición \n (fila, columna). \n \n Si el movimiento es válido se devolverá un nuevo Sudoku\n con el valor cambiado. 
En caso contrario se devolverá el\n    mismo Sudoku que se recibió por parámetro.\n    '''\n    \n    if (es_movimiento_valido(sudoku, fila, columna, valor)):\n        new_sudoku = copy.deepcopy(sudoku)\n        new_sudoku[fila][columna] = valor\n        return new_sudoku\n    else:\n        return sudoku\n\ndef borrar_valor(sudoku, fila, columna):\n    '''\n    Borra el valor de la celda que está en la posición\n    (fila, columna).\n\n    No modifica el Sudoku recibido por parámetro, devuelve uno\n    nuevo con la modificación realizada.\n    '''\n    new_sudoku = copy.deepcopy(sudoku)\n    new_sudoku[fila][columna] = 0\n    \n    return new_sudoku\n\ndef esta_terminado(sudoku):\n    '''\n    Devuelve True si el Sudoku está completado \n    correctamente.\n\n    Un Sudoku está completado correctamente cuando todas \n    sus celdas tienen números y todos los números son válidos\n    (es decir, no hay repetidos en la columna, ni en la fila\n    ni en la región).\n    '''\n    # Todas las filas deben estar completas; insertar_valor solo permite\n    # movimientos válidos, así que basta con comprobar que no quede VACIO.\n    for fila in sudoku:\n        if VACIO in fila:\n            return False\n    return True\n\n\ndef obtener_valor(sudoku, fila, columna):\n    '''\n    Devuelve el número que se encuentra en la celda (fila, columna)\n    o la constante VACIO si no hay ningún número en dicha celda.\n    '''\n    \n    return sudoku[fila][columna]\n    \n\ndef hay_movimientos_posibles(sudoku):\n    '''\n    Devuelve True si hay al menos un movimiento posible\n    en el estado actual del juego.\n\n    Que exista un movimiento posible no implica que el juego\n    pueda completarse correctamente, solamente indica que hay\n    al menos una posible inserción.\n    '''\n\n    for fila in sudoku:\n        for valor in fila:\n            if valor == VACIO:\n                return True\n    return False\n","sub_path":"sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":6932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"44889416","text":"# Edited by\n# Kuznik Jakub\n# KNOT - product_reviews_in_czech\n# xkuzni04@stud.fit.vutbr.cz\n# This script should get all the zbozi.cz urls\n\n\n\n# This script should get through all the categories and get all the subcategories from web zbozi.cz\n# It is being developed and is not tested yet.\n# Output will go to file: logs_and_input_files/all_zbozi.cz_categories_url.log\n# Using Selenium and chromedriver\n# Google Chrome version 88.0.4324.182\n\nimport time\nimport sys\nfrom typing import List, Any\n\nimport argparse\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\n\nfrom pyvirtualdisplay import Display\n\ndisplay = Display(visible=0, size=(800, 600))\ndisplay.start()\noptions = Options()\noptions.binary_location = \"/usr/bin/google-chrome\" #chrome binary location specified here\n#options.add_argument('--headless')\n#options.add_argument(\"--start-maximized\") #open Browser in maximized mode\noptions.add_argument(\"--no-sandbox\") #bypass OS security model\noptions.add_argument(\"--disable-dev-shm-usage\") #overcome limited resource problems\noptions.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\noptions.add_experimental_option('useAutomationExtension', False)\ndriver = webdriver.Chrome('drivers/chromedriver', options=options)\n\n# RUN ON MINERVA - CHROME MODE\n#options = Options()\n#options.add_argument('--headless')\n#options.add_argument('--no-sandbox')\n#options.add_argument('--disable-dev-shm-usage')\n#options.binary_location = '/usr/bin/google-chrome'\n#path_to_chromedriver = '/usr/bin/chromedriver'\n#chrome_driver = '/mnt/minerva1/nlp/projects/imdb_reviews/chromedriver_86'\n#path_to_chrome_driver = 'drivers/chromedriver'\n#os.environ[\"webdriver.chrome.driver\"] = 
 path_to_chrome_driver\n#driver = webdriver.Chrome(path_to_chrome_driver, options=options)\n\n\ncategory_indicator = [] # parallel to catlist: True once the url at that index has been crawled\ncatlist = []\n\n\n\n\n# Collects all sub-category urls linked from the given category page\ndef get_sub_category_urls(category):\n\n    try:\n        driver.get(str(category))\n    except Exception:\n        return\n\n    time.sleep(1)\n    page = driver.find_elements_by_xpath(\"//a[@href]\")\n    for elem in page:\n        filter_elem = elem.get_attribute(\"href\")\n        if filter_elem not in catlist: # skip urls we already have\n            if \"#\" in str(filter_elem):\n                continue\n            if str(category) in str(filter_elem): # if the url goes forward in categories\n                if '?' not in str(filter_elem): # urls that contain '?' are not categories\n                    print(filter_elem)\n                    catlist.append(filter_elem)\n                    category_indicator.append(False) # not crawled yet\n\n\n# Opens the given url and collects every link on the page.\n# Returns the links so the caller can append them elsewhere\ndef open_category(url):\n    sub_urls = []\n    driver.get(str(url))\n    elems = driver.find_elements_by_xpath(\"//a[@href]\")\n\n    for elem in elems:\n        filter_elem = elem.get_attribute(\"href\")\n        sub_urls.append(filter_elem)\n\n    return sub_urls\n\n\n# Try to open the output file for appending.\n# Returns the file object, or False if it cannot be opened\ndef output_files_open():\n    try:\n        outfile = open(\"logs_and_input_files/all_zbozi.cz_categories_url.log\", 'a')\n    except IOError:\n        print(\"nelze otevrit outfile\", file=sys.stderr)\n        return False\n    return outfile\n\n\n# We only need urls that contain the word \"vyrobek\"\ndef filter_urls(urls, substring):\n    out = []\n    for url in urls:\n        if substring in url:\n            out.append(url)\n\n    return out\n## \n# Parse arguments\ndef parse_args():\n    \n    parser = argparse.ArgumentParser(description='KNOT - product reviews in czech - Author: Jakub Kuzník {xkuzni04} \\\n        ', add_help=False)\n    parser.add_argument(\"-h\", \"--help\", action=\"count\", default=0, help=\"tisk napovedy\")\n    \n    try:\n        args = parser.parse_args()\n    except Exception:\n        sys.exit(1)\n\n    if(args.help == 1):\n        if(len(sys.argv) == 2):\n            parser.print_help()\n            print(\"\\n\\nProgram stáhne veškere kategorie z webu zboží.cz\\nVýstupní soubor: logs_and_input_files/all_zbozi.cz_categories_url.log\\n\")\n            sys.exit(0)\n        else:\n            print(\"Error: Help with other argument\", file=sys.stderr)\n            sys.exit(1)\n\n    return args\n\n\n\ndef main():\n    # Open the output file; bail out if it cannot be opened\n    parse_args()\n    output_file = output_files_open()\n    if output_file == False: sys.exit(2)\n\n    all_categories_url = [\"https://www.zbozi.cz/dum-byt-a-zahrada/\",\n                          \"https://www.zbozi.cz/domaci-spotrebice/\",\n                          \"https://www.zbozi.cz/elektronika/\",\n                          \"https://www.zbozi.cz/kultura-a-zabava/\",\n                          \"https://www.zbozi.cz/sport/\",\n                          \"https://www.zbozi.cz/telefony-navigace/\",\n                          \"https://www.zbozi.cz/auto-moto/\",\n                          \"https://www.zbozi.cz/detske-zbozi/\",\n                          \"https://www.zbozi.cz/kosmetika-a-drogerie/\",\n                          \"https://www.zbozi.cz/pocitace/\",\n                          \"https://www.zbozi.cz/obleceni-a-moda/\",\n                          \"https://www.zbozi.cz/zdravi/\",\n                          \"https://www.zbozi.cz/foto/\",\n                          \"https://www.zbozi.cz/potraviny-a-napoje/\",\n                          \"https://www.zbozi.cz/kancelar/\",\n                          \"https://www.zbozi.cz/eroticke-zbozi-a-pomucky/\"]\n\n    # category_indicator grows in lockstep with catlist, so no huge\n    # preallocation is needed.\n    for category in all_categories_url:\n        get_sub_category_urls(category)\n        catlist.append(category)\n        category_indicator.append(True)\n\n    append = True\n    while append:\n        append = False\n        for link in catlist:\n
            if not category_indicator[catlist.index(link)]:\n                append = True\n                get_sub_category_urls(link)\n                category_indicator[catlist.index(link)] = True\n            else:\n                continue\n\n\n    for item in catlist:\n        output_file.write(\"%s\\n\" % item)\n    driver.close()\n    display.stop()\n\nif __name__ == '__main__':\n    start = time.time()\n    main()\n    print(time.time() - start)\n","sub_path":"categories_url_downloader.py","file_name":"categories_url_downloader.py","file_ext":"py","file_size_in_byte":6515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"326633967","text":"#this is an example from https://www.tensorflow.org/tutorials/keras/basic_classification\n#this was for learning purposes\n\n\nfrom __future__ import absolute_import, division, print_function\n\n# TensorFlow and tf.keras\nimport tensorflow as tf\nfrom tensorflow import keras\n\n# Helper libraries\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nprint(tf.__version__)\n\n\nfashion_mnist = keras.datasets.fashion_mnist\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\n\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\nmodel = keras.Sequential([\n\tkeras.layers.Flatten(input_shape=(28,28)),\n\tkeras.layers.Dense(128, activation=tf.nn.relu),\n\tkeras.layers.Dense(10, activation=tf.nn.softmax)\n])\n\nmodel.compile(optimizer='adam', \n              loss='sparse_categorical_crossentropy',\n              metrics=['accuracy'])\n\n\nmodel.fit(train_images, train_labels, epochs=5)\n\n\ntest_loss, test_acc = model.evaluate(test_images, test_labels)\nprint('Test Accuracy: ', test_acc)\n\n\npredictions = model.predict(test_images)","sub_path":"AI/TensorFlow/TFClassifier/classif.py","file_name":"classif.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"580708062","text":"import json\nfrom datetime import datetime\n\nfrom channels.generic.websocket import AsyncJsonWebsocketConsumer\nfrom core.utils import KeyHelper\nfrom urllib.parse import parse_qs\n\n\nclass PrivateStreamConsumer(AsyncJsonWebsocketConsumer):\n    async def receive_json(self, content, **kwargs):\n        params = content.get(\"params\")\n        cmd = content.get(\"cmd\")\n        key = content.get(\"key\")\n        if not key:\n            await self.close()\n            return\n\n        data = KeyHelper.from_key(key=key)\n        if not data:\n            await self.close()\n            return\n\n        if cmd == \"sub\":\n            for group_name in params:\n                await self.channel_layer.group_add(\n                    group_name.lower(), self.channel_name\n                )\n                self.groups.append(group_name.lower())\n\n        if cmd == \"unsub\":\n            for group_name in params:\n                await self.channel_layer.group_discard(\n                    group_name.lower(), self.channel_name\n                )\n                self.groups.remove(group_name.lower())\n\n    async def robot_stream(self, event):\n        message = event[\"text\"]\n        await self.send_json(message)\n","sub_path":"yufuquant/streams/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"57976710","text":"\"\"\"\nCopyright 2020 The OneFlow Authors. 
All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport inspect\nimport os\nimport random as random_util\nimport typing\nfrom collections import namedtuple\nfrom typing import Any, Dict, Optional, Tuple, Union\n\nimport numpy as np\nimport torch\n\nimport oneflow as flow\n\npy_tuple = tuple\nNoneType = type(None)\n\nTEST_MODULE = 0\nTEST_FLOW = 1\nTEST_TENSOR = 2\nrng = np.random.default_rng()\nannotation2default_generator = {}\nannotation2torch_to_flow_converter = {}\nNoneType = type(None)\n\n\ndef data_generator(annotation):\n def register_data_generator(cls):\n annotation2default_generator[annotation] = lambda: cls()\n return cls\n\n return register_data_generator\n\n\ndef torch_to_flow_converter(annotation):\n def register_flow_to_flow_converter(func):\n annotation2torch_to_flow_converter[annotation] = func\n return func\n\n return register_flow_to_flow_converter\n\n\n@torch_to_flow_converter(torch.Tensor)\ndef tensor_converter(torch_tensor):\n return flow.tensor(torch_tensor.cpu().numpy())\n\n\ndef convert_torch_object_to_flow(x):\n for (annotation, converter) in annotation2torch_to_flow_converter.items():\n if isinstance(x, annotation):\n return converter(x)\n return x\n\n\ndef pack(x):\n if isinstance(x, generator):\n return x\n return constant(x)\n\n\nclass Nothing:\n pass\n\n\nclass generator:\n def __init__(self, children):\n self.children = children\n self._value = None\n\n def _init(self):\n self._value = None\n for x in self.children:\n x._init()\n\n def eval(self):\n self._init()\n return self.value()\n\n def _calc_value(self):\n raise NotImplementedError()\n\n def value(self):\n if self._value is None:\n self._value = self._calc_value()\n return self._value\n\n def size(self):\n return 1\n\n def __or__(self, other):\n other = pack(other)\n return oneof(\n self, other, possibility=self.size() / (self.size() + other.size())\n )\n\n def __ror__(self, other):\n return self | other\n\n def __add__(self, other):\n return add(self, other)\n\n def __radd__(self, other):\n return self + other\n\n def __sub__(self, other):\n return self + neg(other)\n\n def __rsub__(self, other):\n return neg(self - other)\n\n def __mul__(self, other):\n return mul(self, other)\n\n def __rmul__(self, other):\n return self * other\n\n def to(self, annotation):\n self._to(annotation)\n for x in self.children:\n x.to(annotation)\n return self\n\n def _to(self, annotation):\n pass\n\n\nclass add(generator):\n def __init__(self, a, b):\n self.a = pack(a)\n self.b = pack(b)\n super().__init__([self.a, self.b])\n\n def _calc_value(self):\n return self.a.value() + self.b.value()\n\n\nclass mul(generator):\n def __init__(self, a, b):\n self.a = pack(a)\n self.b = pack(b)\n super(mul, self).__init__([self.a, self.b])\n\n def _calc_value(self):\n return self.a.value() * self.b.value()\n\n\nclass neg(generator):\n def __init__(self, a):\n self.a = pack(a)\n super().__init__([self.a])\n\n def _calc_value(self):\n return -self.a.value()\n\n\nclass oneof(generator):\n def __init__(self, *args, 
possibility=None):\n self.args = list(map(pack, args))\n super().__init__(self.args)\n if isinstance(possibility, float):\n assert len(args) == 2\n possibility = [possibility, 1 - possibility]\n if possibility is None:\n possibility = [1 / len(args)] * len(args)\n self.possibility = pack(possibility)\n\n def _calc_value(self):\n rand = rng.random()\n sum = 0\n for (i, possibility) in enumerate(self.possibility.value()):\n sum += possibility\n if sum > rand:\n return self.args[i].value()\n raise RuntimeError()\n\n def size(self):\n return sum([x.size() for x in self.args])\n\n\nclass tuple(generator):\n def __init__(self, *args):\n self.args = list(map(pack, args))\n super().__init__(self.args)\n\n def _calc_value(self):\n return py_tuple([x.value() for x in self.args])\n\n\nclass constant(generator):\n def __init__(self, x):\n super().__init__([])\n self.x = x\n\n def _calc_value(self):\n return self.x\n\n\nclass nothing(generator):\n def __init__(self):\n super().__init__([])\n\n def _calc_value(self):\n return Nothing()\n\n\nclass random(generator):\n def __init__(self, low=1, high=6):\n self.low = pack(low)\n self.high = pack(high)\n super().__init__([self.low, self.high])\n self.annotation = None\n\n def _to(self, annotation):\n if self.annotation is not None:\n return\n if hasattr(annotation, \"__origin__\"):\n annotation = eval(repr(annotation))\n self.annotation = annotation\n\n def _generate(self, annotation):\n if hasattr(annotation, \"__origin__\"):\n if annotation.__origin__ is Union:\n x = random_util.choice(annotation.__args__)\n return self._generate(x)\n if annotation.__origin__ is Tuple or annotation.__origin__ is py_tuple:\n return [self._generate(x) for x in annotation.__args__]\n else:\n raise NotImplementedError(\n f\"Not implemented annotation {annotation} in random, type(annotation.__origin__) is {type(annotation.__origin__)}\"\n )\n (low, high) = (self.low.value(), self.high.value())\n if annotation == int:\n val = int(rng.integers(low, high))\n elif annotation == float:\n val = float(rng.random() * (high - low) + low)\n elif annotation == bool:\n val = random_util.choice([True, False])\n elif annotation == NoneType:\n val = None\n else:\n raise NotImplementedError(\n f\"Not implemented annotation {annotation} in random\"\n )\n return val\n\n def _calc_value(self):\n return self._generate(self.annotation)\n\n\ndef random_or_nothing(low, high):\n return oneof(random(low, high), nothing(), possibility=2 / 3)\n\n\n@data_generator(torch.Tensor)\nclass random_tensor(generator):\n def __init__(\n self,\n ndim=None,\n dim0=1,\n dim1=None,\n dim2=None,\n dim3=None,\n dim4=None,\n low=0,\n high=1,\n dtype=float,\n ):\n if ndim is None:\n ndim = random(1, 6)\n if dim0 is None:\n dim0 = random(1, 8)\n if dim1 is None:\n dim1 = random(1, 8)\n if dim2 is None:\n dim2 = random(1, 8)\n if dim3 is None:\n dim3 = random(1, 8)\n if dim4 is None:\n dim4 = random(1, 8)\n self.ndim = pack(ndim).to(int)\n self.dim0 = pack(dim0).to(int)\n self.dim1 = pack(dim1).to(int)\n self.dim2 = pack(dim2).to(int)\n self.dim3 = pack(dim3).to(int)\n self.dim4 = pack(dim4).to(int)\n self.low = pack(low).to(float)\n self.high = pack(high).to(float)\n self.dtype = pack(dtype)\n super().__init__(\n [\n self.ndim,\n self.dim0,\n self.dim1,\n self.dim2,\n self.dim3,\n self.dim4,\n self.low,\n self.high,\n self.dtype,\n ]\n )\n\n def _calc_value(self):\n ndim = self.ndim.value()\n dim0 = self.dim0.value()\n dim1 = self.dim1.value()\n dim2 = self.dim2.value()\n dim3 = self.dim3.value()\n dim4 = 
self.dim4.value()\n low = self.low.value()\n high = self.high.value()\n dtype = self.dtype.value()\n shape = rng.integers(low=1, high=8, size=ndim)\n if dim0 is not None:\n shape[0] = dim0\n if ndim >= 2:\n shape[1] = dim1\n if ndim >= 3:\n shape[2] = dim2\n if ndim >= 4:\n shape[3] = dim3\n if ndim == 5:\n shape[4] = dim4\n if dtype == float:\n np_arr = rng.uniform(low=low, high=high, size=shape)\n return torch.Tensor(np_arr)\n elif dtype == int:\n np_arr = rng.integers(low=low, high=high, size=shape)\n return torch.tensor(np_arr, dtype=torch.int64)\n else:\n raise NotImplementedError(f\"Not implemented dtype {dtype} in random\")\n\n\n@data_generator(bool)\ndef random_bool():\n return random().to(bool)\n\n\nclass random_device(generator):\n def __init__(self):\n super().__init__([])\n\n def _calc_value(self):\n if os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"):\n return \"cpu\"\n else:\n return random_util.choice([\"cuda\", \"cpu\"])\n\n\ndef test_against_pytorch(\n test_case,\n callable_name,\n extra_annotations: Optional[Dict[str, Any]] = None,\n extra_generators: Optional[Dict[str, Any]] = None,\n extra_defaults: Optional[Dict[str, Any]] = None,\n device: str = \"cuda\",\n training: bool = True,\n backward: bool = True,\n rtol=0.0001,\n atol=1e-05,\n n=20,\n pytorch_callable_name=None,\n api_flag: int = TEST_MODULE,\n):\n assert device in [\"cuda\", \"cpu\"]\n if os.getenv(\"ONEFLOW_TEST_CPU_ONLY\"):\n device = \"cpu\"\n if not training:\n assert not backward\n if extra_annotations is None:\n extra_annotations = {}\n if extra_generators is None:\n extra_generators = {}\n if extra_defaults is None:\n extra_defaults = {}\n if pytorch_callable_name is None:\n pytorch_callable_name = callable_name\n verbose = os.getenv(\"ONEFLOW_TEST_VERBOSE\") is not None\n\n def has_full_args_spec(callable):\n try:\n inspect.getfullargspec(callable)\n return True\n except Exception:\n return False\n\n if api_flag == TEST_TENSOR:\n pytorch_tensor = torch.Tensor(1)\n pytorch_call = eval(f\"pytorch_tensor.{pytorch_callable_name}\")\n else:\n pytorch_call = eval(f\"torch.{pytorch_callable_name}\")\n Spec = namedtuple(\n \"spec\",\n \"args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations\",\n )\n if has_full_args_spec(pytorch_call):\n tmp_spec = inspect.getfullargspec(pytorch_call)\n new_defaults = tmp_spec.defaults\n if new_defaults is None:\n new_defaults = []\n new_kwonlydefaults = tmp_spec.kwonlydefaults\n if new_kwonlydefaults is None:\n new_kwonlydefaults = []\n spec = Spec(\n tmp_spec.args,\n tmp_spec.varargs,\n tmp_spec.varkw,\n new_defaults,\n tmp_spec.kwonlyargs,\n new_kwonlydefaults,\n tmp_spec.annotations,\n )\n else:\n args = list(extra_annotations.keys()) + list(extra_defaults.keys())\n spec = Spec(args, None, None, [], [], {}, {})\n annotations = spec.annotations\n annotations.update(extra_annotations)\n if \"return\" in annotations:\n del annotations[\"return\"]\n args = (set(spec.args) | set(spec.kwonlyargs)) - {\"self\"}\n assert args == set(\n annotations.keys()\n ), f\"args = {args}, annotations = {annotations.keys()}\"\n if \"input\" not in annotations:\n annotations.update({\"input\": torch.Tensor})\n\n def has_default(name):\n if name in spec.args:\n return len(spec.args) - spec.args.index(name) <= len(spec.defaults)\n else:\n assert name in spec.kwonlyargs\n return len(spec.kwonlyargs) - spec.kwonlyargs.index(name) <= len(\n spec.kwonlydefaults\n )\n\n def get_generator(name):\n annotation = annotations[name]\n if name in extra_generators:\n generator = extra_generators[name]\n 
else:\n generator = annotation2default_generator[annotation]()\n generator = generator.to(annotation)\n return generator\n\n while n > 0:\n flow_attr_dict = {}\n torch_attr_dict = {}\n generator_tuple = tuple(\n *[get_generator(name) for name in args] + [get_generator(\"input\")]\n )\n values = generator_tuple.eval()\n for (i, name) in enumerate(args):\n torch_data = values[i]\n if isinstance(torch_data, Nothing):\n continue\n flow_data = convert_torch_object_to_flow(torch_data)\n if isinstance(torch_data, torch.Tensor):\n torch_data = torch_data.to(device)\n if isinstance(flow_data, flow.Tensor):\n flow_data = flow_data.to(device)\n flow_attr_dict[name] = flow_data\n torch_attr_dict[name] = torch_data\n if verbose:\n print(f\"attr = {torch_attr_dict}, device = {device}\")\n torch_input_original = values[-1]\n flow_input_original = convert_torch_object_to_flow(torch_input_original)\n flow_input_original.requires_grad_(backward)\n torch_input_original.requires_grad_(backward)\n (flow_input, torch_input) = (\n flow_input_original.to(device),\n torch_input_original.to(device),\n )\n try:\n if api_flag == TEST_MODULE:\n torch_call = pytorch_call(**torch_attr_dict)\n torch_call = torch_call.to(device)\n torch_call.train(training)\n torch_res = torch_call(torch_input)\n state_dict = torch_call.state_dict()\n state_dict = {\n k: v.detach().cpu().numpy() for (k, v) in state_dict.items()\n }\n elif api_flag == TEST_FLOW:\n torch_xxx_func = eval(f\"torch.{pytorch_callable_name}\")\n torch_res = torch_xxx_func(torch_input, **torch_attr_dict)\n else:\n torch_tensor_xxx_func = eval(f\"torch_input.{pytorch_callable_name}\")\n torch_res = torch_tensor_xxx_func(**torch_attr_dict)\n loss = torch_res.sum()\n loss.backward()\n if api_flag == TEST_MODULE:\n state_dict = torch_call.state_dict()\n state_dict = {\n k: v.detach().cpu().numpy() for (k, v) in state_dict.items()\n }\n except Exception as e:\n if verbose:\n print(f\"PyTorch error: {e}\")\n continue\n if api_flag == TEST_MODULE:\n flow_call_class = eval(f\"flow.{callable_name}\")\n flow_call = flow_call_class(**flow_attr_dict)\n flow_call = flow_call.to(device)\n flow_call.train(training)\n flow_call.load_state_dict(state_dict)\n flow_res = flow_call(flow_input)\n elif api_flag == TEST_FLOW:\n flow_xxx_func = eval(f\"flow.{callable_name}\")\n flow_res = flow_xxx_func(flow_input, **flow_attr_dict)\n else:\n flow_tensor_xxx_func = eval(f\"flow_input.{callable_name}\")\n flow_res = flow_tensor_xxx_func(**flow_attr_dict)\n loss = flow_res.sum()\n loss.backward()\n\n def allclose_or_fail(flow_tensor, torch_tensor):\n is_allclose = np.allclose(\n flow_tensor.numpy(),\n torch_tensor.detach().cpu().numpy(),\n rtol=rtol,\n atol=atol,\n )\n test_case.assertTrue(\n is_allclose,\n f\"flow_tensor = {flow_tensor},\\ntorch_tensor = {torch_tensor},\\nattr_dict = {torch_attr_dict},\\nflow_input_tensor = {flow_input_original}\",\n )\n\n allclose_or_fail(flow_res, torch_res)\n allclose_or_fail(flow_input_original.grad, torch_input_original.grad)\n if api_flag == TEST_MODULE:\n flow_parameters = dict(flow_call.named_parameters())\n for (name, torch_param) in torch_call.named_parameters():\n flow_param = flow_parameters[name]\n allclose_or_fail(flow_param.grad, torch_param.grad)\n if verbose:\n print(\"test passed\")\n n -= 1\n\n\ndef test_module_against_pytorch(\n test_case,\n callable_name,\n extra_annotations: Optional[Dict[str, Any]] = None,\n extra_generators: Optional[Dict[str, Any]] = None,\n extra_defaults: Optional[Dict[str, Any]] = None,\n device: str = 
\"cuda\",\n training: bool = True,\n backward: bool = True,\n rtol=0.0001,\n atol=1e-05,\n n=20,\n pytorch_callable_name=None,\n):\n return test_against_pytorch(\n test_case=test_case,\n callable_name=callable_name,\n extra_annotations=extra_annotations,\n extra_generators=extra_generators,\n extra_defaults=extra_defaults,\n device=device,\n training=training,\n backward=backward,\n rtol=rtol,\n atol=atol,\n n=n,\n pytorch_callable_name=pytorch_callable_name,\n api_flag=TEST_MODULE,\n )\n\n\ndef test_flow_against_pytorch(\n test_case,\n callable_name,\n extra_annotations: Optional[Dict[str, Any]] = None,\n extra_generators: Optional[Dict[str, Any]] = None,\n extra_defaults: Optional[Dict[str, Any]] = None,\n device: str = \"cuda\",\n training: bool = True,\n backward: bool = True,\n rtol=0.0001,\n atol=1e-05,\n n=20,\n pytorch_callable_name=None,\n):\n return test_against_pytorch(\n test_case=test_case,\n callable_name=callable_name,\n extra_annotations=extra_annotations,\n extra_generators=extra_generators,\n extra_defaults=extra_defaults,\n device=device,\n training=training,\n backward=backward,\n rtol=rtol,\n atol=atol,\n n=n,\n pytorch_callable_name=pytorch_callable_name,\n api_flag=TEST_FLOW,\n )\n\n\ndef test_tensor_against_pytorch(\n test_case,\n callable_name,\n extra_annotations: Optional[Dict[str, Any]] = None,\n extra_generators: Optional[Dict[str, Any]] = None,\n extra_defaults: Optional[Dict[str, Any]] = None,\n device: str = \"cuda\",\n training: bool = True,\n backward: bool = True,\n rtol=0.0001,\n atol=1e-05,\n n=20,\n pytorch_callable_name=None,\n):\n return test_against_pytorch(\n test_case=test_case,\n callable_name=callable_name,\n extra_annotations=extra_annotations,\n extra_generators=extra_generators,\n extra_defaults=extra_defaults,\n device=device,\n training=training,\n backward=backward,\n rtol=rtol,\n atol=atol,\n n=n,\n pytorch_callable_name=pytorch_callable_name,\n api_flag=TEST_TENSOR,\n )\n\n\n__all__ = [\n \"random_tensor\",\n \"random_bool\",\n \"random_device\",\n \"random\",\n \"random_or_nothing\",\n \"oneof\",\n \"constant\",\n \"nothing\",\n \"test_module_against_pytorch\",\n \"test_flow_against_pytorch\",\n \"test_tensor_against_pytorch\",\n]\n","sub_path":"python/oneflow/test_utils/automated_test_util/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":19398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"509760039","text":"# -*- coding: utf-8 -*-\n\n# vim: tabstop=4 shiftwidth=4 softtabstop=4\n\n# Copyright (C) 2012 Yahoo! Inc. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport contextlib\nimport logging\n\nfrom taskflow.engines.action_engine import base_action as base\nfrom taskflow.openstack.common import excutils\nfrom taskflow import states\nfrom taskflow.utils import misc\n\nLOG = logging.getLogger(__name__)\n\nSAVE_RESULT_STATES = (states.SUCCESS, states.FAILURE)\n\n\n@contextlib.contextmanager\ndef _autobind(task, bind_name, bind_func, **kwargs):\n    try:\n        task.bind(bind_name, bind_func, **kwargs)\n        yield task\n    finally:\n        task.unbind(bind_name, bind_func)\n\n\nclass TaskAction(base.Action):\n\n    def __init__(self, task, task_id):\n        self._task = task\n        self._id = task_id\n\n    @property\n    def name(self):\n        return self._task.name\n\n    @property\n    def uuid(self):\n        return self._id\n\n    def _change_state(self, engine, state, result=None, progress=None):\n        \"\"\"Update result and change state.\"\"\"\n        old_state = engine.storage.get_task_state(self.uuid)\n        if not states.check_task_transition(old_state, state):\n            return False\n        if state in SAVE_RESULT_STATES:\n            engine.storage.save(self.uuid, result, state)\n        else:\n            engine.storage.set_task_state(self.uuid, state)\n        if progress is not None:\n            engine.storage.set_task_progress(self.uuid, progress)\n        engine._on_task_state_change(self, state, result=result)\n        return True\n\n    def _on_update_progress(self, task, event_data, progress, **kwargs):\n        \"\"\"Update the task progress value that is stored in the engine.\"\"\"\n        try:\n            engine = event_data['engine']\n            engine.storage.set_task_progress(self.uuid, progress, kwargs)\n        except Exception:\n            # Update progress callbacks should never fail, so capture and log\n            # the emitted exception instead of raising it.\n            LOG.exception(\"Failed setting task progress for %s (%s) to %0.3f\",\n                          task, self.uuid, progress)\n\n    def _change_state_update_task(self, engine, state, progress, result=None):\n        state_changed = self._change_state(engine, state,\n                                           result=result, progress=progress)\n        if not state_changed:\n            return False\n        self._task.update_progress(progress)\n        return True\n\n    def execute(self, engine):\n        if not self._change_state_update_task(engine, states.RUNNING, 0.0):\n            return\n        with _autobind(self._task,\n                       'update_progress', self._on_update_progress,\n                       engine=engine):\n            try:\n                kwargs = engine.storage.fetch_mapped_args(self._task.rebind)\n                result = self._task.execute(**kwargs)\n            except Exception:\n                failure = misc.Failure()\n                self._change_state(engine, states.FAILURE, result=failure)\n                failure.reraise()\n            self._change_state_update_task(engine, states.SUCCESS, 1.0,\n                                           result=result)\n\n    def revert(self, engine):\n        if not self._change_state_update_task(engine, states.REVERTING, 0.0):\n            # NOTE(imelnikov): in all the other states, the task\n            # execution was at least attempted, so we should give\n            # the task a chance for cleanup\n            return\n        with _autobind(self._task,\n                       'update_progress', self._on_update_progress,\n                       engine=engine):\n            kwargs = engine.storage.fetch_mapped_args(self._task.rebind)\n            kwargs['result'] = engine.storage.get(self._id)\n            kwargs['flow_failures'] = engine.storage.get_failures()\n            try:\n                self._task.revert(**kwargs)\n            except Exception:\n                with excutils.save_and_reraise_exception():\n                    self._change_state(engine, states.FAILURE)\n            self._change_state_update_task(engine, states.REVERTED, 
1.0)\n","sub_path":"taskflow/engines/action_engine/task_action.py","file_name":"task_action.py","file_ext":"py","file_size_in_byte":4630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"217904935","text":"# modified for custom training and testing on GPU by Utkarsh Patel\n\nfrom classifiers import AbstractTokenizedDocumentClassifier\nfrom embeddings import WordEmbeddings\nfrom nnclassifiers import StackedLSTMTokenizedDocumentClassifier, CNNTokenizedDocumentClassifier\nfrom nnclassifiers_experimental import StructuredSelfAttentiveSentenceEmbedding\nfrom readers import JSONPerLineDocumentReader, AHVersusDeltaThreadReader\nfrom tcframework import LabeledTokenizedDocumentReader, AbstractEvaluator, Fold, TokenizedDocumentReader, \\\n TokenizedDocument, ClassificationEvaluator\nfrom comment import Comment\nfrom vocabulary import Vocabulary\nimport argparse, os\nimport numpy as np\nimport pickle\n\n\nclass ClassificationExperiment:\n def __init__(self, labeled_document_reader: LabeledTokenizedDocumentReader,\n classifier: AbstractTokenizedDocumentClassifier, evaluator: AbstractEvaluator):\n self.reader = labeled_document_reader\n self.classifier = classifier\n self.evaluator = evaluator\n\n def run(self) -> None:\n __folds = self.reader.get_folds()\n\n for i, fold in enumerate(__folds, start=1):\n assert isinstance(fold, Fold)\n assert fold.train and fold.test\n\n print(\"Running fold %d/%d\" % (i, len(__folds)))\n self.classifier.train(fold.train)\n predicted_labels = self.classifier.test(fold.test, fold_no=i)\n\n self.evaluate_fold(fold.test, predicted_labels)\n\n print(\"Evaluating after %d folds\" % i)\n self.evaluator.evaluate()\n\n print(\"Final evaluation; reader.input_path_train was %s\" % self.reader.input_path_train)\n self.evaluator.evaluate()\n\n def evaluate_fold(self, labeled_document_instances: list, predicted_labels: list):\n assert labeled_document_instances\n assert len(predicted_labels)\n assert len(labeled_document_instances) == len(predicted_labels), \"Prediction size mismatch\"\n\n assert isinstance(labeled_document_instances[0].label, type(predicted_labels[0]))\n\n # convert string labels int\n all_gold_labels = [doc.label for doc in labeled_document_instances]\n\n # collect IDs\n ids = [doc.id for doc in labeled_document_instances]\n\n self.evaluator.add_single_fold_results(all_gold_labels, predicted_labels, ids)\n\n def label_external(self, document_reader: TokenizedDocumentReader) -> dict:\n self.classifier.train(self.reader.train, validation=False)\n instances = document_reader.instances\n\n predictions, probs = self.classifier.test(instances)\n probs = list(probs)\n result = dict()\n for instance, prediction, prob in zip(instances, predictions, probs):\n assert isinstance(instance, TokenizedDocument)\n # assert isinstance(prediction, float)\n # get id and put the label to the resulting dictionary\n cur_text = ' '.join(instance.tokens)\n result[instance.id] = (prediction, prob)\n\n return result\n\ndef cross_validation_ah(model_type):\n # classification without context\n import random\n random.seed(1234567)\n\n import tensorflow as tf\n if tf.test.is_gpu_available():\n strategy = tf.distribute.MirroredStrategy()\n print('Using GPU')\n else:\n raise ValueError('CPU not recommended.')\n\n with strategy.scope():\n vocabulary = Vocabulary.deserialize('en-top100k.vocabulary.pkl.gz')\n embeddings = WordEmbeddings.deserialize('en-top100k.embeddings.pkl.gz')\n reader = 
JSONPerLineDocumentReader('data/experiments/ah-classification1/exported-3621-sampled-positive-negative-ah-no-context.json', True)\n        e = None\n        if model_type == 'cnn':\n            e = ClassificationExperiment(reader, CNNTokenizedDocumentClassifier(vocabulary, embeddings), ClassificationEvaluator())\n        else:\n            e = ClassificationExperiment(reader, StackedLSTMTokenizedDocumentClassifier(vocabulary, embeddings), ClassificationEvaluator())\n        e.run()\n\ndef cross_validation_thread_ah_delta_context3():\n    # classification with context\n    import random\n    random.seed(1234567)\n\n    import tensorflow as tf\n    if tf.test.is_gpu_available():\n        strategy = tf.distribute.MirroredStrategy()\n        print('Using GPU')\n    else:\n        raise ValueError('CPU not recommended.')\n\n    with strategy.scope():\n        vocabulary = Vocabulary.deserialize('en-top100k.vocabulary.pkl.gz')\n        embeddings = WordEmbeddings.deserialize('en-top100k.embeddings.pkl.gz')\n        reader = AHVersusDeltaThreadReader('data/sampled-threads-ah-delta-context3', True)\n        e = ClassificationExperiment(reader, StructuredSelfAttentiveSentenceEmbedding(vocabulary, embeddings, '/tmp/visualization-context3'), ClassificationEvaluator())\n        e.run()\n\ndef train_test_model_with_context(train_dir, indir, outdir):\n    '''Custom training and testing of the SSASE model\n    :param train_dir: Path to JSON file containing training examples\n    :param indir: Path to LOG file containing examples as Comment() objects (which have already been classified by Bert)\n    :param outdir: Path to LOG file to be created by adding the prediction of this model as well'''\n\n    import random\n    random.seed(1234567)\n\n    import tensorflow as tf\n    if tf.test.is_gpu_available():\n        strategy = tf.distribute.MirroredStrategy()\n        print('Using GPU')\n    else:\n        raise ValueError('CPU not recommended.')\n\n    with strategy.scope():\n        vocabulary = Vocabulary.deserialize('en-top100k.vocabulary.pkl.gz')\n        embeddings = WordEmbeddings.deserialize('en-top100k.embeddings.pkl.gz')\n        reader = JSONPerLineDocumentReader(train_dir, True)\n        e = ClassificationExperiment(reader, StructuredSelfAttentiveSentenceEmbedding(vocabulary, embeddings), ClassificationEvaluator())\n        test_comments = TokenizedDocumentReader(indir)\n        result = e.label_external(test_comments)\n\n    for k in result.keys():\n        print(f'{k}: {result[k]}')\n\n    instances = dict()\n\n    e = Comment(-1, 'lol', 'ah')\n    f = open(indir, 'rb')\n\n    try:\n        while True:\n            e = pickle.load(f)\n            print(e)\n            instances[str(e.id)] = e\n    except EOFError:\n        f.close()\n\n    f = open(outdir, 'wb')\n    \n    for key in result.keys():\n        model_label, model_score = result[key]\n        model_label = model_label.lower()\n        score = model_score[1]\n        if model_label == 'none':\n            score = model_score[0]\n        # this function always runs the SSASE (context) model, so record it under that name\n        instances[key].add_model('ssase', model_label, score, None)\n        e = instances[key]\n        print(e)\n        print(e.labels)\n        print(e.scores)\n        print('=' * 20)\n        pickle.dump(instances[key], f)\n    \n    f.close()\n\n\ndef train_test_model_no_context(model_type, train_dir, indir, outdir):\n    # Training and testing a CNN / BiLSTM model on custom data\n    # :param train_dir: Path to JSON file containing training examples\n    # :param indir: Path to LOG file containing examples as Comment() objects (which have already been classified by Bert)\n    # :param outdir: Path to LOG file to be created by adding the prediction of this model as well\n\n    import random\n    random.seed(1234567)\n\n    import tensorflow as tf\n    if tf.test.is_gpu_available():\n        strategy = tf.distribute.MirroredStrategy()\n        print('Using GPU')\n    else:\n        raise ValueError('CPU not recommended.')\n\n    with strategy.scope():\n
        vocabulary = Vocabulary.deserialize('en-top100k.vocabulary.pkl.gz')\n        embeddings = WordEmbeddings.deserialize('en-top100k.embeddings.pkl.gz')\n        reader = JSONPerLineDocumentReader(train_dir, True)\n        e = None\n        if model_type == 'cnn':\n            e = ClassificationExperiment(reader, CNNTokenizedDocumentClassifier(vocabulary, embeddings), ClassificationEvaluator())\n        else:\n            e = ClassificationExperiment(reader, StackedLSTMTokenizedDocumentClassifier(vocabulary, embeddings), ClassificationEvaluator())\n        # e.run()\n        test_comments = TokenizedDocumentReader(indir)\n        result = e.label_external(test_comments)\n    for k in result.keys():\n        print(f'{k}: {result[k]}')\n\n    instances = dict()\n\n    e = Comment(-1, 'lol', 'ah')\n    f = open(indir, 'rb')\n\n    try:\n        while True:\n            e = pickle.load(f)\n            print(e)\n            instances[str(e.id)] = e\n    except EOFError:\n        f.close()\n\n    f = open(outdir, 'wb')\n    \n    for key in result.keys():\n        model_label, model_score = result[key]\n        model_label = model_label.lower()\n        score = model_score[1]\n        if model_label == 'none':\n            score = model_score[0]\n        instances[key].add_model(model_type, model_label, score, None)\n        e = instances[key]\n        print(e)\n        print(e.labels)\n        print(e.scores)\n        print('=' * 20)\n        pickle.dump(instances[key], f)\n    \n    f.close()\n\ndef main3():\n    # Custom training and testing for the context model (SSASE)\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--train_dir\", default=None, type=str, required=True, help=\"Path to JSON file containing training examples\")\n    parser.add_argument(\"--indir\", default=None, type=str, required=True, help=\"Path to LOG file containing examples as Comment() object (which has already been classified by Bert)\")\n    parser.add_argument(\"--outdir\", default=None, type=str, required=True, help=\"Path to LOG file to be created by adding prediction of this model as well\")\n    args = parser.parse_args()\n    train_test_model_with_context(args.train_dir, args.indir, args.outdir)\n    \ndef main2():\n    # Custom training and testing for the no-context models\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--model\", default=None, type=str, required=True, help=\"Model used for classification\")\n    parser.add_argument(\"--train_dir\", default=None, type=str, required=True, help=\"Path to JSON file containing training examples\")\n    parser.add_argument(\"--indir\", default=None, type=str, required=True, help=\"Path to LOG file containing examples as Comment() object (which has already been classified by Bert)\")\n    parser.add_argument(\"--outdir\", default=None, type=str, required=True, help=\"Path to LOG file to be created by adding prediction of this model as well\")\n    args = parser.parse_args()\n    train_test_model_no_context(args.model, args.train_dir, args.indir, args.outdir)\n\ndef main():\n    # For the supervised learning task (with or without context) as described in the paper\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--model\", default=None, type=str, required=True, help=\"Model used for classification\")\n    args = parser.parse_args()\n    if args.model == 'ssase':\n        cross_validation_thread_ah_delta_context3()\n    else:\n        cross_validation_ah(args.model)\n\n\nif __name__ == '__main__':\n    # main()\n    main2()\n","sub_path":"experiments/classification_experiments.py","file_name":"classification_experiments.py","file_ext":"py","file_size_in_byte":10892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"541053179","text":"#-*- coding:utf-8 -*-\n# Write a program that simulates fortune
 cookies. Each run must randomly display one of the five fortunes.\n\nimport random\nfortune_cookies=[\"If it seems fates are against you today, they probably are.\", \\\n                 \"You will live a long time, long enough to open many, many fortune cookies.\",\\\n                 \"It's amazing how much good you can do if you don't care who gets the credit.\", \\\n                 \"To avoid criticism, do nothing, say nothing, be nothing.\",\\\n                 \"People learn little from success, but much from failure.\"\n                 ]\n\n\n\nwhile True:\n    random_number = random.randint(0,4)\n    print(\"Welcome to the amazing world!\")\n#    print(\"Do you want a fortune cookie?\")\n    # choice_number=int(input(\"please enter a number(1-5): \"))\n    # item_number=choice_number - 1\n    print(\"Ta-Da! Here is your fortune cookie:\")\n    print(\"---------------------------------------\")\n    print(fortune_cookies[random_number])\n    print(\"---------------------------------------\")\n    safe_number = input(\"Press 'q' to quit the program or press Enter to continue...:\")\n    if safe_number =='q':\n        break\n","sub_path":"cookie.py","file_name":"cookie.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"409219878","text":"import os\nimport shutil\nimport argparse\nimport sys\nimport json\n\n\nfields_to_replace = [\"base_dir\", \"split_dir\", \"model_file\", \"training_file\", \"validation_file\", \"test_file\"]\n\n\ndef main(configs_folder, new_experiment, to_imitate):\n    os.makedirs(os.path.join(configs_folder, new_experiment))\n    shutil.copytree(os.path.join(configs_folder,to_imitate,'debug_split'), os.path.join(configs_folder,new_experiment,'debug_split'))\n    conf_path = os.path.join(configs_folder, to_imitate, 'config.json')\n    with open(conf_path) as f:\n        config = json.load(f)\n    \n    for field in fields_to_replace:\n        config[field] = config[field].replace(to_imitate, new_experiment)\n    with open(os.path.join(configs_folder, new_experiment, 'config.json'), mode='w') as f:\n        json.dump(config, f, indent=2)\n    \n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--configs_folder\", help=\"specifies config folder path\",\n                        type=str, required=False, default=r\"/datadrive/configs\")\n    parser.add_argument(\"--new_experiment\", help=\"new experiment folder name\",\n                        type=str, required=True)\n    parser.add_argument(\"--to_imitate\", help=\"specifies what experiment to copy config and split from\",\n                        type=str, required=True)\n    opts = parser.parse_args()\n\n    main(opts.configs_folder, opts.new_experiment, opts.to_imitate)","sub_path":"brats/run_experiment_from_other.py","file_name":"run_experiment_from_other.py","file_ext":"py","file_size_in_byte":1452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"470124221","text":"from math import sqrt\nfrom PyQt5 import uic\nfrom PyQt5.QtWidgets import QMessageBox, QWidget\n\n\nclass Quadratic_equation(QWidget): # quadratic equation class\n    def __init__(self):\n        super().__init__()\n        uic.loadUi('ui_files/quadro.ui', self)\n        self.btn.clicked.connect(self.decision)\n        self.ok.clicked.connect(self.ok_)\n        self.show()\n\n    def decision(self): # finds the discriminant and the roots\n        a = float(self.a_.text())\n        b = float(self.b_.text())\n        c = float(self.c_.text())\n        if a == 0:\n            QMessageBox.about(self, 'Ошибка', 'Коэффициент а не может быть равен 0')\n            self.a_.clear()\n        else:\n            discr = b ** 2 - 4 * a * c\n            self.dicscriminant.setText(str(int(discr)))\n            if discr > 0:\n                
x1 = (-b + sqrt(discr)) / (2 * a)\n                x2 = (-b - sqrt(discr)) / (2 * a)\n                self.x1.setText('x1 = ' + str(x1)[:7])\n                self.x2.setText('x2 = ' + str(x2)[:7])\n            elif discr == 0:\n                self.dicscriminant.setText(\n                    str(int(discr)))\n                self.d_.setText('D = 0, следовательно \\nуравнение имеет \\nодин действительный корень.')\n                x = -b / (2 * a)\n                self.x1.setText('x =' + str(x))\n            else:\n                self.d_.setText('D < 0, следовательно \\nуравнение не имеет \\nдействительных корней.')\n\n    def ok_(self): # clears all input and output fields\n        self.a_.clear()\n        self.b_.clear()\n        self.c_.clear()\n        self.x1.clear()\n        self.x2.clear()\n        self.d_.clear()\n        self.dicscriminant.clear()\n","sub_path":"quadratic_equation.py","file_name":"quadratic_equation.py","file_ext":"py","file_size_in_byte":1859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"108630831","text":"import numpy as np\nfrom data import load_nodule_dataset, prepare_data, prepare_data_siamese_overlap\nfrom model import miniXception_loader\n\nfrom Network.directArch import directArch\nfrom Network.siameseArch import siamArch\n\n## --------------------------------------- ##\n## ------- General Setup ----------------- ##\n## --------------------------------------- ##\n\n#dataset = generate_nodule_dataset(0.2, 0.25)\ndataset = load_nodule_dataset()\nsize = 128\ninput_shape = (size, size,1)\n\n# DIR / SIAM\nchoose_model = \"SIAM\"\n\n## --------------------------------------- ##\n## ------- Run Direct Architecture ------- ##\n## --------------------------------------- ##\n\nif choose_model == \"DIR\":\n    #run = '005': lr=0.01, reduce factor=0.5, epsilon=0.05, patience=10, decay=lr/100\n    #run = '006': lr=0.005, reduce factor=0.5, epsilon=0.05, patience=10, decay=lr/100\n    #run = '007': lr=0.005, added dropout\n    #run = '008' # categorical_hinge - sucks\n    run = '009'\n\n    # prepare training data\n    images_train, labels_train = prepare_data(dataset[2], classes=2, size=size)\n    print(\"Training data ready: images({}), labels({})\".format(images_train.shape, labels_train.shape))\n    print(\"Range = [{},{}]\".format(np.min(images_train), np.max(images_train)))\n\n    # prepare validation data\n    images_valid, labels_valid = prepare_data(dataset[1], classes=2, size=size)\n    print(\"Validation data ready: images({}), labels({})\".format(images_valid.shape, labels_valid.shape))\n    print(\"Range = [{},{}]\".format(np.min(images_valid), np.max(images_valid)))\n\n\n    model = directArch(miniXception_loader, input_shape, 2)\n    #model.summary()\n\n    model.compile(learning_rate=0.005)\n    model.load_data( images_train, labels_train,\n                     images_valid, labels_valid )\n    model.train(run)\n\n\n\n## --------------------------------------- ##\n## ------- Run Siamese Architecture ------ ##\n## --------------------------------------- ##\n\nif choose_model == \"SIAM\":\n\n    # run = 'siam000' Adam(lr=0.005)\n    # run = 'siam001' # RMSpop\n    #run = 'siam002' # Nadam\n    #run = 'siam003' # Nadam & reversed loss function\n    #run = 'siam004' # Nadam & reversed loss function & reversed labels\n    #run = 'siam005' # Adam & reversed loss function & reversed labels\n    #run = 'siam006' # reload data after 10 epochs\n    #run = 'siam0065' # reload data after 25 epochs\n    #run = 'siam007' # reload data after 25 epochs, increase batch size to 64\n    #run = 'siam008' # reload after 25 epochs, batch size back to 32, margin = 5\n    #run = 'siam009' # overlapped data (reload after 25 epochs, batch size back to 32, margin = 5)\n    run = 'siam010' # corrected overlapped data (reload after 25 epochs, batch size back
 to 32, margin = 5)\n\n    # data epoch - overlap (increases epoch size)\n    # introduce DataGen\n    # on first epochs, more similar examples are required\n\n    # model\n\n    model = siamArch(miniXception_loader, input_shape)\n    model.model.summary()\n    model.compile(learning_rate=0.005)\n\n    epch_len = 25\n    for epch0 in [0,1,2,3]:\n        epch0 = epch_len * epch0\n        print(\"Next Epoch: {}\".format(epch0))\n\n        # prepare training data\n        images_train, labels_train = prepare_data_siamese_overlap(dataset[2], size=size)\n        assert images_train[0].shape == images_train[1].shape\n        print(\"Training data ready: image pairs({}), labels({})\".format(images_train[0].shape, labels_train.shape))\n        print(\"Range = [{},{}]\".format(np.min(images_train), np.max(images_train)))\n        print(\"Labels = [{},{}]\".format(np.min(labels_train), np.max(labels_train)))\n\n        # prepare validation data\n        images_valid, labels_valid = prepare_data_siamese_overlap(dataset[1], size=size)\n        assert images_valid[0].shape == images_valid[1].shape\n        print(\"Validation data ready: image pairs({}), labels({})\".format(images_valid[0].shape, labels_valid.shape))\n        print(\"Range = [{},{}]\".format(np.min(images_valid), np.max(images_valid)))\n        print(\"Labels = [{},{}]\".format(np.min(labels_valid), np.max(labels_valid)))\n\n\n        model.load_data(images_train, labels_train,\n                        images_valid, labels_valid)\n\n        model.train(label=run, epoch=epch0, n_epoch=epch_len)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"411309227","text":"\"\"\"Exercise 02\n\nA client's id is a unique code (only that person has it) made up of integers, starting at number 1 and increasing by 1 for as long as necessary.\n\nExample:\nid: 1\nNome: Dudu\n\nid: 2\nNome: Marta\n\nid: 3\nNome: Pedro\n\n\nATTENTION!!!!\nThe id is a number assigned automatically! The client does not choose the number. Your program must register this id automatically.\n\n\nWith that, create a client registry that receives the id, name and age. Then show each client's data individually.\n(register at least 4 clients)\n\"\"\"\nwhile True:\n    try:\n        nomes = []\n        idades = []\n        id = []\n        x = int(input('Informe o número de clientes que você deseja cadastrar: '))\n        for i in range(x):\n            nomes.append(input('Informe o nome: '))\n            idades.append(input('Informe a idade: '))\n            id.append(i+1)\n        for i in range(len(nomes)):\n            print(f'''\n            ID: {id[i]}\n            Nome: {nomes[i]}\n            Idade: {idades[i]}\n            ''')\n    except ValueError:\n        print('Oops! Valor inválido. Por favor, informe apenas números inteiros.
 ')\n    else:\n        y = input('Para continuar digite 1: ')\n        if y != '1':\n            break\n\n    \n    ","sub_path":"Exercicios/Aulas00/aula10/exercicio02.py","file_name":"exercicio02.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"362448333","text":"from django.urls.conf import path\nfrom django.views.generic.base import RedirectView\nfrom products import views\nurlpatterns = [\n    path('cart/addquantity/',views.additemquantity),\n    path('cart/removequantity/',views.removeitemquantity),\n    path('cart/add/',views.cart),\n    path('deliver/',views.deliverview),\n    path('cart/',views.CartView.as_view()),\n    path('contact/',views.ContactView.as_view()),\n    path('about/',views.AboutView.as_view()),\n    path('recents/',views.recentsView),\n    path('sold/',views.OrderedView.as_view()),\n    path('cart/remove/',views.cartdel),\n    path('product_list/',views.ProductListView.as_view()),\n    path('product_list/<int:pk>/',views.ProductDetailView.as_view()),\n    path('profile/edit/<int:pk>/',views.ProfileEditView.as_view(success_url=\"/products/product_list\")),\n    path('',RedirectView.as_view(url=\"product_list/\")),\n]","sub_path":"eshop2/products/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":911,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"121137351","text":"import inspect\nimport subprocess\nfrom pathlib import Path\nfrom typing import List, Union, Dict, Iterable\nfrom unittest.mock import patch\n\nimport ceph_ho_dumper\n\n\ntest_data_dir = Path(inspect.getfile(inspect.currentframe())).absolute().parent / 'test_data'\n\n\nclass TimeModule:\n    def __init__(self, curr_time: float, stop_time: float) -> None:\n        self.curr_time = curr_time\n        self.stop_time = stop_time\n\n    def time(self) -> float:\n        return self.curr_time\n\n    def sleep(self, time: float):\n        self.curr_time += time\n        if self.curr_time >= self.stop_time:\n            raise ceph_ho_dumper.UTExit()\n\n\nclass SubprocessModule:\n    def __init__(self, cmdresults: Dict[str, Union[bytes, List[bytes]]], timeout: int = None) -> None:\n        self.cmdresults = cmdresults\n        self.timeout = timeout\n        self.SubprocessError = subprocess.SubprocessError\n        self.CalledProcessError = subprocess.CalledProcessError\n\n    def check_output(self, cmd: Union[str, Iterable[str]], shell: bool = False, timeout: int = None, **other) -> bytes:\n        assert shell == isinstance(cmd, str)\n        cmd_s = \" \".join(cmd) if isinstance(cmd, list) else cmd\n        if self.timeout:\n            assert abs(timeout - self.timeout) < 1E-1\n\n        res = self.cmdresults[cmd_s]\n        if isinstance(res, bytes):\n            return res\n        else:\n            return res.pop()\n\n\ndef test_simple():\n    # placeholder fixtures: a real test would supply canned command outputs and times\n    with patch(\"ceph_ho_dumper.subprocess\", SubprocessModule(cmdresults={})):\n        with patch(\"ceph_ho_dumper.time\", TimeModule(curr_time=0.0, stop_time=60.0)):\n            ceph_ho_dumper.main()\n","sub_path":"collect_historic_ops/test_ho_dumper.py","file_name":"test_ho_dumper.py","file_ext":"py","file_size_in_byte":1571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"390291979","text":"# Given a string, we can \"shift\" each of its letter to its successive letter, \n# for example: \"abc\" -> \"bcd\". We can keep \"shifting\" which forms the sequence:\n\n# \"abc\" -> \"bcd\" -> ... 
-> \"xyz\"\n# Given a list of strings which contains only lowercase alphabets, \n# group all strings that belong to the same shifting sequence.\n\n# For example, given: [\"abc\", \"bcd\", \"acef\", \"xyz\", \"az\", \"ba\", \"a\", \"z\"], \n# A solution is:\n\n# [\n# [\"abc\",\"bcd\",\"xyz\"],\n# [\"az\",\"ba\"],\n# [\"acef\"],\n# [\"a\",\"z\"]\n# ]\n\n# from collections import defaultdict\n\n# Given a string, we can \"shift\" each of its letter to its successive letter, for example: \"abc\" -> \"bcd\". \n# We can keep \"shifting\" which forms the sequence:\n\n# \"abc\" -> \"bcd\" -> ... -> \"xyz\"\n# Given a list of strings which contains only lowercase alphabets, group all strings that belong to the same shifting sequence.\n\n# Example:\n\n# Input: [\"abc\", \"bcd\", \"acef\", \"xyz\", \"az\", \"ba\", \"a\", \"z\"],\n# Output: \n# [\n# [\"abc\",\"bcd\",\"xyz\"],\n# [\"az\",\"ba\"],\n# [\"acef\"],\n# [\"a\",\"z\"]\n# ]\n\nclass Solution(object):\n def groupStrings(self, strings):\n \"\"\"\n :type strings: List[str]\n :rtype: List[List[str]]\n \"\"\"\n maps = collections.defaultdict(list)\n for string in strings:\n temp = [0]\n for j, letter in enumerate(list(string)[1:]):\n diff = ord(string[j]) - ord(string[j-1])\n if diff < 0: diff+=26\n temp.append(diff)\n maps[tuple(temp)].append(string)\n return maps.values()\n","sub_path":"LEETCODE/0249. Group Shifted Strings.py","file_name":"0249. Group Shifted Strings.py","file_ext":"py","file_size_in_byte":1533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"567300076","text":"import multiprocessing\nimport os\nimport utils\nimport subprocess\nimport sys\nimport math\nimport signal\n\n\nclass Error (Exception): pass\n\nclass Scan:\n\n\tdef __init__(self,\n\t\tprep_id,\n\t\tscan_id,\n\t\thmm_db, \n\t\tout_dir = \".\",\n\t\tcpu = 2):\n\n\t\tself.args = {\"prep_dir\" : os.path.join(os.path.abspath(out_dir),prep_id + \"_PREPARE\") ,\n\t\t\"scan_dir\" : os.path.join(os.path.abspath(out_dir),scan_id + \"_SCAN\") ,\n\t\t\"cpu\" : cpu,\n\t\t\"hmm_db\" : hmm_db,\n\t\t\"configs\" : utils.load_config_file() ,\n\t\t\"prep_id\": prep_id,\n\t\t\"scan_id\": scan_id}\n\n\n\tdef _run_hmmpress(self):\n\t\tself.args[\"hmm_db\"] = os.path.abspath(self.args[\"hmm_db\"])\n\t\tres = subprocess.call([self.args[\"configs\"][\"hmmpress\"],self.args[\"hmm_db\"]])\n\n\n\n\tdef _get_hmmer_version(self,command):\n\t\tp = subprocess.Popen([self.args[\"configs\"][command], \"-h\"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\t\toutput, err = p.communicate(b\"input data that is passed to subprocess' stdin\")\n\t\trc = p.returncode\n\t\toutput = output.split()\n\t\tfor i in range(0,len(output)):\n\t\t\tif output[i] == \"HMMER\":\n\t\t\t\treturn (command + \": HMMER-\" + output[i+1] + \"\\n\")\n\t\treturn (command + \": HMMER version not found\\n\")\n\n\n\tdef run(self):\n\t\t\n\t\t## create output directory\n\t\tutils.assure_path_exists(self.args[\"scan_dir\"])\n\n\t\tif self.args[\"hmm_db\"] not in utils.databases:\n\t\t\tself._run_hmmpress()\n\t\telse:## before starting, copy the data directory to the out direcotry\n\t\t\td = os.path.abspath(os.path.dirname(__file__))\n\t\t\tdata_env = os.path.join(d, 'data/')\n\t\t\tos.system(\"mkdir -p \" + os.path.join(self.args[\"scan_dir\"],\"data\"))\n\t\t\t## Do both??? 
 into <scan_dir>/data, where the hmm_db path below expects it\n\t\t\tos.system(\"cp -r \" + data_env + \" \" + os.path.join(self.args[\"scan_dir\"]))\n\t\t\tself.args[\"hmm_db\"] = os.path.join(self.args[\"scan_dir\"],'data',self.args[\"hmm_db\"])\n\n\t\t\n\n\t\t\n\t\t## get version of hmmscan for log file\n\t\tlog_other = self._get_hmmer_version(\"hmmscan\")\n\t\tlog_other = log_other + self._get_hmmer_version(\"hmmpress\")\n\t\tlog_other = log_other + \"### INPUT ### \\ncnt\\tgenome\\tfasta_file\\tgff_file\\n\" # keeping a text file of all the genomes used\n\n\t\tjobs = []\n\t\tcnt = 1\n\t\tfor file in os.listdir(self.args[\"prep_dir\"]):\n\t\t\tif file.endswith(\".sixframe.fasta\"):\n\t\t\t\tbasename = os.path.basename(file)\n\t\t\t\tbasename = basename.replace(\".sixframe.fasta\",\"\")\n\n\t\t\t\tsixframe_file = os.path.join(self.args[\"prep_dir\"], basename + \".sixframe.fasta\")\n\t\t\t\tannotated_file = os.path.join(self.args[\"prep_dir\"], basename + \".annotated.fasta\")\n\n\t\t\t\tscan_genome = {\"basename\" :basename, \"source\": \"sixframe\", \"fasta_file\": sixframe_file, \"out_dir\": self.args[\"scan_dir\"], \"hmm_db\": self.args[\"hmm_db\"], \"hmmscan\": self.args[\"configs\"][\"hmmscan\"]}\n\t\t\t\tjobs.append(scan_genome)\n\n\n\t\t\t\tif os.path.isfile(annotated_file):\n\t\t\t\t\tscan_genome = {\"basename\" :basename, \"source\": \"annotated\", \"fasta_file\": annotated_file, \"out_dir\": self.args[\"scan_dir\"], \"hmm_db\": self.args[\"hmm_db\"], \"hmmscan\": self.args[\"configs\"][\"hmmscan\"]}\n\t\t\t\t\tjobs.append(scan_genome)\n\t\t\t\t\tlog_other = log_other + str(cnt) +\"\\t\" + basename +\"\\t\"+ sixframe_file +\"\\t\"+ annotated_file+\"\\n\"\n\t\t\t\telse:\n\t\t\t\t\tlog_other = log_other + str(cnt) +\"\\t\" + basename +\"\\t\"+ sixframe_file +\"\\tnot found\\n\"\n\t\t\t\tcnt += 1\n\n\n\t\tutils.write_log(os.path.join(self.args[\"scan_dir\"], \"LOG\"), \"STEP 2 : GENOME SCANNING\", self.args, log_other)\n\n\t\t## Allocate CPUs in an efficient way\n\t\tCPUs_per_scan = 5\n\t\t\n\t\t\n\t\t## if there are more CPUs than jobs, or we have fewer than 5 CPUs, don't use multiprocessing, just give \n\t\t## the maximum number of CPUs to each hmmscan call\n\t\tif self.args[\"cpu\"] > len(jobs) or self.args[\"cpu\"] < CPUs_per_scan:\n\t\t\tpool_size = 1\n\t\t\tCPUs_per_scan = self.args[\"cpu\"]\n\t\t## otherwise, create a pool of size CPUs/5, and each scan will run on 5 CPUs (leftover CPUs are currently unused)\n\t\telse:\n\t\t\tpool_size = self.args[\"cpu\"] // CPUs_per_scan\n\n\t\tfor j in jobs:\n\t\t\tj[\"scan_cpu\"] = CPUs_per_scan\n\t\t\n\n\n\t\tpool = multiprocessing.Pool(processes = pool_size)\n\t\tresults = pool.map_async(run_scan,tuple(jobs))\n\t\tpool.close()\n\t\tpool.join()\n\n\ndef run_scan(args):\n\t# build the command as a list so it can be reused across subprocess.call retries\n\tcommand = list(map(str,[args[\"hmmscan\"],\"--cpu\",args[\"scan_cpu\"],\"--max\",\"--noali\",\"--domtblout\",\n\t\tos.path.join(args[\"out_dir\"] , args[\"basename\"] + \".\" + args[\"source\"] + \".result\"), args[\"hmm_db\"], args[\"fasta_file\"]])\n\n\tres = 1 \n\tattempt = 0\n\twhile attempt < utils.MAX_ATTEMPTS and res != 0:\n\t\tres = subprocess.call(command)\n\t\tattempt += 1\n\tif res != 0:\n\t\tsys.exit(\"Error: Failed to complete hmmscan for [\" + args[\"basename\"] + \"_\" + args[\"source\"] + \"]. 
Please check log files.\")\n\n\treturn res\n\n","sub_path":"sling/scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":4584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"96300318","text":"from Renderer import Renderer\n\nclass TextRenderer(Renderer):\n\tdef __setColor(self,rgb):\n\t\tindex = 16 + int(rgb[2]*5) + 6*int(rgb[1]*5) + 36*int(rgb[0]*5)\n\t\tprint(\"\\033[48;5;\"+str(index)+\"m\", end=\"\")\t\n\t\n\tdef render(self, color):\n\t\tprint(\"\\033[2J\\033[;H\")\n\t\t\n\t\ti = 0\n\t\tfor y in range(self.height):\n\t\t\tfor x in range(self.width):\n\t\t\t\trgb = color[i]\n\t\t\t\tself.__setColor(rgb)\n\t\t\t\tprint(\" \", end=\"\")\t\t\t\t\n\t\t\t\ti += 1\n\t\t\tprint()\n\t\tself.__setColor([0,0,0])\n\t\tprint()","sub_path":"py/tetris/TextRenderer.py","file_name":"TextRenderer.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"79762732","text":"import sys\nimport os\n\nsys.path.append(os.path.join(os.path.dirname(__file__), '..'))\n\nfrom pytorch_pretrained_bert import BertTokenizer, BertModel\nimport logging\nimport torch\nfrom tqdm import tqdm\n\nlogger = logging.getLogger(__name__)\n\nlist_folder = \"/Users/daniel/ideaProjects/pytorch-pretrained-BERT/lists/\"\ncache_folder = \"/Users/daniel/ideaProjects/pytorch-pretrained-BERT/examples/\"\n\ndef get_phrases():\n arr = os.listdir(list_folder)\n phrases = []\n for file in arr:\n print(file)\n filepath = list_folder + file\n with open(filepath) as fp:\n line = fp.readline()\n counter = 0\n while line:\n line = fp.readline().replace(\"\\n\", \"\").strip()\n if len(line) > 1:\n phrases.append(line)\n counter += 1\n if counter > 20:\n break\n phrases = list(set(phrases))\n print(len(phrases))\n return phrases\n\ndef extract_representations():\n phrases = get_phrases()\n pairs = []\n for p1 in phrases:\n for p2 in phrases:\n if p1 != p2:\n pairs.append((p1, p2))\n print(len(pairs))\n add_new_vocabulary(pairs)\n\ndef add_missing():\n add_new = [\"Ford\", \"Lincoln\"]\n phrases = get_phrases()\n pairs = []\n for p1 in add_new:\n for p2 in phrases:\n if p1 != p2:\n pairs.append((p1, p2))\n print(len(pairs))\n add_new_vocabulary(pairs)\n\ndef add_new_vocabulary(pairs):\n model = BertModel.from_pretrained('bert-base-uncased')\n model.eval()\n # Load pre-trained model tokenizer (vocabulary)\n tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')\n\n with open(cache_folder + \"cache.txt\", 'a') as out:\n for (p1, p2) in tqdm(pairs, desc=\"Evaluating\"):\n tokenized_text1 = tokenizer.tokenize(p1)\n tokenized_text2 = tokenizer.tokenize(p2)\n tokens = []\n segment_ids = []\n\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n\n for token in tokenized_text1:\n tokens.append(token)\n segment_ids.append(0)\n\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n second_part_start_index = len(tokens)\n for token in tokenized_text2:\n tokens.append(token)\n segment_ids.append(1)\n\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n tokens_tensor = torch.tensor([input_ids])\n segments_tensors = torch.tensor([segment_ids])\n\n encoded_layers, _ = model(tokens_tensor, segments_tensors, output_all_encoded_layers=False)\n\n CLS_representations = encoded_layers[0,0].cpu().tolist()\n first_tokens_representation1 = encoded_layers[0,1].cpu().tolist()\n first_tokens_representation2 = 
encoded_layers[0,second_part_start_index].cpu().tolist()\n\n out.write(p1 + \"\\t\" + p2 + \"\\t\" + str(CLS_representations) +\n \"\\t\" + str(first_tokens_representation1) +\n \"\\t\" + str(first_tokens_representation2) + '\\n')\n\nif __name__ == \"__main__\":\n # extract_representations_single_question()\n add_missing()\n","sub_path":"examples/representations_for_lists.py","file_name":"representations_for_lists.py","file_ext":"py","file_size_in_byte":3303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"47938312","text":"from aiogram import types\nfrom aiogram.dispatcher.filters.builtin import CommandStart\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters.builtin import Text\n\nfrom loader import dp, log\n\nfrom keyboards.default.defaults import do_registration\nfrom keyboards.dispatcher import dispatcher\nfrom states.state_groups import CustomUser\n\nfrom db.models import User, Rate, RequestToAdmin\nfrom data.config import check_if_user_is_admin\n\nimport random\n\n\n@dp.message_handler(Text(equals=['Back']), state='*')\nasync def back(message: types.Message, state: FSMContext):\n async with state.proxy() as data:\n await state.finish()\n keyboard, prev_level = await dispatcher(data.get('prev_level', 'LEVEL_1'))\n message_text = 'Back to previous level'\n await message.answer(message_text, reply_markup=keyboard)\n data['prev_level'] = prev_level # ALWAYS save new prev_level to state\n\n\n@dp.message_handler(CommandStart(), state='*')\nasync def bot_start(m: types.Message, state: FSMContext):\n await m.answer(f\"Hello, {m.from_user.full_name}!\\n\" +\n 'Please, sign up first.',\n reply_markup=do_registration)\n log.info(f'User: {m.from_user.id} comes')\n await state.finish()\n\n\n@dp.message_handler(Text(equals=['Add custom user(For test only)']))\nasync def add_custom_user(m: types.Message, state: FSMContext):\n keyboard, prev_level = await dispatcher('LEVEL_2_PROFILES')\n text = 'Because testers live in different cities, it can be hard to test distance calculating.\\n' + \\\n 'So you can create test user with custom coordinates for testing.\\n' + \\\n 'This user will liked you\\n' + \\\n 'Coordinates you can get from Google Maps for your city or any place you want\\n\\n' + \\\n 'Input coordinates in format - longitude:latitude'\n await m.answer(text, reply_markup=keyboard)\n await CustomUser.coord.set()\n await state.update_data(prev_level=prev_level)\n\n\n@dp.message_handler(state=CustomUser.coord)\nasync def set_custom_user(m: types.Message, state: FSMContext):\n if ':' not in m.text:\n await m.answer('Wrong format')\n return\n lat, long = m.text.split(':')\n try:\n long, lat = float(long), float(lat)\n rand_id = random.randint(0, 10000)\n photo_id = 'AgACAgIAAxkBAAINLWEhJltMgCWIPVTZ_27n9ZgnVSLSAAICszEbpycISbTVoFhMhtIaAQADAgADeQADIAQ'\n user = await User.create(user_id=rand_id,\n full_name=f'Test {rand_id}',\n username=f'Test username {rand_id}',\n description=f'Test desc {rand_id}',\n gender=True,\n interested_gender=False,\n age=30,\n longitude=long, latitude=lat,\n search_distance=100000,\n photo=photo_id)\n me = await User.get(user_id=m.from_user.id)\n await Rate.create(rate_owner=user, target=me, type=True)\n await m.answer('Saved')\n data = await state.get_data()\n await state.finish()\n await state.update_data(**data)\n except ValueError:\n await m.answer('Wrong format')\n return\n\n\n@dp.message_handler(Text(equals=['Request admin status(For test only)']))\nasync def request_admin_status(m: 
types.Message):\n if await check_if_user_is_admin(str(m.from_user.id)):\n await m.answer('You are already admin')\n return\n user = await User.get(user_id=m.from_user.id)\n request, created = await RequestToAdmin.get_or_create(user=user)\n if not created:\n await m.answer('You already sent request. Wait for response')\n return\n await m.answer('Request has been created. Wait for response')\n","sub_path":"handlers/users/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":3836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"186127187","text":"class Foo:\n def __init__(self, name):\n self.name = name\n\n def fun1(self):\n print('fun1')\n\n\ndef fun2(arg):\n print(arg)\n\n\nFoo.fun2 = fun2\n\nf = Foo('ok')\nf.fun2()\nf.age = 18\nprint(f.age)\n\nprint('*'*20)\nf1 = Foo('bad')\nprint(f1.age)\n\nf1.fun2()\n","sub_path":"toDemo/demo01.py","file_name":"demo01.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"361781665","text":"from django.shortcuts import render, redirect\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse, JsonResponse\nfrom django.contrib import auth\nfrom .models import SiteUsers, Group, Lesson, User, Univercity, Admin\nimport json\nfrom . import site_backend, alice_backend\n\n\ndef main_page(request):\n groups = Group.objects.all()\n return render(request, 'main.html', {'user': auth.get_user(request), 'groups': groups})\n\n\n@csrf_exempt\ndef get_request(request):\n if request.method == 'POST':\n request_json = json.loads(request.body.decode('utf-8'))\n text, text_to_speech = alice_backend.get_answer(request_json)\n response = {\n \"response\": {\n \"text\": text,\n \"tts\": text_to_speech,\n \"end_session\": False\n },\n \"session\": {\n \"session_id\": request_json['session']['session_id'],\n \"message_id\": request_json['session']['message_id'],\n \"user_id\": request_json['session']['user_id']\n },\n \"version\": \"1.0\"\n }\n return JsonResponse(response)\n return HttpResponse('No POST in request')\n\n\ndef home_page(request):\n return redirect('timetable')\n\n\ndef timetable(request):\n if auth.get_user(request).is_authenticated:\n try:\n ##Если пользователь уже выбрал расписание\n user = SiteUsers.objects.get(user_id=auth.get_user(request))\n group = user.group_id\n try:\n Admin.objects.get(user_id=auth.get_user(request), group_id=group)\n admin = True\n except Admin.DoesNotExist:\n admin = False\n lessons = Lesson.objects.filter(group_id=group)\n mon = lessons.filter(day_of_week=0)\n tue = lessons.filter(day_of_week=1)\n wed = lessons.filter(day_of_week=2)\n thu = lessons.filter(day_of_week=3)\n fri = lessons.filter(day_of_week=4)\n sat = lessons.filter(day_of_week=5)\n sun = lessons.filter(day_of_week=6)\n tomorrow = site_backend.get_tomorrow_lessons(SiteUsers.objects.get(user_id=auth.get_user(request)).group_id)\n\n admin_message = ''\n if request.method == 'POST':\n\n ##Выбрать другое расписание\n if 'change_timetable' in request.POST:\n user.delete()\n return redirect('timetable')\n\n ##Добавление нового админа\n if 'add_admin' in request.POST:\n email = request.POST.get('email')\n try:\n new_admin = User.objects.get(email=email)\n try:\n new_admin = Admin.objects.get(user_id=new_admin, group_id=group)\n admin_message = email + ' уже является админом.'\n except Admin.DoesNotExist:\n Admin.objects.create(user_id=new_admin, group_id=group)\n admin_message = 
email + ' теперь является админом.'\n except User.DoesNotExist:\n admin_message = 'Такого пользователя не существует.'\n\n response = {\n 'user': auth.get_user(request),\n 'admin': admin,\n 'group': group.name,\n 'admin_message': admin_message,\n 'mon': mon,\n 'mon1_len': len(mon.filter(repeat=0)) + len(mon.filter(repeat=1)) > 0,\n 'mon2_len': len(mon.filter(repeat=0)) + len(mon.filter(repeat=2)) > 0,\n 'tue': tue,\n 'tue1_len': len(tue.filter(repeat=0)) + len(tue.filter(repeat=1)) > 0,\n 'tue2_len': len(tue.filter(repeat=0)) + len(tue.filter(repeat=2)) > 0,\n 'wed': wed,\n 'wed1_len': len(wed.filter(repeat=0)) + len(wed.filter(repeat=1)) > 0,\n 'wed2_len': len(wed.filter(repeat=0)) + len(wed.filter(repeat=2)) > 0,\n 'thu': thu,\n 'thu1_len': len(thu.filter(repeat=0)) + len(thu.filter(repeat=1)) > 0,\n 'thu2_len': len(thu.filter(repeat=0)) + len(thu.filter(repeat=2)) > 0,\n 'fri': fri,\n 'fri1_len': len(fri.filter(repeat=0)) + len(fri.filter(repeat=1)) > 0,\n 'fri2_len': len(fri.filter(repeat=0)) + len(fri.filter(repeat=2)) > 0,\n 'sat': sat,\n 'sat1_len': len(sat.filter(repeat=0)) + len(sat.filter(repeat=1)) > 0,\n 'sat2_len': len(sat.filter(repeat=0)) + len(sat.filter(repeat=2)) > 0,\n 'sun': sun,\n 'sun1_len': len(sun.filter(repeat=0)) + len(sun.filter(repeat=1)) > 0,\n 'sun2_len': len(sun.filter(repeat=0)) + len(sun.filter(repeat=2)) > 0,\n 'tomorrow': tomorrow,\n 'tomorrow_len': len(tomorrow) > 0\n }\n\n return render(request, 'timetable.html', response)\n\n except SiteUsers.DoesNotExist:\n if request.method == 'POST':\n ##Если посльзователь выбирает расписание\n if 'choose_timetable' in request.POST:\n univercity_input = request.POST.get('univercity')\n try:\n univercity = Univercity.objects.get(name=univercity_input)\n except Univercity.DoesNotExist:\n try:\n readable_univercity_input = univercity_input.lower().replace(' ', '').replace('-', '')\n univercity = Univercity.objects.get(readable_name=readable_univercity_input)\n except Univercity.DoesNotExist:\n return render(request, 'timetable.html', {'does_not_exist': 'Для такого ВУЗа нет расписания',\n 'user': auth.get_user(request),\n 'site_user_does_not_exist': '-'})\n try:\n group_input = request.POST.get('group')\n group = Group.objects.get(univerсity_id=univercity, name=group_input)\n SiteUsers.objects.create(user_id=auth.get_user(request), univerсity_id=univercity,\n group_id=group)\n return redirect('timetable')\n except Group.DoesNotExist:\n readable_group_input = group_input.lower().replace(' ', '').replace('-', '')\n try:\n group = Group.objects.get(univerсity_id=univercity, readable_name=readable_group_input)\n SiteUsers.objects.create(user_id=auth.get_user(request), univerсity_id=univercity,\n group_id=group)\n return redirect('timetable')\n except Group.DoesNotExist:\n return render(request, 'timetable.html', {'does_not_exist': 'Для такой группы нет расписания',\n 'user': auth.get_user(request),\n 'site_user_does_not_exist': '-'})\n ##Если пользователь добавляет новое расписание\n elif 'add_timetable' in request.POST:\n univercity = request.POST.get('univercity')\n group = request.POST.get('group')\n start_date = request.POST.get('start_date')\n end_date = request.POST.get('end_date')\n user = auth.get_user(request)\n success = site_backend.add_timetable(univercity, group, start_date, end_date, user)\n if success:\n return redirect('timetable')\n else:\n return render(request, 'timetable.html', {'site_user_does_not_exist': '-',\n 'add_message': 'Для такой группы уже есть расписание',\n 'user': 
auth.get_user(request)})\n\n return render(request, 'timetable.html', {'user': auth.get_user(request),\n 'site_user_does_not_exist': 'У вас нет расписания, создайте его, либо, попросите '\n 'доступ к нему у другого человека, который его создал'})\n return render(request, 'timetable.html', {'user': auth.get_user(request)})\n\n\ndef change_lesson(request):\n if request.method == 'POST':\n message = ''\n print(request.POST.get('lesson_id'))\n lesson = Lesson.objects.get(pk=request.POST.get('lesson_id'))\n if 'change_lesson' in request.POST:\n name = request.POST.get('lesson')\n teacher = request.POST.get('teacher')\n classroom = request.POST.get('classroom')\n start_time = request.POST.get('start_time')\n end_time = request.POST.get('end_time')\n type = request.POST.get('type')\n day_of_week = int(request.POST.get('day_of_week'))\n repeat = int(request.POST.get('repeat'))\n user = SiteUsers.objects.get(user_id=auth.get_user(request))\n start_date = user.group_id.start_date\n end_date = user.group_id.end_date\n message = site_backend.change_lesson(lesson, name, teacher, classroom, start_time, end_time, type,\n day_of_week, repeat, start_date, end_date)\n lesson = Lesson.objects.get(pk=request.POST.get('lesson_id'))\n start_time = str(lesson.start_time)\n end_time = str(lesson.end_time)\n user = auth.get_user(request)\n group = SiteUsers.objects.get(user_id=user).group_id\n try:\n Admin.objects.get(user_id=user, group_id=group)\n admin = True\n except Admin.DoesNotExist:\n admin = False\n message = 'Для того, что бы делать изменения, попросите разрешение у админа'\n response = {\n 'admin': admin,\n 'lesson': lesson,\n 'start_time': start_time[0:len(start_time)-3],\n 'end_time': end_time[0:len(end_time)-3],\n 'user': auth.get_user(request),\n 'message': message\n }\n return render(request, 'change_lesson.html', response)\n\n\ndef add_timetable(request):\n # if request.method == 'POST':\n # group_name = request.POST.get('group')\n # lesson = request.POST.get('lesson')\n # teacher = request.POST.get('teacher')\n # classroom = request.POST.get('classroom')\n # start_time = request.POST.get('start_time')\n # end_time = request.POST.get('end_time')\n # day_of_week = request.POST.get('day_of_week')\n # start_date = request.POST.get('start_date')\n # end_date = request.POST.get('end_date')\n # repeat = request.POST.get('repeat')\n # success = site_backend.add_lesson(group_name, lesson, teacher, classroom, start_time, end_time, day_of_week,\n # start_date, end_date, repeat)\n # if success:\n # return render(request, 'add_timetable.html', {'if_add': 'Предмет добавлен'})\n # else:\n # return render(request, 'add_timetable.html', {'if_add': 'Даты не добавлены'})\n return render(request, 'add_timetable.html', {'user': auth.get_user(request)})\n\n\ndef add_lesson(request):\n if request.method == 'POST':\n user = SiteUsers.objects.get(user_id=auth.get_user(request))\n group_name = user.group_id\n lesson = request.POST.get('lesson')\n teacher = request.POST.get('teacher')\n classroom = request.POST.get('classroom')\n start_time = request.POST.get('start_time')\n end_time = request.POST.get('end_time')\n start_date = user.group_id.start_date\n end_date = user.group_id.end_date\n day_of_week = request.POST.get('day_of_week')\n repeat = request.POST.get('repeat')\n type = request.POST.get('type')\n success = site_backend.add_lesson(group_name, lesson, teacher, classroom, start_time, end_time,\n day_of_week, repeat, type, start_date, end_date)\n return render(request, 'add_lesson.html', {'message': success, 
'user': auth.get_user(request)})\n return render(request, 'add_lesson.html', {'user': auth.get_user(request)})\n\n\ndef login(request):\n if request.method == 'POST':\n email = request.POST.get('email')\n password = request.POST.get('password')\n user = auth.authenticate(username=email, password=password)\n if user is not None:\n auth.login(request, user)\n return redirect('timetable')\n else:\n return render(request, 'login.html', {'error': 'Нерпавильный email или пароль', 'user': auth.get_user(request)})\n return render(request, 'login.html', {'user': auth.get_user(request)})\n\n\ndef register(request):\n if request.method == 'POST':\n input_username = request.POST.get('email')\n input_password = request.POST.get('password')\n try:\n User.objects.get(username=input_username)\n return render(request, 'register.html', {'error': 'Пользователь с таким email уже существует', 'user': auth.get_user(request)})\n except User.DoesNotExist:\n user = User.objects.create(username=input_username, password=input_password)\n user.set_password(input_password)\n user.save()\n user = auth.authenticate(username=input_username, password=input_password)\n auth.login(request, user)\n return redirect('timetable')\n return render(request, 'register.html', {'user': auth.get_user(request)})\n\n\ndef logout(request):\n auth.logout(request)\n return redirect('timetable')","sub_path":"alice/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"41749531","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport logging\nlogger = logging.getLogger(\"icecube.LedDisplay\")\nlogger.setLevel(logging.INFO)\nhandler = logging.StreamHandler()\nlogger.addHandler(handler)\n\n\nfrom time import sleep\n\nimport sys, os\nsys.path.append(os.path.dirname(os.path.realpath(__file__))+\"/../steamshovel\")\n\nfrom LedDisplay import DisplayManager, DisplayController\n\nmanager = DisplayManager()\n\nimport argparse\nparser = argparse.ArgumentParser(description=\"Test IceCube LED display\")\nparser.add_argument(\n \"-m\"\n , \"--mode\"\n , choices=[\"string\", \"rgb\"]\n , help=\"Test string per string or display RGB loop\"\n , required=True\n)\nparser.add_argument(\"-d\", \"--duration\", type=float, help=\"Duration of a single frame. Defaults to 1/25.\", default=1./25)\nparser.add_argument(\"-c\", \"--count\", type=int, help=\"Number of frames to render. Defaults to 0, or 1 if offset is not 0.\", default=0)\nparser.add_argument(\"-o\", \"--offset\", type=int, help=\"Initial frame offset number. 
Defaults to 0.\", default=0)\nparser.add_argument(\"-p\", \"--persist\", action=\"store_true\", help=\"If present, do not clear display after last frame.\", default=False)\nargs = parser.parse_args(sys.argv[1:])\n\ninitial_offset = args.offset\nframe_count = args.count\nduration = args.duration\n\n\nclass OMKey:\n def __init__(self, string, om):\n self.string = string\n self.om = om\n\nimport colorsys\n\n#\n# Draw an HSV loop to the display\n#\ndef draw_hsv(display, offset):\n pixel_size = display.led_class.DATA_LENGTH\n rgb_to_data = display.led_class.float_to_led_data\n buffer_length = display.buffer_length\n frame = bytearray(buffer_length)\n\n if os.getenv(\"VIRTUAL_DEVICES\") is None:\n lightness = 0.1\n else:\n lightness = 0.5\n\n if display.data_type == DisplayController.DATA_TYPE_IC_STRING:\n pixel_offset = offset%60\n for pixel in range(60):\n hue = float(pixel-pixel_offset)/60\n if hue < 0:\n hue += 1\n data = rgb_to_data(colorsys.hls_to_rgb(hue, lightness, 1.0))\n for string_offset in range(display.string_count):\n buffer_offset = string_offset*60+pixel\n frame[buffer_offset*pixel_size:(buffer_offset+1)*pixel_size] = data\n\n elif display.data_type == DisplayController.DATA_TYPE_IT_STATION:\n pixel_offset = offset%display.string_count\n for pixel in range(display.string_count):\n hue = (pixel-pixel_offset)/display.string_count\n if hue < 0:\n hue += 1\n data = rgb_to_data(colorsys.hls_to_rgb(hue, lightness, 1.0))\n buffer_offset = pixel_size*pixel_offset\n frame[pixel*pixel_size:(pixel+1)*pixel_size] = data\n\n return frame\n\n\ndef draw_loop(display, offset):\n \"Light up one string (nr. `offset`) of the detector\"\n pixel_size = display.led_class.DATA_LENGTH\n rgb_to_data = display.led_class.float_to_led_data\n buffer_length = display.buffer_length\n frame = bytearray(buffer_length)\n\n if os.getenv(\"VIRTUAL_DEVICES\") is None:\n pixel_data = rgb_to_data((0.1, 0.1, 0.1))\n else:\n pixel_data = rgb_to_data((1, 1, 1))\n\n if display.data_type == DisplayController.DATA_TYPE_IC_STRING:\n string_offset = offset % display.string_count\n string = display.string_range[0]-1 + string_offset\n pixel_offset = display.getLedIndex(OMKey(string+1, 1))\n for pixel in range(pixel_offset, pixel_offset+60):\n buffer_offset = pixel*pixel_size\n frame[buffer_offset:buffer_offset+pixel_size] = pixel_data\n\n elif display.data_type == DisplayController.DATA_TYPE_IT_STATION:\n pixel = offset % display.string_count\n frame[pixel*pixel_size:(pixel+1)*pixel_size] = pixel_data\n\n return frame\n\nif len(manager.displays) and (frame_count > 0 or initial_offset > 0):\n for display in manager.displays:\n print(\"Opening display\")\n display.open()\n\n if initial_offset > 0:\n if frame_count > 0:\n frame_range = range(initial_offset-1, initial_offset-1+frame_count)\n else:\n frame_range = range(initial_offset-1, initial_offset)\n else:\n frame_range = range(0, frame_count)\n\n for offset in frame_range:\n logger.debug(\"{} frames to go\".format(frame_count-offset))\n for display in manager.displays:\n if args.mode == 'string':\n frame = draw_loop(display, offset)\n elif args.mode == 'rgb':\n frame = draw_hsv(display, offset)\n else:\n frame = bytearray(display.buffer_length)\n\n display.transmitDisplayBuffer(frame)\n \n sleep(duration)\n\n for display in manager.displays:\n logger.debug(\"Clearing display\")\n if not args.persist:\n clear_frame = bytearray(display.buffer_length)\n display.transmitDisplayBuffer(clear_frame)\n 
display.close()\n\n","sub_path":"muenster_icecube_display/test_draw.py","file_name":"test_draw.py","file_ext":"py","file_size_in_byte":4605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"539824062","text":"\nfrom Gaudi.Configuration import *\nfrom Configurables import FilterDesktop, CombineParticles, LoKi__VertexFitter\nfrom PhysSelPython.Wrappers import Selection, DataOnDemand\nfrom StrippingConf.StrippingLine import StrippingLine\nfrom StrippingUtils.Utils import LineBuilder\nfrom StandardParticles import StdJets\nfrom CommonParticles.Utils import *\nfrom Configurables import FilterJet, ClearDaughters\nfrom GaudiKernel.SystemOfUnits import GeV\n\n__author__=[\"Xabier Cid Vidal\",\"Cedric Potterat\",\"William Barter\"]\n__all__ = [\"default_name\",\"default_config\",\"MicroDiJetsConf\"]\n\n## if you want to prescale this line, please contact the authors before!\ndefault_name = 'MicroDiJets'\n\ndefault_config = {\n 'NAME': default_name,\n 'BUILDERTYPE' : 'MicroDiJetsConf',\n 'WGs' : [ 'QEE' ],\n 'STREAMS' : [ 'Leptonic' ],\n 'CONFIG':{'MicroDiJetsLine_Prescale' : 0.5,\n 'MicroDiJetsLine_Postscale' : 1.0,\n 'min_jet_pT' : 20. * GeV\n }\n }\n\n\nclass MicroDiJetsConf( LineBuilder ) :\n\n __configuration_keys__ = ( 'MicroDiJetsLine_Prescale',\n 'MicroDiJetsLine_Postscale',\n 'min_jet_pT'\n )\n\n def __init__( self, name, config ) :\n\n LineBuilder.__init__( self, name, config )\n\n self._myname = name\n self._config = config\n\n self.emptySel = ClearDaughters(\"MDSTJets\", Inputs = [\"Phys/StdJets/Particles\"])\n self.emptySelLoc = updateDoD(self.emptySel)\n #print self.emptySelLoc\n self.emptySelLoc = self.emptySelLoc.keys()[0]\n #print self.emptySelLoc\n\n self.sel_MicroDiJets = self.makeJetGroup (self._myname + 'MicroDiJets')\n\n self.line_MicroDiJetsLine = StrippingLine( self._myname + 'Line',\n prescale = config[ 'MicroDiJetsLine_Prescale' ],\n postscale = config[ 'MicroDiJetsLine_Postscale' ],\n MDSTFlag = True,\n RequiredRawEvents = [\"Calo\"],\n checkPV = False,\n selection = self.sel_MicroDiJets\n )\n\n self.registerLine( self.line_MicroDiJetsLine )\n\n\n\n\n\n def makeJetGroup(self,_name):\n\n JetGroup = CombineParticles(\"Combine\"+_name)\n JetGroup.DecayDescriptor = \"H_10 -> CELLjet CELLjet\"\n\n\n\n JetGroup.ParticleCombiners = {\"\" : \"MomentumCombiner\"}\n # JetGroup.addTool( LoKi__VertexFitter, name=\"LoKi::VertexFitter\" )\n # vfitter = getattr ( JetGroup , \"LoKi::VertexFitter\" )\n # vfitter.Jets = \"\"\n\n JetGroup.DaughtersCuts = { \"CELLjet\" :\" (PT > %(min_jet_pT)s ) \" %self._config }\n\n JetGroup.CombinationCut = \"AALLSAMEBPV \"\n JetGroup.MotherCut = \"ALL\"\n\n return Selection(\"Sel\"+_name, Algorithm = JetGroup, RequiredSelections = [DataOnDemand(self.emptySelLoc)])\n\n\n\n\n","sub_path":"DaVinci_v39r1/InstallArea/x86_64-slc6-gcc49-opt/python/StrippingSelections/StrippingQEE/StrippingMicroDiJets.py","file_name":"StrippingMicroDiJets.py","file_ext":"py","file_size_in_byte":3076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"220778914","text":"'''\nВ Англии валютой являются фунты стерлингов £ и пенсы p, и в обращении есть восемь монет:\n\n1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) и £2 (200p).\n£2 возможно составить следующим образом:\n\n1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p\nСколькими разными способами можно составить £2, используя любое количество монет?\n'''\ncoins = (200, 100, 50, 20, 10, 5, 2, 1)\n\ncount = 0\n\ndef getCountCoins(value, part):\n 
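# Editor-added rewrite (a sketch): the original body mixed a module-level counter\n    # with inconsistent returns (it could return None and crash on \"count += temp\");\n    # replaced below with a plain recursive count over coins[value:].\n    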
if part == 200:\n        return 1\n    if part > 200 or value == len(coins):\n        return 0\n    total = 0\n    for i in range(0, 201, coins[value]):\n        total += getCountCoins(value + 1, part + i)\n    return total\n\ncount = getCountCoins(0, 0)\nprint(count) # prints 73682\n","sub_path":"31/prog.py","file_name":"prog.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"4062831","text":"import keras\nimport matplotlib.pyplot as plt\nimport tensorflow as tf\nfrom utils import *\n\nclass Net():\n    def __init__(self, lr, input_dim, layers, activations):\n        self.lr = lr\n        self.model = keras.models.Sequential()\n        for i in range(len(layers)):\n            if i == 0:  # only the first layer needs input_dim; the original had this condition inverted\n                self.model.add(keras.layers.Dense(layers[i], activation=activations[i], input_dim=input_dim))\n            else:\n                self.model.add(keras.layers.Dense(layers[i], activation=activations[i]))\n        print(str(input_dim) + \" features\")\n        self.history = None\n\n    def fit(self, X, Y, epochs = 100, batch_size = 200, Visual = False):\n        SGD = keras.optimizers.SGD(learning_rate = self.lr)\n        self.model.compile(loss='mean_squared_error', optimizer=SGD, metrics=[keras.metrics.MeanSquaredError()])\n        self.history = self.model.fit(X, Y, batch_size, epochs)\n        if Visual: \n            plt.plot(self.history.history['loss'])\n            plt.title('loss over epoch')\n            plt.ylabel('loss')\n            plt.xlabel('epoch')\n            plt.show()\n\n    def predict(self, X):\n        return self.model.predict(X)\n","sub_path":"rn_tf.py","file_name":"rn_tf.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"427694053","text":"import theano.sandbox.cuda.dnn as _dnn\n\nfrom .Module import Module\n\n\nclass SpatialMaxPoolingCUDNN(Module):\n    def __init__(self, k_w, k_h, d_w=None, d_h=None, pad_w=0, pad_h=0):\n        Module.__init__(self)\n        self.k_w = k_w\n        self.k_h = k_h\n\n        if d_w is None:\n            self.d_w = self.k_w\n        else:\n            self.d_w = d_w\n\n        if d_h is None:\n            self.d_h = self.k_h\n        else:\n            self.d_h = d_h\n\n        self.pad_w = pad_w\n        self.pad_h = pad_h\n\n    def symb_forward(self, symb_input):\n        return _dnn.dnn_pool(\n            img=symb_input,\n            ws=(self.k_w, self.k_h),\n            stride=(self.d_w, self.d_h),\n            mode='max',\n            pad=(self.pad_w, self.pad_h)\n        )","sub_path":"beacon8/layers/SpatialMaxPoolingCUDNN.py","file_name":"SpatialMaxPoolingCUDNN.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"520986307","text":"from lib.esp8266.wemos.d1mini import pinmap\n\nNAME = 'Light'\n\n#### Light / Switch ####\n\n## Change the component for Home Assistant:\nCOMPNT = 'light' # options: 'Light', 'Switch', None\n\n## Relay shield is D1 by default:\nGPIO = pinmap.D1\n\nBRIGHTNESS = False\n\nINIT_BRI = 100 # percent\n\n#### Button ####\n\nBTN = False\n\n## Wemos 1-button shield is D3 by default:\nBTN_GPIO = pinmap.D3\n\n## Set to 0 for Wemos 1-button shield:\nBTN_VAL = 0\n\n#### Motion Sensor ####\n\nMOTN = False\n\n## Wemos PIR shield is D3 by default:\nMOTN_GPIO = pinmap.D3\n\n## Set to 1 for Wemos PIR Shield:\nMOTN_VAL = 1\n\n## Turn off light n-seconds after being turned on:\nMOTN_TIME = 10 # seconds\n\n#### Battery ####\n\nBATT = True\n\nBATT_ADC = pinmap.A0\n\n## Update interval:\nBATT_INT = 10 * 60 # seconds\n\n## Disable light when battery below:\nBATT_LOW = 10 # percent or 
None\n","sub_path":"configs/light/battery.py","file_name":"battery.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"74201721","text":"Students = []\r\nStudentID = []\r\nCourses = []\r\nCoursesID = []\r\nMark = []\r\n\r\ndef numstudent():\r\n stu = int(input(\"Let's enter the numbers of Student:\"))\r\n if stu > 0:\r\n return stu\r\n else:\r\n print(\"Should stop!!!\")\r\n return 0\r\n\r\ndef infStudent():\r\n print(\"Enter the information of student\")\r\n inf = {\r\n 'ID ': '',\r\n 'Name': '',\r\n 'DOB': ''\r\n }\r\n print(\"Enter the ID of Student:\")\r\n inf['s_ID'] = s_Id = input()\r\n print(\"Enter the name of Student:\")\r\n inf['Name'] = input()\r\n print(\"Enter date of brith of Student:\")\r\n inf['DOB'] = input()\r\n Students.append(inf)\r\n StudentID.append(s_Id)\r\n\r\ndef numcourses():\r\n cou = int(input(\"Let's enter the numbers of course:\"))\r\n if cou > 0:\r\n return cou\r\n else:\r\n print(\"Should stop!!!\")\r\n return 0\r\n\r\ndef infCourses():\r\n print(\"Enter the information of courses\")\r\n inf_C = {\r\n 'cID': '',\r\n 'Name': ''\r\n }\r\n print(\"Enter the ID of Courses:\")\r\n inf_C['c_ID'] = c_Id = input()\r\n print(\"Enter the name of Courses:\")\r\n inf_C['Name'] = input()\r\n Courses.append(inf_C)\r\n CoursesID.append(c_Id)\r\n\r\ndef infmark():\r\n print(\"Enter the point mark\")\r\n inf_M = {\r\n 'c_ID': '',\r\n 's_ID': '',\r\n 'Mark': ''\r\n }\r\n print(\"Enter the ID of Courses: \")\r\n inf_M['c_ID'] = c = input()\r\n if c in CoursesID:\r\n print(\"Enter the ID of Student:\")\r\n inf_M['s_ID'] = c1 = input()\r\n if c1 in StudentID:\r\n print(\"Enter marks of that Student:\")\r\n inf_M['Mark'] = float(input())\r\n else:\r\n return -1\r\n else:\r\n return -1\r\n Mark.append(inf_M)\r\n\r\ndef ShowCourse():\r\n print(\"Show the lists of courses:\")\r\n for a in range(0, len(Courses)):\r\n print(\"[\", Courses[a]['c_ID'], \"]\", \"[\", Courses[a]['Name'], \"]\", )\r\n\r\n\r\ndef ShowStudent():\r\n print(\"Show the lists of Student:\")\r\n for a in range(0, len(Students)):\r\n print(\"[\", Students[a]['s_ID'], \"]\", \"[\", Students[a]['Name'], \"]\", \"[\", Students[a]['DOB'], \"]\", )\r\n\r\n\r\ndef ShowMark():\r\n print(\"Show marks of Student in courses:\")\r\n for a in range(len(Students)):\r\n print(\"[\", Mark[a]['c_ID'], \"]\", \"[\", Mark[a]['s_ID'], \"]\", \"[\", Mark[a]['Mark'], \"]\", )\r\n\r\ndef Student_Management():\r\n cou = numcourses()\r\n stu = numstudent()\r\n for i in range(cou):\r\n infCourses()\r\n for i in range(stu):\r\n infStudent()\r\n infmark()\r\n ShowCourse()\r\n ShowStudent()\r\n ShowMark()\r\n print(\"Done\")\r\n\r\nStudent_Management()","sub_path":"1.student.mark.py","file_name":"1.student.mark.py","file_ext":"py","file_size_in_byte":2624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"173347373","text":"'''\r\n\r\n ************************************************************************\r\n * FILE NAME: heater.py\r\n * AUTHOR: Dylan Vogel, Peter Feng\r\n * PURPOSE: This file contains functions for heater control using PWM.\r\n *\r\n *\r\n * EXTERNAL REFERENCES: RPi.GPIO, thmcouple\r\n *\r\n *\r\n * NOTES: The only function you'll likely need to change is the constants\r\n * in initial_heating_time based on the thermal mass you're trying to heat.\r\n *\r\n * You could base these on calculations which take into account your input\r\n * wattage, thermal volume, convection, 
etc., but I find that it's easier to run\r\n * a characterization test and adjust based on that.\r\n *\r\n * REVISION HISTORY:\r\n *\r\n * 2017-05-22: Created file. Wrote basic functions.\r\n * 2017-05-23: Wrote initial_heating_time based on empirically\r\n * derived values for our setup.\r\n *\r\n\r\n'''\r\n\r\nimport RPi.GPIO as GPIO\r\nimport thmcouple as thm\r\nimport heatingProcess\r\nglobal PWM_PIN_1, PWM_PIN_2, freq\r\n\r\n# GPIO, not board pins on the RPi\r\nPWM_PIN_1 = 23 # Center\r\nPWM_PIN_2 = 24 # Edge\r\n\r\n# PWM frequency in Hz\r\nfreq = 500\r\n\r\ndef setup1():\r\n\r\n GPIO.setmode(GPIO.BCM)\r\n GPIO.setup(PWM_PIN_1, GPIO.OUT)\r\n\r\n pwm_1 = GPIO.PWM(PWM_PIN_1, freq)\r\n\r\n # Start both PWM channels at 0% duty cycle.\r\n pwm_1.start(0)\r\n\r\n return pwm_1\r\n\r\ndef setup2():\r\n\r\n GPIO.setmode(GPIO.BCM)\r\n GPIO.setup(PWM_PIN_2, GPIO.OUT)\r\n\r\n pwm_2 = GPIO.PWM(PWM_PIN_2, freq)\r\n\r\n # Start both PWM channels at 0% duty cycle.\r\n pwm_2.start(0)\r\n\r\n return pwm_2\r\n\r\ndef initial_heating_time(process):\r\n # Apply some math to figure out how long to heat for.\r\n\r\n temp1 = thm.read(process.thm1)\r\n temp2 = thm.read(process.thm2)\r\n avg = (temp1 + temp2) / 2.0\r\n\r\n heating_time = ((process.temp - avg) / 2.0) - 4\r\n\r\n return heating_time\r\n\r\ndef initial_heating_time_new(process):\r\n # Revised method to determine the initial heating time\r\n\r\n temp1 = thm.read(process.thm1)\r\n temp2 = thm.read(process.thm2)\r\n avg = (temp1 + temp2) / 2.0\r\n\r\n temp_diff = process.temp - avg\r\n heating_time = (process.heat_capacity * process.mass * temp_diff / process.watt) - 10\r\n\r\n return heating_time\r\n\r\ndef calc_kp(work_temp):\r\n\r\n kp = 0.2 * ((work_temp / 100.0) * (work_temp / 100.0))\r\n kp = clamp(kp, 0.2, 1)\r\n\r\n return kp\r\n\r\ndef update_temp(temp_avg, temp):\r\n # Simple weighting scheme to smooth out large variations.\r\n new_temp = ((temp_avg * 2.0) + temp) / 3.0\r\n return new_temp\r\n\r\ndef change_duty(process):\r\n process.pwm_1.ChangeDutyCycle(process.pwm_center)\r\n process.pwm_2.ChangeDutyCycle(process.pwm_edge)\r\n\r\ndef clamp(n, minn, maxn):\r\n return max(min(n, maxn), minn)\r\n\r\ndef close(pwm):\r\n pwm.stop()\r\n","sub_path":"heater.py","file_name":"heater.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"307643065","text":"import sys\n\ninput = sys.stdin.readline\n\ndx = [-1, 1, 0, 0]\ndy = [0, 0, -1, 1]\n\ndef in_bound(x, y):\n if x in range(0, R) and y in range(0, C):\n return True\n else:\n return False\n\ndef dfs(x, y, count):\n global answer\n answer = max(answer, count)\n for i in range(4):\n nx, ny = x+dx[i], y+dy[i]\n if in_bound(nx, ny):\n if path[ord(Map[nx][ny]) - 65] != 1:\n path[ord(Map[nx][ny]) - 65] = 1\n dfs(nx, ny, count+1)\n path[ord(Map[nx][ny]) - 65] = 0\n\nR, C = map(int, input().split())\nMap = []\n\nfor _ in range(R):\n Map.append(list(input().strip()))\n\nanswer = 1\npath = [0]*26\npath[ord(Map[0][0]) - 65] = 1\ndfs(0, 0, answer)\nprint(answer)\n","sub_path":"DFS_BFS/알파벳_dfs.py","file_name":"알파벳_dfs.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"475669974","text":"from django.conf.urls import patterns\nfrom django.conf.urls import url\nfrom . 
import views\n\n\nurlpatterns = patterns(\n    '',\n    url(r'^$', views.main, name='main'),\n    url(r'^requests/$', views.requests, name='requests'),\n    url(r'^requests/(?P<page>\\d+)/$', views.requests, name='requests'),  # group name is a guess; the '<...>' part was stripped during extraction\n    url(r'^edit/(?P<pk>\\d+)/$', views.edit, name='edit'),  # likewise restored with a hypothetical name\n    url(r'^login/$', 'django.contrib.auth.views.login', name='login'),\n    url(r'^logout/$', 'django.contrib.auth.views.logout',\n        {'next_page': '/'}, name='logout'),\n)\n","sub_path":"apps/hello/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"543106510","text":"\"\"\"\n    This was originally pilfered from\n    https://github.com/adeept/Adeept_RaspTank/blob/a6c45e8cc7df620ad8977845eda2b839647d5a83/server/camera_opencv.py\n\n\"Great artists steal\". Thank you, @adeept!\n\"\"\"\n\nimport os\nimport threading\nimport datetime\nimport numpy as np\nimport cv2\n\nfrom base_camera import BaseCamera\n\nCVRun = 1\nlineColorSet = 255\nframeRender = 1\nfindLineError = 20\n\nImgIsNone = 0\n\ncolorUpper = np.array([44, 255, 255])\ncolorLower = np.array([24, 100, 100])\n\n\nclass CVThread(threading.Thread):\n    font = cv2.FONT_HERSHEY_SIMPLEX\n\n    def __init__(self, *args, **kwargs):\n        self.CVThreading = 0\n        self.CVMode = 'none'\n        self.imgCV = None\n\n        self.radius = 0\n        self.box_x = None\n        self.box_y = None\n        self.drawing = 0\n\n        self.findColorDetection = 0\n\n        super(CVThread, self).__init__(*args, **kwargs)\n        self.__flag = threading.Event()\n        self.__flag.clear()\n\n    def mode(self, invar, imgInput):\n        self.CVMode = invar\n        self.imgCV = imgInput\n        self.resume()\n\n    def elementDraw(self, imgInput):\n        if self.CVMode == 'none':\n            pass\n\n        elif self.CVMode == 'findColor':\n            if self.findColorDetection:\n                cv2.putText(imgInput, 'Target Detected', (40, 60),\n                            CVThread.font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)\n                self.drawing = 1\n            else:\n                cv2.putText(imgInput, 'Target Detecting', (40, 60),\n                            CVThread.font, 0.5, (255, 255, 255), 1, cv2.LINE_AA)\n                self.drawing = 0\n\n            if self.radius > 10 and self.drawing:\n                cv2.rectangle(imgInput, (int(self.box_x-self.radius), int(self.box_y+self.radius)),\n                              (int(self.box_x+self.radius), int(self.box_y-self.radius)), (255, 255, 255), 1)\n\n        return imgInput\n\n    def pause(self):\n        self.__flag.clear()\n\n    def resume(self):\n        self.__flag.set()\n\n    def run(self):\n        while 1:\n            self.__flag.wait()\n            if self.CVMode == 'none':\n                continue\n            elif self.CVMode == 'findColor':\n                self.CVThreading = 1\n                self.findColor(self.imgCV)\n                self.CVThreading = 0\n            pass\n\n\nclass Camera(BaseCamera):\n    video_source = 0\n    modeSelect = 'none'\n    # modeSelect = 'findlineCV'\n    # modeSelect = 'findColor'\n    # modeSelect = 'watchDog'\n\n    def __init__(self):\n        if os.environ.get('OPENCV_CAMERA_SOURCE'):\n            Camera.set_video_source(int(os.environ['OPENCV_CAMERA_SOURCE']))\n        super(Camera, self).__init__()\n\n    @staticmethod\n    def set_video_source(source):\n        Camera.video_source = source\n\n    @staticmethod\n    def frames():\n        global ImgIsNone  # without this, the assignment below makes ImgIsNone local and the first read raised UnboundLocalError\n        print('initializing VideoCapture')\n        camera = cv2.VideoCapture(Camera.video_source)\n        if not camera.isOpened():\n            raise RuntimeError('Could not start camera.')\n\n        cvt = CVThread()\n        cvt.start()\n\n        while True:\n            # read current frame\n            _, img = camera.read()\n            if img is None:\n                if ImgIsNone == 0:\n                    print(\n                        \"The camera has not read data, please check whether the camera can be used normally.\")\n                    print(\n                        \"Use the command: 'raspistill -t 1000 -o image.jpg' to check whether the camera can be used correctly.\")\n                    ImgIsNone = 1
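\n                # editor-added note: skip this frame and keep polling; the warning above is printed only once\n                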
continue\n\n if Camera.modeSelect == 'none':\n cvt.pause()\n else:\n if cvt.CVThreading:\n pass\n else:\n cvt.mode(Camera.modeSelect, img)\n cvt.resume()\n try:\n img = cvt.elementDraw(img)\n except:\n pass\n\n # encode as a jpeg image and return it\n if cv2.imencode('.jpg', img)[0]:\n yield cv2.imencode('.jpg', img)[1].tobytes()\n","sub_path":"video-server/src/camera_opencv.py","file_name":"camera_opencv.py","file_ext":"py","file_size_in_byte":4055,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"350048649","text":"from django.shortcuts import render\nfrom .models import SubAnnouncement \t# this model is not being used. maybe change it as Announcements\nfrom todo.models import ToDoList\nfrom blog.models import BlogPost\nfrom django.contrib.auth.models import User\nfrom django.core.paginator import Paginator\n\n\n\ndef landing(request):\n\t# For accessing just the \"/\" even when the user is automatically logged-in.\n\t# Default will be an announcements/advertisements page.\n\t# \"/home\" will be the homepage linked with the \"home\" icon; not this one.\n\tuser = User\n\tcontext = {\n\t\t'subannouncements': SubAnnouncement.objects.all(),\n\t\t'users': user.objects.order_by('-date_joined'), # try 'date_registered' to see all options\n\t\t# 'ads': \n\t}\n\tif user:\n\t\treturn render(request, 'home/wb.html', context)\n\telse:\n\t\treturn render(request, 'home/landing.html', context)\n\n\n\nfrom django.contrib.auth.decorators import login_required\n\n@login_required\ndef home(request):\n\tuser = User\n\tuser_list = User.objects.all()\n\t#paginator = Paginator(user_list, 10) # not yet implemented\n\tcontext = {\n\t\t'todos': ToDoList.objects.filter(author=request.user).order_by(\"finish_by\"),\n\t\t'blogposts': BlogPost.objects.all().order_by(\"-date_posted\"),\n\t\t'users': user_list\n\t}\n\n\t# template_folder/html_file\n\treturn render(request, 'home/home.html', context)\n\n\n\n\ndef announcement(request):\n\t# not being used atm\n\tcontext = {\n\n\t}\n\n\treturn render(request, 'home/test.html', context)\n\n\n","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1449,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"348300498","text":"# **name = it receives a dictionary\n# *name = receives a tuple \ndef foo(string, *args, **keyword): \n print(string)\n\n if args:\n print(args)\n\n if keyword:\n print(keyword)\n if 'name' in keyword:\n print(keyword['name'])\n\ndef concatenate(*args, sep='/'):\n return sep.join(args)\n\nif __name__ == \"__main__\":\n tupla = (1,2,3,4,5)\n dictionary = {\"name\" : \"simone\", \"surename\" : \"vaiasinni\"}\n \n #foo(\"Hello world\", 1,2,3,4,5, name=\"simone\", surename=\"vaiasinni\" )\n #foo(\"Hello world\", 1,2,3,4,5, **dictionary)\n\n print(concatenate(\"home\", \"simone\", \"desktop\"))\n\n\n","sub_path":"Flow_control_tools/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"277175746","text":"#%matplotlib tk\nimport urllib.request\nimport json\nimport matplotlib.pyplot as plt\nfrom matplotlib import animation\nimport cartopy.crs as ccrs\nfrom cartopy.io.img_tiles import OSM\n\n#SET AXES\nfig, ax = plt.subplots()\nax=plt.axes(projection=ccrs.PlateCarree())\nax.set_ylim(40.6051,40.6825)\nax.set_xlim(-73.8288,-73.7258)\n\n#ADD OSM BASEMAP\nosm_tiles=OSM()\nax.add_image(osm_tiles,13) 
#Zoom Level 13\n\n#PLOT JFK INTL AIRPORT\nax.text(-73.778889,40.639722,'JFK Intl',horizontalalignment='right',size='large')\nax.plot([-73.778889],[40.639722],'bo')\n\n#PLOT TRACK\ntrack, = ax.plot([], [],'ro')\n\nopener = urllib.request.build_opener()\nopener.addheaders = [('User-agent', 'Mozilla/5.0')]\n\n#UPDATE FUNCTION\ndef update(self):\n # Find possible query items:\n # http://www.virtualradarserver.co.uk/Documentation/Formats/AircraftList.aspx\n #SEND QUERY\n fp=opener.open('http://public-api.adsbexchange.com/VirtualRadar/AircraftList.json?lat=40.639722&lng=-73.778889&fDstL=0&fDstU=20')\n mybyte=fp.read()\n mystr=mybyte.decode(\"utf8\")\n js_str=json.loads(mystr)\n fp.close()\n lat_list=[]\n long_list=[]\n #\n # Find flight description details:\n # https://www.adsbexchange.com/datafields/ \n for num,flight_data in enumerate(js_str['acList']):\n lat=flight_data['Lat']\n lon=flight_data['Long']\n lat_list.append(lat)\n long_list.append(lon)\n track.set_data(long_list,lat_list)\n return track,\n \n#UPDATING EVERY SECOND\nanim = animation.FuncAnimation(fig, update,interval=1000, blit=False)\n\nplt.show()","sub_path":"scratch/flighttrack.py","file_name":"flighttrack.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"246749149","text":"#!/usr/bin/env python3.8\n#\n########################################\n# \n# Python Tips, by Wolfgang Azevedo\n# https://github.com/wolfgang-azevedo/python-tips\n#\n# Loop While\n# 2020-03-08\n#\n########################################\n#\n#\nimport time\nfrom datetime import datetime\n\n\nnum = input(\"Digite um número: \") \n\ntry:\n\n # While the value returns true, execute the loop\n # Enquanto o valor for verdadeiro, execute o loop\n while True:\n\n # If the conditional is true, loop will continue\n # Se a condicional retornar verdade, o loop continuará\n if num == '1':\n print(f'Hoje {datetime.strftime(datetime.now(), \"%d/%m/%Y %H:%M:%S\")} vamos aprender Python!!!')\n time.sleep(1)\n\n # If the conditional is false, stop the loop\n # Se a condicional retornar falso, paramos o loop\n else:\n print(\"Numero inválido!!!!\")\n break\n\n# Throw and exception when keyboard CTRl+C is pressed\n# Lança uma execeção quando interrompido pela combinação CTRL+C\nexcept KeyboardInterrupt as e: \n print(\"Fim, loop interrompido pelo usuario\")","sub_path":"tip10_loop_while/tip10_loop_while.py","file_name":"tip10_loop_while.py","file_ext":"py","file_size_in_byte":1111,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"193536915","text":"import os \nimport numpy as np \nimport random\nfrom keras.datasets import imdb \nfrom keras.models import Model \nfrom keras.layers import Conv1D, GlobalMaxPooling1D,Dense \nfrom keras.layers import Dropout, Embedding, Input\nfrom keras.callbacks import ReduceLROnPlateau \nfrom keras.optimizers import Adam,SGD\nfrom keras.preprocessing import sequence\n\nimport seaborn as sb \nimport matplotlib.pyplot as plt \nimport itertools\nfrom sklearn.metrics import confusion_matrix\n\n\n\nreduce_lr = ReduceLROnPlateau(monitor=\"val_acc\",\n verbose=1, factor=0.1, min_lr=0.00001,\n patience = 3)\n\nclass Config:\n max_features = 3000\n max_len = 400\n embedding_dims = 64\n\n batch_size = 32\n filters = 250\n kernel_size = 3\n hidden_dims = 250\n epochs = 3\n\n lr = 0.0003\n dropout = 0.2\n\nopt = Config()\ndef iter_generator(x, y, batch_size = 32):\n data_length = len(y)\n data_order = 
np.arange(data_length)\n data_steps = data_length // batch_size\n # print(data_order)\n # print(data_steps)\n # print(\"数据重置.....\")\n count = 0\n while True:\n yield [x[data_order][batch_size*count:batch_size*(count+1)], \n y[data_order][batch_size*count:batch_size*(count+1)]]\n \n count+=1\n if count!=0 and count % data_steps == 0:\n count = 0\n random.shuffle(data_order)\n print(\"数据重置.....\")\ndef data_preprocess(sequence_x,max_length):\n sequence_x = sequence.pad_sequences(\n sequence_x,\n maxlen = max_length,\n padding = \"post\",\n truncating=\"post\",\n value = 0)\n #sequence_y = to_categorical(train_y[:10], num_classes=2)\n return sequence_x\n\ndef model_build():\n inputs = Input(shape=(opt.max_len,))\n embedding_output = Embedding(opt.max_features,\n opt.embedding_dims,\n input_length=opt.max_len)(inputs)\n embedding_output = Dropout(opt.dropout)(embedding_output)\n\n conv_output = Conv1D(opt.filters, opt.kernel_size,\n padding = \"valid\",\n activation=\"relu\")(embedding_output)\n\n pool_output = GlobalMaxPooling1D()(conv_output)\n hidden_output = Dense(opt.hidden_dims, activation=\"relu\")(pool_output)\n hidden_output = Dropout(opt.dropout)(hidden_output)\n\n outputs = Dense(1, activation=\"sigmoid\")(hidden_output)\n\n model = Model(inputs = inputs, outputs = outputs)\n adam = Adam(lr=opt.lr)\n\n model.compile(optimizer=adam, loss=\"binary_crossentropy\",\n metrics = [\"accuracy\"])\n return model \n\nmodel = model_build()\nprint(model.summary())\n\n\nprint('Loading data...')\n(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=opt.max_features,\n path = \"imdb.npz\")\nprint(len(x_train), 'train sequences')\nprint(len(x_test), 'test sequences')\n \nprint('Pad sequences (samples x time)')\nx_train = sequence.pad_sequences(x_train, maxlen=opt.max_len)\nx_test = sequence.pad_sequences(x_test, maxlen=opt.max_len)\nprint('x_train shape:', x_train.shape)\nprint('x_test shape:', x_test.shape)\n\n\nhistory = model.fit_generator(iter_generator(x_train, y_train,\n batch_size=opt.batch_size), epochs = opt.epochs,\n verbose = 2,steps_per_epoch = len(x_train)//opt.batch_size,\n validation_data=[x_test, y_test])\n\n\n# learning curves\nfig,ax = plt.subplots(2,1,figsize=(10,10))\nax[0].plot(history.history['loss'], color='r', label='Training Loss')\nax[0].plot(history.history['val_loss'], color='g', label='Validation Loss')\nax[0].legend(loc='best',shadow=True)\nax[0].grid(True)\n\n\nax[1].plot(history.history['acc'], color='r', label='Training Accuracy')\nax[1].plot(history.history['val_acc'], color='g', label='Validation Accuracy')\nax[1].legend(loc='best',shadow=True)\nax[1].grid(True)\n\nplt.show()\n\n\ndef plot_sonfusion_matrix(cm, classes, normalize=False, title='Confusion matrix',cmap=plt.cm.Blues):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n if normalize:\n cm = cm.astype('float')/cm.sum(axis=1)[:,np.newaxis]\n thresh = cm.max()/2.0\n # print(\"thresh:\",thresh)\n\n for i,j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n # print(\"i,j:\",i,j)\n plt.text(j,i,cm[i,j], horizontalalignment='center',color='white' if\n (i==j) else 'red')\n # plt.text(j,i,cm[i,j], horizontalalignment=\"center\",color=\"green\")\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predict label')\n\ny_pred = model.predict(x_test)\n# pred_label = np.argmax(y_pred, axis=1)\n# true_label = np.argmax(y_test, 
axis=1)\npred_label = (y_pred>0.5).astype(\"uint8\")\ntrue_label = y_test\nconfusion_mat = confusion_matrix(true_label, pred_label)\nprint(\"混淆矩阵:\", confusion_mat)\nplot_sonfusion_matrix(confusion_mat, classes = range(2))\nplt.show()\n","sub_path":"imdb_generator.py","file_name":"imdb_generator.py","file_ext":"py","file_size_in_byte":4905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"329009020","text":"\"\"\"\nAll the api parts of spotlogin_api\n\"\"\"\nimport requests\nimport models\n\ndef get_user_call(auth):\n \"\"\"\n Calls to get the user object\n \"\"\"\n url = \"https://api.spotify.com/v1/me\"\n response = requests.get(url, headers={\"Authorization\": \"Bearer \" + auth})\n return response.json()\n\n\ndef get_artists_call(auth):\n \"\"\"\n Gets top Artists on Login\n \"\"\"\n response = requests.get(\n \"https://api.spotify.com/v1/me/top/artists\",\n headers={\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + auth,\n },\n )\n return response.json()\n\n\ndef get_top_call(flaskid):\n \"\"\"\n Gets the Names of the Top Artists\n \"\"\"\n query = models.ActiveUsers.query\n auth = query.filter_by(serverid=flaskid).first().authtoken\n headers = {\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n \"Authorization\": \"Bearer \" + auth,\n }\n response = requests.get(\n \"https://api.spotify.com/v1/me/top/artists\", headers=headers\n )\n return response.json()\n\n\ndef get_current_call(flaskid):\n \"\"\"\n Get Users Current Song from spotify\n \"\"\"\n query = models.ActiveUsers.query\n auth = query.filter_by(serverid=flaskid).first().authtoken\n url = \"https://api.spotify.com/v1/me/player/currently-playing\"\n header = {\"Authorization\": \"Bearer \" + auth}\n response = requests.get(url, headers=header)\n return response.json()\n","sub_path":"spotlogin_api.py","file_name":"spotlogin_api.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"384062590","text":"\"\"\"\nExample script for loading MNIST data\n\"\"\"\n\nimport sys\nsys.path += ['../data']\n\nfrom load_MNIST_images import load_MNIST_images\nfrom load_MNIST_labels import load_MNIST_labels\n\ndef main():\n # Load training data\n train_data = load_MNIST_images('../data/train-images.idx3-ubyte')\n train_label = load_MNIST_labels('../data/train-labels.idx1-ubyte')\n # Load testing data\n test_data = load_MNIST_images('../data/t10k-images.idx3-ubyte')\n test_label = load_MNIST_labels('../data/t10k-labels.idx1-ubyte')\n return train_data, train_label, test_data, test_label\n\nif __name__ == '__main__':\n main()\n","sub_path":"load_MNIST_data.py","file_name":"load_MNIST_data.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"253328037","text":"from flask import Flask, render_template, redirect, request, session\nfrom flask_pymongo import PyMongo\nfrom datetime import date\nfrom pymongo import MongoClient\nfrom flask_mongoengine import MongoEngine\n\napp = Flask(__name__)\n\n# app.config[\"MONGO_URI\"] = \"mongodb://localhost:27017/six_months_db\"\napp.config[\"MONGO_URI\"] = \"mongodb://54.175.159.57:27017/stock_db\"\n\nmongo = PyMongo(app)\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n@app.route('/all')\ndef read_all():\n users = mongo.db.six_months.find()\n output 
= {'All': []}\n # cycle through users\n for user in users:\n symbol = user['symbol']\n historical = user['historical']\n predictions = user['prediction']\n # put symbol in symbol\n out_one = {'symbol': symbol, 'historical': [], 'prediction': []}\n # cycle through historical to extract data\n for h in historical:\n # append formatted data to output\n out_one['historical'].append(h)\n # cycle through predictions to extract data \n for p in predictions:\n # append formatted data to output\n out_one['prediction'].append(p)\n\n output['All'].append(out_one)\n\n # print(output)\n return output\n \nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"Stock Project/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"100266666","text":"\"\"\"\n改变通道 图片的 改变图片每个像素点每个通道的灰度值\n\"\"\"\nimport cv2 as cv\n\n\n# 遍历访问图片每个像素点,并修改相应的RGB\ndef access_pixels(image):\n print(image.shape)\n height = image.shape[0]\n width = image.shape[1]\n channels = image.shape[2]\n print(\"width: %s height: %s channels: %s\" % (width, height, channels))\n for row in range(height):\n for col in range(width):\n for c in range(channels):\n # 获取每个像素点的每个通道的数值\n pv = image[row, col, c]\n # 灰度值是0-255 这里是修改每个像素点每个通道灰度值\n image[row, col, c] = 255 - pv\n cv.imshow(\"second_image\", image)\n\n\ndef main():\n # blue, green, red\n src = cv.imread(\"../media/lena/lena.jpg\")\n cv.namedWindow('first_image', cv.WINDOW_AUTOSIZE)\n cv.imshow('first_image', src)\n # GetTickCount函数返回从操作系统启动到当前所经过的毫秒数\n t1 = cv.getTickCount()\n # 处理后的图片\n access_pixels(src)\n t2 = cv.getTickCount()\n # getTickFrequency 函数返回CPU的频率,就是每秒的计时周期数\n time = (t2 - t1) / cv.getTickFrequency()\n # 输出运行时间\n print(\"time : %s ms\" % (time * 1000))\n cv.waitKey(0)\n cv.destroyAllWindows()\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"cn/opencv/chapter01/02ChangeChannel.py","file_name":"02ChangeChannel.py","file_ext":"py","file_size_in_byte":1383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"503899458","text":"from manimlib.constants import YELLOW, FRAME_X_RADIUS\nimport numpy as np\nfrom manimlib.mobject.types.vectorized_mobject import VMobject\nfrom manimlib.mobject.geometry import Dot\nfrom manimlib.utils.config_ops import digest_config\n\n\nclass ParametricFunction(VMobject):\n CONFIG = {\n \"t_min\": 0,\n \"t_max\": 1,\n # TODO, be smarter about choosing this number\n \"step_size\": 0.01,\n \"dt\": 1e-8,\n # TODO, be smart about figuring these out?\n \"discontinuities\": [],\n \"smoothing\": True\n }\n\n def __init__(self, function, **kwargs):\n self.function = function\n VMobject.__init__(self, **kwargs)\n\n def get_function(self):\n return self.function\n\n def get_point_from_function(self, t):\n return self.function(t)\n\n def generate_points(self):\n t_min, t_max = self.t_min, self.t_max\n dt = self.dt\n step_size = self.step_size\n\n discontinuities = filter(\n lambda t: t_min <= t <= t_max,\n self.discontinuities\n )\n discontinuities = np.array(list(discontinuities))\n boundary_times = [\n self.t_min, self.t_max,\n *(discontinuities - dt),\n *(discontinuities + dt),\n ]\n boundary_times.sort()\n for t1, t2 in zip(boundary_times[0::2], boundary_times[1::2]):\n t_range = list(np.arange(t1, t2, step_size))\n if t_range[-1] != t2:\n t_range.append(t2)\n points = np.array([self.function(t) for t in t_range])\n valid_indices = np.apply_along_axis(\n np.all, 1, 
np.isfinite(points)\n )\n points = points[valid_indices]\n if len(points) > 0:\n self.start_new_path(points[0])\n self.add_points_as_corners(points[1:])\n if self.smoothing:\n self.make_smooth()\n return self\n\n\nclass FunctionGraph(ParametricFunction):\n CONFIG = {\n \"color\": YELLOW,\n \"x_min\": -FRAME_X_RADIUS,\n \"x_max\": FRAME_X_RADIUS,\n }\n\n def __init__(self, function, **kwargs):\n digest_config(self, kwargs)\n self.parametric_function = \\\n lambda t: np.array([t, function(t), 0])\n ParametricFunction.__init__(\n self,\n self.parametric_function,\n t_min=self.x_min,\n t_max=self.x_max,\n **kwargs\n )\n self.function = function\n\n def get_function(self):\n return self.function\n\n def get_point_from_function(self, x):\n return self.parametric_function(x)\n\n\nclass DiscreteFunction(VMobject):\n CONFIG = {\n \"x_min\": 0,\n \"x_max\": 1,\n }\n\n def __init__(self, values, **kwargs):\n self.values = values\n VMobject.__init__(self, **kwargs)\n\n def set_values(self, values):\n self.values = values\n self.submobjects = []\n self.generate_points()\n\n def get_values(self):\n return self.values\n\n def generate_points(self):\n self.clear_points()\n self.start_new_path(self.values[0])\n self.add_points_as_corners(self.values[1:])\n\n for p in self.values:\n dot = Dot(p, fill_color=self.color)\n self.add(dot)\n\n return self\n","sub_path":"manimlib/mobject/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"236087340","text":"import numpy as np\nfrom scipy.constants import c, mu_0, epsilon_0\n\nclass Structure(object):\n\n\tdef __init__(self,Space):\n\t\t\"\"\"Define structure object.\n\n\t\tThis script is not perfect because it cannot handle dispersive materials.\n\t\tOnly simple isotropic dielectric materials are possible.\n\t\t\"\"\"\n\t\n\t\tself.Space = Space\n\nclass Box(Structure):\n\tdef __init__(self, Space, srt, end, eps_r, mu_r):\n\t\t\"\"\"Place a box inside of a simulation space.\n\t\t\n\t\tArgs:\n\n\t\t\teps_r : float\n\t\t\t\t\tRelative electric constant or permittivity.\n\n\t\t\tmu_r : float\n\t\t\t\t\tRelative magnetic constant or permeability.\n\t\t\t\t\n\t\t\tsrt : a list or tuple (iterable object) of ints\n\t\t\t\t\tstart index (x, y, z) of the box.\n\n\t\t\tend : a list or tuple (iterable object) of ints\n\t\t\t\t\tend index (x, y, z) of the box.\n\n\t\tReturns:\n\t\t\tNone\n\n\t\t\"\"\"\n\n\t\tself.eps_r = eps_r\n\t\tself.mu_r = mu_r\n\n\t\tStructure.__init__(self, Space)\n\n\t\tassert len(srt) == 3, \"Only 3D material is possible.\"\n\t\tassert len(end) == 3, \"Only 3D material is possible.\"\n\n\t\tassert type(eps_r) == float, \"Only isotropic media is possible. eps_r must be a single float.\"\t\n\t\tassert type( mu_r) == float, \"Only isotropic media is possible. 
mu_r must be a single float.\"\t\n\n\t\t# Start index of the structure.\n\t\txsrt = srt[0]\n\t\tysrt = srt[1]\n\t\tzsrt = srt[2]\n\n\t\t# End index of the structure.\n\t\txend = end[0]\n\t\tyend = end[1]\n\t\tzend = end[2]\n\n\t\tassert xsrt < xend\n\t\tassert ysrt < yend\n\t\tassert zsrt < zend\n\n\t\tum = 1e-6\n\t\tnm = 1e-9\n\n\t\tSpace.MPIcomm.barrier()\n\n\t\tif Space.MPIrank == 0:\n\t\t\tprint(\"Box size: x={} um, y={} um, z={:.3f} um\" .format((xend-xsrt)*Space.dx/um, (yend-ysrt)*Space.dy/um, (zend-zsrt)*Space.dz/um))\n\n\t\tMPIrank = self.Space.MPIrank\n\t\tMPIsize = self.Space.MPIsize\n\n\t\t# Global x index of each node.\n\t\tnode_xsrt = self.Space.myNx_indice[MPIrank][0]\n\t\tnode_xend = self.Space.myNx_indice[MPIrank][1]\n\n\t\tif xend < node_xsrt:\n\t\t\tself.global_loc = None\n\t\t\tself. local_loc = None\n\t\tif xsrt < node_xsrt and xend >= node_xsrt and xend <= node_xend:\n\t\t\tself.global_loc = ((node_xsrt , ysrt, zsrt), ( xend , yend, zend))\n\t\t\tself. local_loc = ((node_xsrt-node_xsrt, ysrt, zsrt), ( xend-node_xsrt, yend, zend))\n\t\tif xsrt < node_xsrt and xend > node_xend:\n\t\t\tself.global_loc = ((node_xsrt , ysrt, zsrt), (node_xend , yend, zend))\n\t\t\tself. local_loc = ((node_xsrt-node_xsrt, ysrt, zsrt), (node_xend-node_xsrt, yend, zend))\n\t\tif xsrt >= node_xsrt and xsrt < node_xend and xend < node_xend:\n\t\t\tself.global_loc = (( xsrt , ysrt, zsrt), ( xend , yend, zend))\n\t\t\tself. local_loc = (( xsrt-node_xsrt, ysrt, zsrt), ( xend-node_xsrt, yend, zend))\n\t\tif xsrt >= node_xsrt and xsrt < node_xend and xend >= node_xend:\n\t\t\tself.global_loc = (( xsrt , ysrt, zsrt), (node_xend , yend, zend))\n\t\t\tself. local_loc = (( xsrt-node_xsrt, ysrt, zsrt), (node_xend-node_xsrt, yend, zend))\n\t\tif xsrt >= node_xend:\n\t\t\tself.global_loc = None\n\t\t\tself. local_loc = None\n\t\t\n\t\tself.Space.MPIcomm.Barrier()\n\n\t\tif self.global_loc != None:\n\t\t\tself.local_size = (self.local_loc[1][0] - self.local_loc[0][0], yend-ysrt, zend-zsrt)\n\t\t\t#print(\"rank {:>2}: x idx of a Box >>> global \\\"{:4d},{:4d}\\\" and local \\\"{:4d},{:4d}\\\"\" \\\n\t\t\t#\t.format(MPIrank, self.global_loc[0][0], self.global_loc[1][0], self.local_loc[0][0], self.local_loc[1][0]))\n\n\t\t\tloc_xsrt = self.local_loc[0][0]\n\t\t\tloc_ysrt = self.local_loc[0][1]\n\t\t\tloc_zsrt = self.local_loc[0][2]\n\n\t\t\tloc_xend = self.local_loc[1][0]\n\t\t\tloc_yend = self.local_loc[1][1]\n\t\t\tloc_zend = self.local_loc[1][2]\n\n\t\t\tself.Space.eps_HEE[loc_xsrt:loc_xend, loc_ysrt:loc_yend, loc_zsrt:loc_zend] = self.eps_r * epsilon_0\n\t\t\tself.Space.eps_EHH[loc_xsrt:loc_xend, loc_ysrt:loc_yend, loc_zsrt:loc_zend] = self.eps_r * epsilon_0\n\n\t\t\tself.Space. mu_HEE[loc_xsrt:loc_xend, loc_ysrt:loc_yend, loc_zsrt:loc_zend] = self. mu_r * mu_0\n\t\t\tself.Space. mu_EHH[loc_xsrt:loc_xend, loc_ysrt:loc_yend, loc_zsrt:loc_zend] = self. mu_r * mu_0\n\n\t\treturn\n\nclass Cone(Structure):\n\tdef __init__(self, Space, axis, height, radius, center, eps_r, mu_r):\n\t\t\"\"\"Place a cone inside of a simulation space.\n\t\t\n\t\tArgs:\n\t\t\tSpace : Space object\n\n\t\t\taxis : string\n\t\t\t\tA coordinate axis parallel to the center axis of the cone. 
Choose 'x','y' or 'z'.\n\n\t\t\theight : int\n\t\t\t\tA height of the cone in terms of index.\n\n\t\t\tradius : int\n\t\t\t\tA radius of the bottom of a cone.\n\n\t\t\tcenter : tuple\n\t\t\t\tA coordinate of the center of the bottom.\n\n\t\t\teps_r : float\n\t\t\t\t\tRelative electric constant or permitivity.\n\n\t\t\tmu_ r : float\n\t\t\t\t\tRelative magnetic constant or permeability.\n\n\t\tReturns:\n\t\t\tNone\n\n\t\t\"\"\"\n\n\t\tself.eps_r = eps_r\n\t\tself. mu_r = mu_r\n\n\t\tStructure.__init__(self, Space)\n\n\t\tassert self.Space.dy == self.Space.dz, \"dy and dz must be the same. For the other case, it is not developed yet.\"\n\t\tassert axis == 'x', \"Sorry, a cone parallel to the y and z axis are not developed yet.\"\n\n\t\tassert len(center) == 3, \"Please insert x,y,z coordinate of the center.\"\n\n\t\tassert type(eps_r) == float, \"Only isotropic media is possible. eps_r must be a single float.\"\t\n\t\tassert type( mu_r) == float, \"Only isotropic media is possible. mu_r must be a single float.\"\t\n\n\t\t# Global start index of the structure.\n\t\tgxsrt = center[0] - height\n\n\t\t# Global end index of the structure.\n\t\tgxend = center[0]\n\n\t\tassert gxsrt >= 0\n\n\t\tMPIrank = self.Space.MPIrank\n\t\tMPIsize = self.Space.MPIsize\n\n\t\t# Global x index of each node.\n\t\tnode_xsrt = self.Space.myNx_indice[MPIrank][0]\n\t\tnode_xend = self.Space.myNx_indice[MPIrank][1]\n\n\t\tif gxend < node_xsrt:\n\t\t\tself.gxloc = None\n\t\t\tself.lxloc = None\n\n\t\t\tportion_srt = None\n\t\t\tportion_end = None\n\t\t\tself.portion = None\n\n\t\t# Last part\n\t\tif gxsrt < node_xsrt and gxend >= node_xsrt and gxend <= node_xend:\n\t\t\tself.gxloc = (node_xsrt , gxend )\n\t\t\tself.lxloc = (node_xsrt-node_xsrt, gxend-node_xsrt)\n\n\t\t\tportion_srt = height - (self.gxloc[1] - self.gxloc[0])\n\t\t\tportion_end = height\n\t\t\tself.portion = (portion_srt, portion_end) \n\n\t\t\tmy_lxloc = np.arange (self.lxloc[0], self.lxloc[1] )\n\t\t\tmy_height = np.linspace(portion_srt , portion_end, len(my_lxloc))\n\t\t\tmy_radius = (radius * my_height ) / height\n\n\t\t\tfor i in range(len(my_radius)):\n\t\t\t\tfor j in range(self.Space.Ny):\n\t\t\t\t\tfor k in range(self.Space.Nz):\n\n\t\t\t\t\t\tif ((j-center[1])**2 + (k-center[2])**2) <= (my_radius[i]**2):\n\n\t\t\t\t\t\t\tself.Space.eps_HEE[my_lxloc[i], j, k] = self.eps_r * epsilon_0\n\t\t\t\t\t\t\tself.Space.eps_EHH[my_lxloc[i], j, k] = self.eps_r * epsilon_0\n\n\t\t\t\t\t\t\tself.Space. mu_HEE[my_lxloc[i], j, k] = self. mu_r * mu_0\n\t\t\t\t\t\t\tself.Space. mu_EHH[my_lxloc[i], j, k] = self. mu_r * mu_0\n\n\t\t# Middle part\n\t\tif gxsrt <= node_xsrt and gxend >= node_xend:\n\t\t\tself.gxloc = (node_xsrt , node_xend )\n\t\t\tself.lxloc = (node_xsrt-node_xsrt, node_xend-node_xsrt)\n\n\t\t\tportion_srt = self.gxloc[0] - gxsrt\n\t\t\tportion_end = portion_srt + (node_xend - node_xsrt)\n\t\t\tself.portion = (portion_srt, portion_end) \n\n\t\t\tmy_lxloc = np.arange (self.lxloc[0], self.lxloc[1] )\n\t\t\tmy_height = np.linspace(portion_srt , portion_end, len(my_lxloc))\n\t\t\tmy_radius = (radius * my_height ) / height\n\n\t\t\tfor i in range(len(my_radius)):\n\t\t\t\tfor j in range(self.Space.Ny):\n\t\t\t\t\tfor k in range(self.Space.Nz):\n\n\t\t\t\t\t\tif ((j-center[1])**2 + (k-center[2])**2) <= (my_radius[i]**2):\n\n\t\t\t\t\t\t\tself.Space.eps_HEE[my_lxloc[i], j, k] = self.eps_r * epsilon_0\n\t\t\t\t\t\t\tself.Space.eps_EHH[my_lxloc[i], j, k] = self.eps_r * epsilon_0\n\n\t\t\t\t\t\t\tself.Space. mu_HEE[my_lxloc[i], j, k] = self. 
mu_r * mu_0\n\t\t\t\t\t\t\tself.Space. mu_EHH[my_lxloc[i], j, k] = self. mu_r * mu_0\n\n\t\t# First part but small\n\t\tif gxsrt >= node_xsrt and gxsrt <= node_xend and gxend <= node_xend:\n\t\t\tself.gxloc = (gxsrt , gxend )\n\t\t\tself.lxloc = (gxsrt-node_xsrt, gxend-node_xsrt)\n\n\t\t\tportion_srt = self.lxloc[0]\n\t\t\tportion_end = self.lxloc[1]\n\t\t\tself.portion = (portion_srt, portion_end) \n\n\t\t\tmy_lxloc = np.arange (self.lxloc[0], self.lxloc[1] )\n\t\t\tmy_height = np.linspace(portion_srt , portion_end, len(my_lxloc))\n\t\t\tmy_radius = (radius * my_height ) / height\n\n\t\t\tfor i in range(len(my_radius)):\n\t\t\t\tfor j in range(self.Space.Ny):\n\t\t\t\t\tfor k in range(self.Space.Nz):\n\n\t\t\t\t\t\tif ((j-center[1])**2 + (k-center[2])**2) <= (my_radius[i]**2):\n\n\t\t\t\t\t\t\tself.Space.eps_HEE[my_lxloc[i], j, k] = self.eps_r * epsilon_0\n\t\t\t\t\t\t\tself.Space.eps_EHH[my_lxloc[i], j, k] = self.eps_r * epsilon_0\n\n\t\t\t\t\t\t\tself.Space. mu_HEE[my_lxloc[i], j, k] = self. mu_r * mu_0\n\t\t\t\t\t\t\tself.Space. mu_EHH[my_lxloc[i], j, k] = self. mu_r * mu_0\n\n\t\t# First part but big\n\t\tif gxsrt >= node_xsrt and gxsrt <= node_xend and gxend >= node_xend:\n\t\t\tself.gxloc = (gxsrt , node_xend )\n\t\t\tself.lxloc = (gxsrt-node_xsrt, node_xend-node_xsrt)\n\n\t\t\tportion_srt = 0\n\t\t\tportion_end = self.gxloc[1] - self.gxloc[0]\n\t\t\tself.portion = (portion_srt, portion_end) \n\n\t\t\tmy_lxloc = np.arange (self.lxloc[0], self.lxloc[1] )\n\t\t\tmy_height = np.linspace(portion_srt , portion_end, len(my_lxloc))\n\t\t\tmy_radius = (radius * my_height ) / height\n\n\t\t\tfor i in range(len(my_radius)):\n\t\t\t\tfor j in range(self.Space.Ny):\n\t\t\t\t\tfor k in range(self.Space.Nz):\n\n\t\t\t\t\t\tif ((j-center[1])**2 + (k-center[2])**2) <= (my_radius[i]**2):\n\n\t\t\t\t\t\t\tself.Space.eps_HEE[my_lxloc[i], j, k] = self.eps_r * epsilon_0\n\t\t\t\t\t\t\tself.Space.eps_EHH[my_lxloc[i], j, k] = self.eps_r * epsilon_0\n\n\t\t\t\t\t\t\tself.Space. mu_HEE[my_lxloc[i], j, k] = self. mu_r * mu_0\n\t\t\t\t\t\t\tself.Space. mu_EHH[my_lxloc[i], j, k] = self. 
mu_r * mu_0\n\n\t\tif gxsrt >= node_xend:\n\t\t\t\tself.gxloc = None\n\t\t\t\tself.lxloc = None\n\t\t\t\n\t\t\t\tportion_srt = None\n\t\t\t\tportion_end = None\n\t\t\t\tself.portion = None\n\t\t\"\"\"\n\t\tif self.gxloc != None:\n\t\t\tprint('rank: ', MPIrank)\n\t\t\tprint('Global loc: ', self.gxloc)\n\t\t\tprint('Local loc: ', self.lxloc)\n\t\t\tprint('height portion: ', self.portion)\n\t\t\tprint('Local loc array: ', my_lxloc, len(my_lxloc))\n\t\t\tprint('my height array: ', my_height, len(my_height))\n\t\t\tprint('my radius array: ', my_radius, len(my_radius))\n\n\t\t#print(MPIrank, self.portion, self.gxloc, self.lxloc)\n\t\t\"\"\"\n\t\tself.Space.MPIcomm.Barrier()\n\n\t\treturn\n\n\nclass Sphere(Structure):\n\n\tdef __init__(self, Space, center, radius, eps_r, mu_r, sigma):\n\n\t\tif Space.rank == 0:\n\n\t\t\tStructure.__init__(self, Space)\n\n\t\t\tx = center[0]\n\t\t\ty = center[1]\n\t\t\tz = center[2]\n\n\t\t\tfor k in range(self.gridz):\n\t\t\t\tfor j in range(self.gridy):\n\t\t\t\t\tfor i in range(self.gridx):\n\t\t\t\t\t\tif ((i-x)**2 + (j-y)**2 + (k-z)**2) < (radius**2):\n\n\t\t\t\t\t\t\tself.space_eps_on[i,j,k] *= eps_r\n\t\t\t\t\t\t\tself.space_mu_on [i,j,k] *= mu_r\n\n\t\t\t\t\t\t\tself.Esigma_onx[i,j,k] = sigma\n\t\t\t\t\t\t\tself.Esigma_ony[i,j,k] = sigma\n\t\t\t\t\t\t\tself.Esigma_onz[i,j,k] = sigma\n\n\t\t\t\t\t\t\tself.space_eps_off[i,j,k] *= eps_r\n\t\t\t\t\t\t\tself.space_mu_off [i,j,k] *= mu_r\n\n\t\t\t\t\t\t\tself.Esigma_offx[i,j,k] = sigma\n\t\t\t\t\t\t\tself.Esigma_offy[i,j,k] = sigma\n\t\t\t\t\t\t\tself.Esigma_offz[i,j,k] = sigma\n\n\t\telse: pass\n\n\t\tSpace.MPIcomm.Barrier()\n\n\t\treturn\n","sub_path":"pyctypes/PSTD.rfft.diel.CPML/structure.py","file_name":"structure.py","file_ext":"py","file_size_in_byte":10922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"638154106","text":"import torch\nimport itertools\nfrom util.image_pool import ImagePool\nfrom .base_model import BaseModel\nfrom . import networks\nfrom . import vgg19_model\n\n\nclass CycMaskCOGANModel(BaseModel):\n def name(self):\n return 'CycMaskCOGANModel'\n\n @staticmethod\n def modify_commandline_options(parser, is_train=True):\n # default CycleGAN did not use dropout\n parser.set_defaults(no_dropout=True)\n if is_train:\n parser.add_argument('--lambda_A', type=float, default=10.0, help='weight for cycle loss (A -> B -> A)')\n parser.add_argument('--lambda_B', type=float, default=10.0, help='weight for cycle loss (B -> A -> B)')\n parser.add_argument('--lambda_CO_A', type=float, default=1.0, help='weight for cycle loss (B -> A -> B)')\n parser.add_argument('--lambda_CO_B', type=float, default=1.0, help='weight for cycle loss (B -> A -> B)')\n parser.add_argument('--tau', type=float, default=0.1, help='threshold for mask')\n parser.add_argument('--lambda_GAN', type=float, default=1.0, help='threshold for mask')\n\n return parser\n\n def initialize(self, opt):\n BaseModel.initialize(self, opt)\n\n # specify the training losses you want to print out. The program will call base_model.get_current_losses\n self.loss_names = ['D_A', 'G_AB', 'cycle_A', 'coseg_A', 'D_B', 'G_BA', 'cycle_B', 'coseg_B']\n # specify the images you want to save/display. 
The program will call base_model.get_current_visuals\n visual_names_A = ['real_A', 'mask_A_to_show', 'real_bg_A', 'real_fg_A', 'fake_fg_B', 'fake_B', 'mask_fakeB', 'fake_bg_B', 'rec_fg_fakeB', 'rec_fg_A', 'rec_A']\n visual_names_B = ['real_B', 'mask_B_to_show', 'real_bg_B', 'real_fg_B', 'fake_fg_A', 'fake_A', 'mask_fakeA', 'fake_bg_A', 'rec_fg_fakeA', 'rec_fg_B', 'rec_B']\n\n self.visual_names = [visual_names_A, visual_names_B]\n # specify the models you want to save to the disk. The program will call base_model.save_networks and base_model.load_networks\n if self.isTrain:\n self.model_names = ['A_A', 'A_B', 'G_AB', 'G_BA', 'D_A', 'D_B']\n else: # during test time, only load Gs\n self.model_names = ['A_A', 'A_B', 'G_A', 'G_B']\n\n # load/define networks\n # The naming conversion is different from those used in the paper\n # Code (paper): G_A (G), G_B (F), D_A (D_Y), D_B (D_X)\n self.netG_AB = networks.define_G(opt.input_nc, opt.output_nc,\n opt.ngf, opt.which_model_netG, 'none', not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n self.netG_BA = networks.define_G(opt.output_nc, opt.input_nc,\n opt.ngf, opt.which_model_netG, 'none', not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n\n self.netA_A = networks.define_A(opt.input_nc, opt.output_nc,\n opt.ngf, opt.which_model_netG, opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n\n self.netA_B = networks.define_A(opt.input_nc, opt.output_nc,\n opt.ngf, opt.which_model_netG, opt.norm, not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)\n\n\n self.netE = vgg19_model.define_Vgg19(opt)\n\n if self.isTrain:\n use_sigmoid = opt.no_lsgan\n opt.start_thresh_epoch = 50\n\n self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.which_model_netD,\n opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)\n self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.which_model_netD,\n opt.n_layers_D, opt.norm, use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)\n\n if opt.continue_train:\n if int(opt.which_epoch) > opt.start_thresh_epoch:\n self.netD_A = networks.define_D(opt.output_nc, opt.ndf, opt.which_model_netD,\n opt.n_layers_D, 'none', use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)\n self.netD_B = networks.define_D(opt.input_nc, opt.ndf, opt.which_model_netD,\n opt.n_layers_D, 'none', use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)\n\n\n if self.isTrain:\n self.fake_A_pool = ImagePool(opt.pool_size)\n self.fake_A_thresh_pool = ImagePool(opt.pool_size)\n self.fake_B_pool = ImagePool(opt.pool_size)\n self.fake_B_thresh_pool = ImagePool(opt.pool_size)\n # define loss functions\n self.criterionGAN = networks.GANLoss(use_lsgan=not opt.no_lsgan).to(self.device)\n self.criterionCycle = torch.nn.L1Loss()\n self.criterionIdt = torch.nn.L1Loss()\n self.criterionCOSEG = networks.COSEGLoss()\n # initialize optimizers\n self.optimizer_A = torch.optim.Adam(itertools.chain(self.netA_B.parameters(), self.netA_A.parameters()),\n lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_G = torch.optim.Adam(itertools.chain(self.netG_AB.parameters(), self.netG_BA.parameters()),\n lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()),\n lr=opt.lr, betas=(opt.beta1, 0.999))\n self.optimizers = []\n self.optimizers.append(self.optimizer_A)\n self.optimizers.append(self.optimizer_G)\n self.optimizers.append(self.optimizer_D)\n\n def set_input(self, input):\n AtoB = 
self.opt.which_direction == 'AtoB'\n self.real_A = input['A' if AtoB else 'B'].to(self.device)\n self.real_B = input['B' if AtoB else 'A'].to(self.device)\n self.image_paths = input['A_paths' if AtoB else 'B_paths']\n\n def forward(self):\n self.mask_A = self.netA_A(self.real_A)\n self.mask_A_to_show = 2 * (self.mask_A - 0.5)\n self.real_fg_A = torch.mul(self.real_A, self.mask_A)\n self.real_bg_A = torch.mul(self.real_A, 1 - self.mask_A)\n\n self.real_fg_A_features = self.netE(self.real_fg_A)\n self.real_bg_A_features = self.netE(self.real_bg_A)\n\n self.fake_fg_B = self.netG_AB(self.real_fg_A)\n self.fake_B = self.real_bg_A + torch.mul(self.fake_fg_B, self.mask_A)\n\n self.mask_fakeB = self.netA_B(self.fake_B)\n self.mask_fakeB_to_show = 2 * (self.mask_fakeB - 0.5)\n self.fake_bg_B = torch.mul(self.fake_B, 1 - self.mask_fakeB)\n self.rec_fg_fakeB = torch.mul(self.fake_B, self.mask_fakeB)\n self.rec_fg_A = torch.mul(self.netG_BA(self.rec_fg_fakeB), self.mask_fakeB)\n self.rec_A = self.rec_fg_A + self.fake_bg_B\n\n self.fake_B_thresh = torch.mul(self.fake_B, (self.mask_A > self.opt.tau).float())\n self.real_A_thresh = torch.mul(self.real_A, (self.mask_A > self.opt.tau).float())\n\n # self.rec_fg_A = self.netG_BA(self.fake_fg_B)\n\n self.mask_B = self.netA_B(self.real_B)\n self.mask_B_to_show = 2 * (self.mask_B - 0.5)\n self.real_fg_B = torch.mul(self.real_B, self.mask_B)\n self.real_bg_B = torch.mul(self.real_B, 1 - self.mask_B)\n self.fake_fg_A = self.netG_BA(self.real_fg_B)\n\n self.real_fg_B_features = self.netE(self.real_fg_B)\n self.real_bg_B_features = self.netE(self.real_bg_B)\n\n self.fake_A = self.real_bg_B + torch.mul(self.fake_fg_A, self.mask_B)\n self.mask_fakeA = self.netA_A(self.fake_A)\n self.fake_bg_A = torch.mul(self.fake_A, 1 - self.mask_fakeA)\n self.rec_fg_fakeA = torch.mul(self.fake_A, self.mask_fakeA)\n self.rec_fg_B = torch.mul(self.netG_AB(self.rec_fg_fakeA), self.mask_fakeA)\n self.rec_B = self.rec_fg_B + self.fake_bg_A\n # self.rec_fg_B = self.netG_AB(self.fake_fg_A)\n\n self.fake_A_thresh = torch.mul(self.fake_A, (self.mask_B > self.opt.tau).float())\n self.real_B_thresh = torch.mul(self.real_B, (self.mask_B > self.opt.tau).float())\n\n\n def backward_D_basic(self, netD, real, fake):\n # Real\n pred_real = netD(real)\n loss_D_real = self.criterionGAN(pred_real, True)\n # Fake\n pred_fake = netD(fake.detach())\n loss_D_fake = self.criterionGAN(pred_fake, False)\n # Combined loss\n loss_D = (loss_D_real + loss_D_fake) * 0.5\n # backward\n loss_D.backward()\n return loss_D\n\n def backward_D_B(self, to_crop=False):\n if to_crop:\n fake_B = self.fake_B_thresh_pool.query(self.fake_B_thresh)\n real_B = self.real_B_thresh\n else:\n fake_B = self.fake_B_pool.query(self.fake_B)\n real_B = self.real_B\n\n self.loss_D_B = self.backward_D_basic(self.netD_B, real_B, fake_B)\n\n def backward_D_A(self, to_crop=False):\n if to_crop:\n fake_A = self.fake_A_thresh_pool.query(self.fake_A_thresh)\n real_A = self.real_A_thresh\n else:\n fake_A = self.fake_A_pool.query(self.fake_A)\n real_A = self.real_A\n\n self.loss_D_A = self.backward_D_basic(self.netD_A, real_A, fake_A)\n\n def backward_G(self, to_crop=False):\n lambda_A = self.opt.lambda_A\n lambda_B = self.opt.lambda_B\n lambda_CO_A = self.opt.lambda_CO_A\n lambda_CO_B = self.opt.lambda_CO_B\n lambda_GAN = self.opt.lambda_GAN\n\n if to_crop:\n self.loss_G_BA = self.criterionGAN(self.netD_A(self.fake_A_thresh), True) * lambda_GAN\n self.loss_G_AB = self.criterionGAN(self.netD_B(self.fake_B_thresh), True) * lambda_GAN\n else:\n 
self.loss_G_BA = self.criterionGAN(self.netD_A(self.fake_A), True) * lambda_GAN\n self.loss_G_AB = self.criterionGAN(self.netD_B(self.fake_B), True) * lambda_GAN\n\n # Forward cycle loss\n self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A\n # Backward cycle loss\n self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B\n\n # Features loss\n self.loss_coseg_A = self.criterionCOSEG(self.real_bg_A_features, self.real_fg_A_features) * lambda_CO_A\n self.loss_coseg_B = self.criterionCOSEG(self.real_bg_B_features, self.real_fg_B_features) * lambda_CO_B\n\n # combined loss\n self.loss_G = self.loss_G_AB + self.loss_G_BA + self.loss_cycle_A + self.loss_cycle_B + self.loss_coseg_A + self.loss_coseg_B\n self.loss_G.backward()\n\n def optimize_parameters(self, to_crop=False):\n # forward\n self.forward()\n # G_A and G_B\n self.set_requires_grad([self.netD_A, self.netD_B], False)\n self.optimizer_A.zero_grad()\n self.optimizer_G.zero_grad()\n self.backward_G(to_crop)\n self.optimizer_A.step()\n self.optimizer_G.step()\n # D_A and D_B\n self.set_requires_grad([self.netD_A, self.netD_B], True)\n self.optimizer_D.zero_grad()\n self.backward_D_A(to_crop)\n self.backward_D_B(to_crop)\n self.optimizer_D.step()\n\n def reset_D(self, which_epoch='latest'):\n opt = self.opt\n use_sigmoid = opt.no_lsgan\n netD_A_temp = networks.define_D(opt.output_nc, opt.ndf, opt.which_model_netD,\n opt.n_layers_D, 'none', use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)\n netD_B_temp = networks.define_D(opt.input_nc, opt.ndf, opt.which_model_netD,\n opt.n_layers_D, 'none', use_sigmoid, opt.init_type, opt.init_gain, self.gpu_ids)\n\n\n params_D_A = self.netD_A.named_parameters()\n params_D_B = self.netD_B.named_parameters()\n params_new_D_A = netD_A_temp.named_parameters()\n params_new_D_B = netD_B_temp.named_parameters()\n\n dict_params_new_D_A = dict(params_new_D_A)\n dict_params_new_D_B = dict(params_new_D_B)\n\n for name1, param1 in params_D_A:\n if name1 in dict_params_new_D_A:\n print(\"copy\", name1)\n dict_params_new_D_A[name1].data.copy_(param1.data)\n\n for name1, param1 in params_D_B:\n if name1 in dict_params_new_D_B:\n print(\"copy\", name1)\n dict_params_new_D_B[name1].data.copy_(param1.data)\n\n self.netD_A = netD_A_temp\n self.netD_B = netD_B_temp\n\n self.optimizer_D = torch.optim.Adam(itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()),\n lr=opt.lr, betas=(opt.beta1, 0.999))\n\n self.optimizers[2] = self.optimizer_D\n\n def update_learning_rate(self, epoch, initial_epoch=50, ratio=100):\n new_lr = self.opt.lr - self.opt.lr * (epoch - initial_epoch) / initial_epoch\n for param_group in self.optimizer_A.param_groups:\n param_group['lr'] = new_lr / ratio\n for param_group in self.optimizer_G.param_groups:\n param_group['lr'] = new_lr\n for param_group in self.optimizer_D.param_groups:\n param_group['lr'] = new_lr\n","sub_path":"models/cycMask_coGAN_model.py","file_name":"cycMask_coGAN_model.py","file_ext":"py","file_size_in_byte":13257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"611454698","text":"from django.conf.urls import patterns, url\nfrom home import views\n\nurlpatterns = [\n # Examples:\n # url(r'^$', 'proyecto_django.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^$', views.index_view, name='index_view'),\n url(r'^login/', views.login_view, name='login_view'),\n url(r'^logout/$', views.logout_view, name='logout_view'),\n url(r'^formulario/$', 
views.formulario_view, name='vista_formulario'),\n #url(r'^registro/$', views.registro_view, name='registro_view'),\n]\n","sub_path":"home/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"195117818","text":"\"\"\"\nthis file will include all the view endpoints for the application.\n\"\"\"\nimport os\nimport datetime\nimport json\nfrom flask import jsonify, make_response, request, url_for, send_from_directory\nfrom flask_jwt_extended import (create_access_token, create_refresh_token,\n get_jwt_identity, jwt_refresh_token_required,\n jwt_required, get_raw_jwt)\nfrom flask_restful import Resource\nfrom werkzeug.utils import secure_filename\nfrom .models import User, Incident, RevokeToken\nfrom .validators import IncidentEditSchema, IncidentSchema, UserSchema\nfrom flask_mail import Message, Mail\nfrom flask import current_app\n\nUPLOAD_FOLDER = os.path.abspath(\"app/uploads\")\nALLOWED_EXTENSIONS = set(['mp4', 'png', 'jpg', 'jpeg'])\n\n\nclass BaseEndpoint(Resource):\n def __init__(self):\n \"\"\" Sets up the base reusable variables accross the views\"\"\"\n self.u = User()\n self.i = Incident()\n self.mail = Mail(current_app)\n\n @staticmethod\n def allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\n\nclass SignUpEndpoint(BaseEndpoint):\n \"\"\"\n A resource that provides the endpoint POST /signup.\n\n \"\"\"\n\n def post(self):\n \"\"\"\n Registers new users based on data sent\n \"\"\"\n data = request.get_json(force=True)\n user_data, error = UserSchema().load(data)\n\n if error:\n return make_response(jsonify({\n \"message\": \"Missing or invalid field members\",\n \"required\": error}), 400)\n\n if 'isAdmin' in user_data:\n success = self.u.save(\n user_data[\"first_name\"],\n user_data[\"last_name\"],\n user_data[\"other_names\"],\n user_data[\"phonenumber\"],\n user_data[\"email\"],\n user_data[\"username\"],\n user_data[\"password\"],\n user_data[\"isAdmin\"]\n )\n else:\n success = self.u.save(\n user_data[\"first_name\"],\n user_data[\"last_name\"],\n user_data[\"other_names\"],\n user_data[\"phonenumber\"],\n user_data[\"email\"],\n user_data[\"username\"],\n user_data[\"password\"]\n )\n if success:\n msg = Message('Welcome', recipients=[user_data['email']])\n msg.body = \"Welcome to iReporter, your sign up was successful\"\n self.mail.send(msg)\n return make_response(jsonify({\n \"message\": \"Sign Up successful. 
Welcome!\"}\n ), 201)\n\n return make_response(jsonify({\"message\": \"Username/Email already exists\"}), 400)\n\n\nclass LoginEndpoint(BaseEndpoint):\n \"\"\" This endpoints handles all login posts POST /login\"\"\"\n\n def post(self):\n \"\"\" Accepts login credentials and return success on succcessful authentication\"\"\"\n\n data = request.get_json(force=True)\n user_data, error = UserSchema(\n only=('username', 'password',)).load(data)\n if error:\n return make_response(jsonify({\n \"message\": \"Missing or invalid field members\",\n \"required\": error}), 400)\n\n result = self.u.get_user(user_data['username'])\n\n if result == False or result == None:\n return make_response(jsonify({\"message\": \"Login Failed, Incorrect Username/Password!\"}), 401)\n\n if self.u.check_encrypted_password(user_data['password'], result['password']):\n return make_response(jsonify({\n \"message\": \"Login Success!\",\n \"refresh_token\": create_refresh_token(identity=user_data[\"username\"]),\n \"access_token\": create_access_token(identity=user_data[\"username\"], expires_delta=False)}), 200)\n\n return make_response(jsonify({\"message\": \"Login Failed, Incorrect Username/Password!\"}), 401)\n\n\nclass LogoutEndpoint(BaseEndpoint):\n \"\"\" This endpoint handles User logout and blacklisting of that access token\"\"\"\n @jwt_required\n def post(self):\n jti = get_raw_jwt()['jti']\n if RevokeToken().add(jti):\n return make_response(jsonify({\n \"message\": \"Successfully logged out!\",\n \"status\": 200\n }), 200)\n\n\nclass RefreshTokenEndpoint(Resource):\n \"\"\"Returns the a new refresh token\"\"\"\n\n @jwt_refresh_token_required\n def post(self):\n current_user = get_jwt_identity()\n access_token = create_access_token(identity=current_user)\n return make_response(jsonify({\n 'message': \"New access token created\",\n 'access_token': access_token}),\n 201)\n\n\nclass AllIncidentsEndpoint(BaseEndpoint):\n \"\"\"Allows for getting all incidents and posting of any new one\"\"\"\n\n @jwt_required\n def post(self):\n \"\"\"Endpoint POST /incidents\n Allows creation of new incidents\"\"\"\n if request.form:\n data = request.form\n else:\n data = json.loads(request.data)\n incident_data, error = IncidentSchema(\n only=('incidentType', 'location', 'comment',)\n ).load(data)\n if error:\n return make_response(jsonify({\n \"message\": \"Missing or invalid field members\",\n \"required\": error}), 400)\n\n user = get_jwt_identity()\n createdBy = self.u.get_user(user)['id']\n videos = []\n images = []\n if 'image' in request.files:\n files = request.files.getlist('image')\n for file in files:\n if file and self.allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(UPLOAD_FOLDER, filename))\n images.append(url_for('uploaded_file', filename=filename))\n\n if 'video' in request.files:\n files = request.files.getlist('video')\n for file in files:\n if file and self.allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(UPLOAD_FOLDER, filename))\n if filename.rsplit('.', 1)[1].lower() == 'mp4':\n videos.append(\n url_for('uploaded_file', filename=filename))\n\n success = self.i.save(\n incident_data['incidentType'],\n incident_data['comment'],\n incident_data['location'],\n createdBy,\n images,\n videos,\n )\n if success:\n return make_response(jsonify({\n \"message\": \"New incident created\",\n \"status\": 201}\n ), 201)\n\n return make_response(jsonify({\n \"message\": \"Incident can only be red-flag/intervention\",\n 
\"status\": 400}), 400)\n\n @jwt_required\n def get(self):\n \"\"\"Endpoint GET /incidents.\n Returns list of all incidents\"\"\"\n\n user = get_jwt_identity()\n createdBy = self.u.get_user(user)['id']\n isAdmin = self.u.get_user(user)['isadmin']\n if isAdmin:\n results = self.i.get_all()\n else:\n results = self.i.get_incidents(createdBy)\n if not results:\n return make_response(jsonify({\"message\": \"No incidents\"}))\n return make_response(jsonify(results), 200)\n\n\nclass IncidentEndpoint(BaseEndpoint):\n @jwt_required\n def get(self, incidentId):\n \"\"\"\n GET /incident/\n Returns a single instance\n \"\"\"\n try:\n incidentId = int(incidentId)\n except ValueError:\n return make_response(jsonify({\n \"message\": \"Failed! incidentId is not an id\",\n \"status\": 400\n }), 400)\n\n user = get_jwt_identity()\n createdBy = self.u.get_user(user)['id']\n results = self.i.get_incident(incidentId, createdBy)\n if results == False or results is None:\n return make_response(jsonify({\n \"message\": \"No incident by that id/ Not owned\",\n \"status\": 404\n }), 404)\n return make_response(jsonify(results), 200)\n\n @jwt_required\n def delete(self, incidentId):\n \"\"\"\n DELETE /incident/\n deletes a single instance\n \"\"\"\n try:\n incidentId = int(incidentId)\n except ValueError:\n return make_response(jsonify({\n \"message\": \"Failed! incidentId is not an id\",\n \"status\": 400}), 400)\n\n user = get_jwt_identity()\n createdBy = self.u.get_user(user)['id']\n exists_owned = self.i.get_incident(incidentId, createdBy)\n\n if exists_owned == False or exists_owned is None:\n return make_response(jsonify({\"message\": \"Forbiden cannot delete,record may not exist\",\n \"status\": 403}), 403)\n\n result = self.i.delete(incidentId, createdBy)\n if result:\n return make_response(jsonify({\n \"message\": \"Incident record has been deleted\",\n \"status\": 200}\n ), 200)\n\n\nclass IncidentEditCommentEndpoint(BaseEndpoint):\n \"\"\"\n Endpoint PUT /incident/1/comment\n Allows for editing the comment on an incident\n \"\"\"\n @jwt_required\n def put(self, incidentId):\n \"\"\"Allows for editing the comment on an incident\"\"\"\n try:\n incidentId = int(incidentId)\n except ValueError:\n return make_response(jsonify({\n \"message\": \"Failed! incidentId is not an id\",\n \"status\": 400}), 400)\n\n user = get_jwt_identity()\n createdBy = self.u.get_user(user)['id']\n data = request.get_json(force=True)\n incident_data = IncidentEditSchema(\n only=('comment',)).load(data)\n if incident_data.errors:\n return make_response(jsonify({\n \"message\": \"Comment is not present\",\n \"status\": 400,\n \"required\": incident_data.errors}),\n 400)\n\n exists_owned = self.i.validate_edit(incidentId, createdBy)\n\n if exists_owned == False or exists_owned is None:\n return make_response(jsonify({\n \"message\": \"Forbidden: Record not owned/ Not in draft status\"}), 403)\n\n edit = self.i.edit_comment(incidentId, data['comment'], createdBy)\n if edit == True:\n return make_response(jsonify({\n 'message': \"Incident Updated\",\n }), 200)\n\n return make_response(jsonify({\n \"message\": \"Cannot update a record at the moment\"}), 403)\n\n\nclass IncidentEditLocationEndpoint(BaseEndpoint):\n \"\"\"\n Endpoint PUT /incident/1/location\n Allows for editing the location on an incident\n \"\"\"\n @jwt_required\n def put(self, incidentId):\n \"\"\" Allows for editing the location on an incident\"\"\"\n try:\n incidentId = int(incidentId)\n except ValueError:\n return make_response(jsonify({\"message\": \"Failed! 
incidentId is not an id\"}), 400)\n user = get_jwt_identity()\n createdBy = self.u.get_user(user)['id']\n data = request.get_json(force=True)\n incident_data = IncidentEditSchema(\n only=('location',)).load(data)\n if incident_data.errors:\n return make_response(jsonify({\n \"message\": \"location is not present\",\n \"required\": incident_data.errors}),\n 400)\n\n exists_owned = self.i.validate_edit(incidentId, createdBy)\n\n if exists_owned == False or exists_owned is None:\n return make_response(jsonify({\n \"message\": \"Forbidden: Record not owned/ Not in draft status\"}), 403)\n\n edit = self.i.edit_location(incidentId, data['location'], createdBy)\n if edit == True:\n return make_response(jsonify({\n 'message': \"Incident Updated\",\n }), 200)\n\n return make_response(jsonify({\n \"message\": \"Cannot update a record at the moment\"}), 403)\n\n\nclass AdminStatusEndpoint(BaseEndpoint):\n\n \"\"\"\n Endpoint PUT /incident/status\n Allows for and admin to update the status of a record\n \"\"\"\n @jwt_required\n def put(self, incidentId):\n try:\n incidentId = int(incidentId)\n except ValueError:\n return make_response(jsonify({\"message\": \"Failed! incidentId is not an id\"}), 400)\n data = request.get_json(force=True)\n incident_data = IncidentEditSchema(\n only=('status',)).load(data)\n\n if incident_data.errors:\n return make_response(jsonify({\n \"message\": \"status is not present\",\n \"required\": incident_data.errors}),\n 400)\n user = get_jwt_identity()\n\n isAdmin = self.u.get_user(user)['isadmin']\n\n if isAdmin == True:\n update = self.i.update_status(incidentId, data['status'])\n else:\n return make_response(jsonify({\n \"message\": \"Incident does not exist/ Not Admin\"\n }), 401)\n\n if update == True:\n return make_response(jsonify({\n 'message': 'Incident status updated',\n 'status': 200}),\n 200)\n return make_response(jsonify({\n \"message\": \"Status can only be draft,under-investigation,resolved or rejected\",\n }), 400)\n","sub_path":"app/api/v2/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"570985178","text":"from flask import render_template , flash , redirect\nfrom app import app\nfrom .forms import LoginForm\n\n@app.route(\"/\")\n@app.route(\"/index\")\ndef index():\n\tuser = {\"nickname\" : \"笨齐齐\"}\n\tposts = [ # fake array of posts\n {\n 'author': { 'nickname': '笨齐齐' },\n 'body': 'I love you!'\n },\n {\n 'author': { 'nickname': '聪明机智还帅' },\n 'body': 'I love you too!'\n }\n ]\n\treturn render_template(\"index.html\",title=\"齐笨笨\",user=user,posts=posts)\n\n@app.route(\"/login\",methods = [\"GET\",\"POST\"])\ndef login():\n\tform = LoginForm()\n\tif form.validate_on_submit():\n\t\tflash('login requested for OpenId = \" ' + form.openid.data + ' \" , remember_me = ' + str (form.remember_me.data))\n\t\treturn redirect (\"/index\")\n\treturn render_template(\"login.html\",title = \"sign in\", form = form , providers = app.config[\"OPENID_PROVIDERS\"])\n\n","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"330109673","text":"import matplotlib.dates as mat_dates\nimport matplotlib.pyplot as plt\n\nfrom dateutil import parser\n\n\nimport weather_analyse.constances as _c\n\n\ndef three_cities_humidity_analyse():\n # near\n y_ravenna = _c.df_ravenna['humidity']\n x_ravenna = 
_c.df_ravenna['day']\n y_faenza = _c.df_faenza['humidity']\n x_faenza = _c.df_faenza['day']\n y_cesena = _c.df_cesena['humidity']\n x_cesena = _c.df_cesena['day']\n # far\n y_milano = _c.df_milano['humidity']\n x_milano = _c.df_milano['day']\n y_asti = _c.df_asti['humidity']\n x_asti = _c.df_asti['day']\n y_torino = _c.df_torino['humidity']\n x_torino = _c.df_torino['day']\n\n day_ravenna = [parser.parse(x) for x in x_ravenna]\n day_faenza = [parser.parse(x) for x in x_faenza]\n day_cesena = [parser.parse(x) for x in x_cesena]\n\n day_milano = [parser.parse(x) for x in x_milano]\n day_asti = [parser.parse(x) for x in x_asti]\n day_torino = [parser.parse(x) for x in x_torino]\n hours = mat_dates.DateFormatter('%H:%M')\n fig, ax = plt.subplots()\n plt.xticks(rotation=70)\n\n ax.xaxis.set_major_formatter(hours)\n\n ax.plot(day_ravenna, y_ravenna, 'r', day_faenza, y_faenza, 'r', day_cesena, y_cesena, 'r',\n day_milano, y_milano, 'b', day_asti, y_asti, 'b', day_torino, y_torino, 'b')\n plt.show()\n\n\ndef max_humidity_plt():\n # 湿度和离海远近\n plt.plot(_c.dis, _c.hum_max, 'ro')\n plt.savefig('./max_hum')\n\n plt.plot(_c.dis, _c.hum_min, 'bo')\n plt.savefig('./min_hum')\n plt.show()\n\nif __name__ == '__main__':\n max_humidity_plt()\n","sub_path":"weather_analyse/humidity.py","file_name":"humidity.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"55347902","text":"from algoritmia.viewers.graph2d_viewer import Graph2dViewer\nfrom algoritmia.algorithms.traversers import dijkstra_edge_traverser, dijkstra_metric_edge_traverser\nfrom algoritmia.data.iberia import iberia2d, km2d, coords2d\n\nif __name__ == '__main__':\n v_initial = coords2d[\"Madrid\"]\n v_final = coords2d[\"Bilbao\"]\n\n edges = dijkstra_edge_traverser(iberia2d, km2d, v_initial)\n # edges = dijkstra_metric_edge_traverser(iberia2d, km2d, v_initial, v_final)\n colors = {}\n for (u, v) in edges:\n colors[v] = 'red'\n if v == v_final: break\n colors[v_initial] = 'palegreen'\n colors[v_final] = 'palegreen'\n\n gv = Graph2dViewer(iberia2d, canvas_width=800, canvas_height=800, vertexmode=Graph2dViewer.X_Y, colors=colors)\n gv.run()\n","sub_path":"src/algoritmia/viewers/demos/demo_metric_dijkstra_iberia.py","file_name":"demo_metric_dijkstra_iberia.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"433695198","text":"'''\n.. moduleauthor:: Rasmus Diederichsen\n\nThis module contains the base definition for subscriber functionality. The\n:class:`~ikkuna.export.subscriber.Subscriber` class should be subclassed for adding new metrics.\n\n'''\nimport abc\nfrom collections import defaultdict\nimport ikkuna.visualization\nfrom ikkuna.export.messages import MessageBundle, TrainingMessage, ALLOWED_KINDS, get_default_bus\n\n\nclass Subscription(object):\n '''Specification for a subscription that can span multiple kinds and a tag.\n\n Attributes\n ----------\n _tag : str\n Tag for filtering the processed messages\n _subscriber : ikkuna.export.subscriber.Subscriber\n The subscriber associated with the subscription\n counter : dict(ikkuna.utils.NamedModule or str, int)\n Number of times the subscriber was called for each module label or meta data\n identifier. 
Since one :class:`ikkuna.export.subscriber.Subscriber` is associated\n with only one configuration of :class:`ikkuna.export.messages.MessageBundle`, this\n will enable proper subsampling of message streams.\n kinds : list(str)\n List of string identifiers for different message kinds. These are all the\n message kinds the subscriber wishes to receive\n _subsample : int\n Factor for subsampling incoming messages. Only every ``subsample``-th\n message will be processed.\n '''\n\n def __init__(self, subscriber, kinds, tag=None, subsample=1):\n '''\n Parameters\n ----------\n subscriber : ikkuna.export.subscriber.Subscriber\n Object that wants to receive the messages\n tag : str or None\n Optional tag for filtering messages. If ``None``, all messages will be\n relayed\n '''\n self._tag = tag\n self._subscriber = subscriber\n if not all(map(lambda k: k in ALLOWED_KINDS, kinds)):\n raise ValueError('Unknown message kind encountered.')\n self._kinds = kinds\n self._counter = defaultdict(int)\n self._subsample = subsample\n\n @property\n def counter(self):\n # caution: if you alter this dict, you're on your own\n return self._counter\n\n @property\n def kinds(self):\n return self._kinds\n\n def _handle_message(self, message):\n '''Process a newly arrived message. Subclasses should override this method for any special\n treatment.\n\n Parameters\n ----------\n message : ikkuna.export.messages.Message\n '''\n data = MessageBundle(message.key, message.kind)\n data.add_message(message)\n self._subscriber.process_message_bundle(data)\n\n def handle_message(self, message):\n '''Callback for receiving an incoming message.\n\n Parameters\n ----------\n message : ikkuna.export.messages.TrainingMessage\n '''\n if not (self._tag is None or self._tag == message.tag):\n return\n\n if message.kind not in self.kinds:\n return\n\n if isinstance(message, TrainingMessage):\n key = (message.module, message.kind)\n else:\n key = message.kind\n if self._counter[key] % self._subsample == 0:\n self._handle_message(message)\n self._counter[key] += 1\n\n\nclass SynchronizedSubscription(Subscription):\n '''A subscription which buffers messages and publishes a set of messages, each of a different\n kind, when one round (a train step) is over. This is useful for receiving several kinds of\n messages in each train step and always have them be processed together.'''\n\n def __init__(self, subscriber, kinds, tag=None, subsample=1):\n super().__init__(subscriber, kinds, tag, subsample)\n self._current_seq = None\n self._identifiers = {}\n self._step = None\n\n def _new_round(self, seq):\n '''Start a new round of buffering, clearing the previous cache and resetting the record for\n which kinds were received in this round.\n\n Parameters\n ----------\n seq : int\n Sequence number for the new round\n\n Raises\n ------\n RuntimeError\n If not all desired kinds have been received for all identifiers yet in the current round\n '''\n for bundle in self._identifiers.values():\n if not bundle.complete():\n raise RuntimeError(f'Bundle for module {bundle._module} not yet complete.')\n self._current_seq = seq\n self._identifiers = {}\n\n def _publish_complete(self):\n delete_these = []\n # any full? 
publish\n for identifier, message_bundle in self._identifiers.items():\n if message_bundle.complete():\n self._subscriber.process_message_bundle(message_bundle)\n delete_these.append(identifier)\n\n # purge published data\n for identifier in delete_these:\n del self._identifiers[identifier]\n\n def _handle_message(self, message):\n '''Start a new round if a new sequence number is seen.'''\n\n # if we get a new sequence number, a new train step must have begun\n if self._current_seq is None or self._current_seq != message.seq:\n self._new_round(message.seq)\n\n # module not seen -> init data\n key = message.key\n if key not in self._identifiers:\n self._identifiers[key] = MessageBundle(key, self.kinds)\n self._identifiers[key].add_message(message)\n\n self._publish_complete()\n\n\nclass Subscriber(abc.ABC):\n '''Base class for receiving and processing activations, gradients and other stuff into\n insightful metrics.'''\n\n def __init__(self, subscription, message_bus):\n '''\n Parameters\n ----------\n subscription : Subscription\n '''\n self._subscription = subscription\n self._msg_bus = message_bus\n\n @property\n def subscription(self):\n return self._subscription\n\n @property\n def message_bus(self):\n return self._msg_bus\n\n @abc.abstractmethod\n def compute(self, message_or_data):\n '''This is where the magic happens. Subclasses should override this method so that they can\n compute their metric upon reception of their desired messages or do whatever else they want.\n If interested in plotting, they should then use their\n :attr:`~ikkuna.export.subscriber.PlotSubscriber.backend` property to plot the metric (if\n they display line plots) and their ``message_bus`` to publish a new message with the metric.\n\n Parameters\n ----------\n message_or_data : ikkuna.export.messages.Message\n Can either be :class:`~ikkuna.export.messages.MetaMessage` if the\n Subscriber is not interested in actual training artifacts, or\n :class:`~ikkuna.export.messages.TrainingMessage`\n '''\n pass\n\n def receive_message(self, message):\n '''Process a message received from an :class:`~ikkuna.export.Exporter`.'''\n self._subscription.handle_message(message)\n\n def process_message_bundle(self, message_bundle):\n '''Callback for processing a :class:`~ikkuna.export.messages.MessageBundle` object with\n :class:`~ikkuna.export.messages.MetaMessage`\\ s or\n :class:`~ikkuna.export.messages.TrainingMessage`\\ s in it.\n\n Parameters\n ----------\n message_bundle : ikkuna.export.messages.MessageBundle\n The exact nature of this package is determined by the\n :class:`ikkuna.export.subscriber.Subscription` attached to this\n :class:`ikkuna.export.subscriber.Subscriber`.\n\n Raises\n ------\n ValueError\n If the received :class:`~ikkuna.export.messages.MessageBundle` object is not\n :meth:`~ikkuna.export.messages.MessageBundle.complete()`\n '''\n if not message_bundle.complete():\n raise ValueError(f'Data received for \"{message_bundle._module}\" is not complete.')\n\n self.compute(message_bundle)\n\n\nclass PlotSubscriber(Subscriber):\n '''Base class for subscribers that output scalar or histogram values per time and module\n\n Attributes\n ----------\n _backend : ikkuna.visualization.Backend\n Plotting backend\n '''\n\n def __init__(self, subscription, message_bus, plot_config, backend='tb', **tbx_params):\n '''\n Parameters\n ----------\n ylims : tuple(int, int)\n Optional Y-axis limits\n plot_config : dict\n Configuration parameters for plotting. Relevant keys are ``title``,\n ``xlabel``, ``ylabel`` and ``ylims``. 
Which of them are actually used\n depends on the :class:`~ikkuna.visualization.Backend`\n **tbx_params : dict\n Keywords for the :class:`tensorboardX.SummaryWriter`\n '''\n super().__init__(subscription, message_bus)\n\n self._backend = ikkuna.visualization.get_backend(backend, plot_config, **tbx_params)\n\n @property\n def backend(self):\n '''ikkuna.visualization.Backend: The backend to use for plotting'''\n return self._backend\n\n @abc.abstractmethod\n def compute(self, message_or_data):\n pass\n","sub_path":"ikkuna/export/subscriber/subscriber.py","file_name":"subscriber.py","file_ext":"py","file_size_in_byte":9574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"471257549","text":"from flask_caching import Cache\nfrom flask_migrate import Migrate\nfrom flask_sqlalchemy import SQLAlchemy\n\ndb = SQLAlchemy()\nmigrate = Migrate()\ncache = Cache(config={'CACHE_TYPE': 'redis','CACHE_KEY_PREFIX': 'flask(cache)'})\n\ndef init_ext(app):\n db.init_app(app)\n migrate.init_app(app=app,db=db)\n cache.init_app(app)\n","sub_path":"app/ext.py","file_name":"ext.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"191803457","text":"from discord.ext import commands\nfrom os import environ\nfrom lib import db\nfrom models import CryptoModel, Crypto, UserModel\nfrom sqlalchemy.engine.url import URL\nimport asyncio\nfrom lib import EmbedMaker\nimport discord\nimport traceback\n\n\nwiki_commands = {\n \"info\": \"https://github.com/virtualCrypto-discord/virtualCrypto/wiki/Commands#vcinfo-通貨名\",\n \"pay\": \"https://github.com/virtualCrypto-discord/virtualCrypto/wiki/Commands#vcpaytipsend-メンション-通貨の単位-数量\",\n \"create\": \"https://github.com/virtualCrypto-discord/virtualCrypto/wiki/Commands#vccreate-通貨名-通貨の単位-10分に増える通貨の量\",\n \"give\": \"https://github.com/virtualCrypto-discord/virtualCrypto/wiki/Commands#vcgive-メンション-数量\",\n}\n\n\nclass VirtualCrypto(commands.Bot):\n def __init__(self) -> None:\n super().__init__(command_prefix=commands.when_mentioned_or(\"vc.\"), help_command=None)\n self.loop.create_task(self.init_db())\n self.loop.create_task(self.give_hold_batch())\n\n async def init_db(self):\n await db.set_bind(\n URL(\n drivername=\"postgresql\",\n username=environ.get(\"POSTGRES_USER\"),\n password=environ.get(\"POSTGRES_PASSWORD\"),\n host=\"virtualcrypto_postgres\",\n port=\"5430\",\n database=environ.get(\"POSTGRES_DB\")\n )\n )\n await db.gino.create_all()\n\n async def give_hold_batch(self):\n await self.wait_until_ready()\n while not self.is_closed():\n crypts = await CryptoModel().all()\n try:\n for crypto in crypts:\n if not crypto.distribution:\n continue\n all_amount = sum([i.amount for i in await UserModel().get_crypto_all(crypto.id)])\n guild = self.get_guild(crypto.id)\n if guild is None:\n continue\n online_count = len(\n [member for member in guild.members if\n member.status is not discord.Status.offline\n and member.status is not discord.Status.idle\n and not member.bot]\n )\n if all_amount + (online_count * 10) > crypto.max_amount:\n self.loop.create_task(\n crypto.update(\n hold=Crypto.hold + (crypto.max_amount - (online_count * 10)),\n distribution=False).apply()\n )\n continue\n\n self.loop.create_task(\n crypto.update(hold=Crypto.hold + (online_count * 10)).apply()\n )\n except Exception as e:\n await self.mention_error(e)\n await asyncio.sleep(60 * 60) # 60 minutes\n\n async def on_command_error(self, context: 
commands.Context, exception):\n if isinstance(exception, commands.BadArgument) or isinstance(exception, commands.MissingRequiredArgument):\n if context.command.name in wiki_commands.keys():\n await EmbedMaker(context).by_error_text(\"コマンドの引数が間違っています。こちらからご確認ください: \" + wiki_commands[context.command.name]).send()\n return\n if isinstance(exception, commands.CommandNotFound):\n return\n\n await self.mention_error(exception)\n\n async def mention_error(self, exception: Exception):\n embed = discord.Embed(\n title=str(exception),\n description=\"\\n\".join(traceback.format_exception(type(exception), exception, exception.__traceback__))[:1500]\n )\n await self.get_channel(757236120475009108).send(\"<@212513828641046529>\", embed=embed)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"526901290","text":"#! /usr/bin/env python\nimport re\nimport datetime\nfrom astropy.table import Table,Column\nfrom astropy.io import registry\nimport numpy as np\nfrom collections import OrderedDict, deque\nfrom astropy.coordinates import SkyCoord\nimport astropy.units as u\nimport json\nfrom functools import partial\nfrom pathlib import Path\n\nLEG_RE = re.compile('(Leg\\s\\d.+?)\\n\\n\\n', re.S)\nLEG_APPROACH_RE = re.compile('(Leg\\s\\d*\\s\\(Approach.+?)\\Z', re.S)\n\nLEG_TAB_RE = re.compile('(UTC\\s*MHdg.+?)(?:(?:Leg)|\\Z)', re.S)\n\nLEG_NAME_RE = re.compile('(Leg\\s\\d.+?)\\s(?:Start)', re.S)\nLEG_NUM_RE = re.compile('Leg\\s(\\d+)\\s')\n\nTHDG_RATE_RE = re.compile('THdg:.*(rate:)')\ndef rate_repl(match):\n return(match.group(0).replace('rate','rate2'))\n\nTOP_META_RE = re.compile('Filename:\\s(.*)\\sSaved:\\s(.*)')\nMISSION_META_RE = re.compile('Mission\\sSummary.*(Flight.+?)Leg\\s1', re.S)\nTOP_AIRPORT2_RE = re.compile('Landing:.*Airport:\\s(.*)')\n\nSUA_RE = re.compile('\\*\\**\\s?Potential\\sSUA.*\\nZone.*', re.S)\n\n#LEG_START_RE = re.compile('Start:\\s(.*?)(?:\\n\\n)', re.S)\n\nMETA_KEYS = ['Flight Plan ID','Start','Leg Dur','Alt.','ObspID','Blk','Priority','Obs Dur',\n 'Target','RA','Dec','Equinox','Elev','ROF','rate','FPI','Moon Angle',\n 'Moon Illum','THdg','rate2','Init Hdg','Sun Az Delta','Runway','End Lat','End Lon',\n 'End lat','End lon',\n 'Sunrise','Sunrise Az','Airport','NAIF ID','Sunset','Sunset Az','Sun Az Delta','Wind Override',\n 'Comment','DCS comments','Note','neighbors',\n 'Legs','Mach','Takeoff','Obs Time','Flt Time','Landing']\n\nSPLIT_CHECK = 'XXXXXXX'\n\ndef convert_column_dtypes(table,\n scols=['ObsBlk','AOR','RA','DEC','Target']):\n '''Ensure that certain columns are processed with appropriate dtypes'''\n for col in scols:\n if col not in table.colnames:\n continue\n table.replace_column(col,Column(table[col],dtype=str))\n \n for row in table:\n if row[col] == 'None':\n row[col] = ''\n \n return table\n\n\ndef get_attrdict(drow,data_attr,ext='_'):\n \"\"\"Expand out _start and _end and get attrs\"\"\" \n for k,v in data_attr.items():\n val = drow.pop(k)\n if val is None:\n val = [None] * len(v)\n else:\n # string like '[val1,val2]'\n try:\n if ' deg/min' in val:\n val = val.split(' deg/min')[0]\n val = [float(x) for x in val[1:-1].split(', ')]\n except:\n val = [None] * len(v)\n \n nkeys = [ext.join((k,vi)) for vi in v]\n drow.update({*zip(nkeys,val)})\n \n return drow\n\n\ndef extract_keys(leg,keys=META_KEYS):\n '''Extract keys from each leg'''\n params = OrderedDict()\n for key in keys:\n # get all values after key\n kcolon = '%s:'%key\n if 
kcolon in leg:\n #startcol = leg.find(key) + len(key) + 1 # 1 for colon\n startcol = leg.find(kcolon) + len(kcolon)\n match = leg[startcol:]\n else:\n continue\n\n # get positions of every other key in match\n cols = np.array([match.find('%s:'%k) for k in keys],dtype=float)\n #cols = np.array([match.find(k) for k in keys],dtype=float)\n # set -1 to nan and find first key after val\n cols[cols==-1] = np.nan\n try:\n ksplit = keys[np.nanargmin(cols)]\n except ValueError:\n # all nans, therefore this is the last key\n params[key] = match.strip()\n continue\n # split on this key\n match = match.split(ksplit)[0]\n params[key] = match.strip()\n\n return params\n \n\ndef read_UTC_tab(utctext):\n '''Reads utc text table into Table object'''\n\n utclines = utctext.split('\\n')\n\n # Add comment column to headerline\n utclines[0] = '%sComment' % utclines[0]\n\n # delete blank lines\n utclines = [line for line in utclines if line]\n\n # get start of columns in fixed width format\n col_starts = [utclines[0].index(name) for name in utclines[0].split()]\n\n return Table.read(utclines,format='ascii.fixed_width',col_starts=col_starts)\n\n \ndef get_legs(filename):\n '''Divide text into legs'''\n with open(filename,'r') as f:\n text = f.read()\n\n # rate: keyword is specified twice, which brings things\n # replace it\n text = THDG_RATE_RE.sub(rate_repl,text)\n \n legs = LEG_RE.findall(text) # both leg preamble and utc tab\n utc_tabs = LEG_TAB_RE.findall(text) # just utc tab\n\n # add final leg\n fleg = LEG_APPROACH_RE.findall(text)[0]\n try:\n fleg,futc_tab = fleg.split('UTC')\n except ValueError:\n fleg,futc_tab = fleg.split('UTC')[0],fleg.split('UTC')[-1]\n futc_tab = 'UTC%s'%futc_tab\n\n legs.append(fleg)\n utc_tabs.append(futc_tab)\n \n\n # remove possible SUAs\n for idx,tab in enumerate(utc_tabs):\n sua = SUA_RE.findall(tab)\n if sua:\n utc_tabs[idx] = tab.replace(sua[0],'')\n\n # get toplevel metadata\n top = TOP_META_RE.findall(text)[0]\n top = OrderedDict(zip(('Filename','Saved'),top))\n mission = MISSION_META_RE.findall(text)[0]\n top['FILENAME'] = filename\n \n\n # remove utc tabs from legs\n legs = [l.replace(u.strip(),'') for l,u in zip(legs,utc_tabs)]\n \n # make sure comments are attached\n for idx,pair in enumerate(zip(legs,utc_tabs)):\n leg,utc = pair\n if 'Comment' in leg:\n continue\n\n # check for comments by replacing leg header and utc tab text with XXXXXXX\n rep = text.replace(leg.strip(), SPLIT_CHECK)\n rep = rep.replace(utc, SPLIT_CHECK)\n\n splitrep = rep.split(SPLIT_CHECK)[1].strip()\n if splitrep:\n # comment is present and not captured by LEG_RE\n legs[idx] = '\\n'.join((legs[idx], splitrep))\n\n # convert utc_tabs to tables\n utc_tabs = [Table.read(tab,format='utc-tab') for tab in utc_tabs]\n\n # get leg metadata\n leg_names = (LEG_NAME_RE.findall(leg)[0].strip() for leg in legs)\n\n leg_meta = [extract_keys(leg) for leg in legs]\n \n for meta,leg_name in zip(leg_meta,leg_names):\n # add leg name to meta data\n meta['LegName'] = leg_name\n meta['Leg'] = int(LEG_NUM_RE.findall(leg_name)[0])\n meta.move_to_end('LegName',last=False)\n meta.move_to_end('Leg',last=False)\n\n\n # finish top level metadata\n summary = extract_keys(mission)\n summary.update(**top)\n summary['summary'] = mission\n # get second airport\n summary['Airport2'] = TOP_AIRPORT2_RE.findall(mission)[0]\n \n # add metadata to tables\n for tab,meta,raw in zip(utc_tabs,leg_meta,legs):\n tab.meta = meta\n tab.raw = raw\n tab.meta['summary'] = summary\n\n return summary, utc_tabs\n\ndef 
MIS_table_to_DB(table,miscfg):\n \"\"\"Upconvert legacy .mis table to DB compatibility\"\"\"\n legmap = json.loads(miscfg['legacy_map'])\n data_attr = json.loads(miscfg['data_attr'])\n\n # rate should be ROFRT\n data_attr['rate'] = data_attr.pop('ROFRT')\n # rate2 should be THdgRT\n data_attr['rate2'] = data_attr.pop('THdgRT')\n \n meta_legs = filter(lambda k: 'Leg' in k,table.meta.keys())\n meta_legs = filter(lambda k: k != 'Legs', meta_legs)\n meta_legs = [table.meta.get(k) for k in meta_legs]\n\n drows = map(lambda leg: {k:leg.get(v) for k,v in legmap.items()}, meta_legs)\n drows = list(filter(lambda row: row['Leg'] is not None,drows))\n\n # get additional attributes\n attrs = map(lambda leg: {k:leg.get(k) for k in data_attr.keys()}, meta_legs)\n attr_func = partial(get_attrdict,data_attr=data_attr)\n attrs = list(map(attr_func,attrs))\n\n for attr in attrs:\n attr['ROFRT_start'] = attr.pop('rate_start')\n attr['ROFRT_end'] = attr.pop('rate_end')\n attr['THdgRT_start'] = attr.pop('rate2_start')\n attr['THdgRT_end'] = attr.pop('rate2_end')\n \n flightplan = table.meta['Flight Plan ID']\n flightname = flightplan.split('_')[-1]\n flightseries = '_'.join(flightplan.split('_')[0:-1])\n filename = table.meta['FILENAME']\n stats = Path(filename).stat()\n ts = stats.st_mtime if stats.st_mtime > stats.st_ctime else stats.st_ctime\n\n for d,a in zip(drows,attrs):\n g = d.get('GuideStar')\n if g is not None:\n d['GuideStar'] = 'FPI: %s'%g\n d['FlightPlan'] = flightplan\n d['FlightName'] = flightname\n d['Series'] = flightseries\n d['fkey'] = 'Leg%s_%s' % (d['Leg'],flightplan)\n d['FILENAME'] = filename\n d['TIMESTAMP'] = ts\n d.update(a)\n\n # pull additional info from meta\n legacy_meta = json.loads(miscfg['legacy_meta'])\n emeta = {k:table.meta.get(v) for k,v in legacy_meta.items()}\n for tkey in ('DepartureTime','ArrivalTime'):\n # format datetime\n emeta[tkey] = datetime.datetime.strptime(emeta[tkey],'%Y-%b-%d %H:%M:%S %Z').strftime('%Y-%m-%d %H:%M:%S')\n for d in drows:\n d.update(**emeta)\n\n\n # get utctab\n _,utctabs = get_legs(filename)\n utctabs = map(lambda utctab: utctab.to_pandas().to_dict(orient='records'), utctabs)\n for d,utc in zip(drows,utctabs):\n d['WAYPTS'] = json.dumps(utc) if utc else None\n\n return drows\n \n\ndef read_MIS_file(filename):\n '''Parse MIS file into table'''\n meta, legs = get_legs(filename)\n\n colnames = ['Leg','LegName','Blk','ObspID','Target','RA','Dec','Start','Obs Dur','Alt.']#,'MIS']\n rows = deque()\n for leg in legs:\n # extract keys from each leg's metadata\n row = [leg.meta.get(col) for col in colnames]\n\n # get AOR from Blk\n #idx = colnames.index('Blk')\n #aorid = row[idx].split('OB_')[1] if row[idx] else None\n #row.append(aorid)\n #row[idx] = row[idx].split('OB_')[1] if row[idx] else None\n \n #row = [leg.meta.get(col) for col in colnames[:-1]]\n #row.append(leg)\n rows.append(row)\n\n table = Table(rows=list(rows),names=colnames)\n table.rename_column('Blk','ObsBlk')\n table.rename_column('ObspID','AOR')\n table.rename_column('Dec','DEC')\n\n \n # remove rows with empty leg\n idx = np.where(table['Leg'] == None)\n if idx:\n table.remove_rows(idx[0])\n\n # attach metadata\n table.meta = meta\n\n # change ra, dec columns\n for row in table:\n if not row['RA']:\n continue\n #print(row)\n coord = SkyCoord(ra=row['RA'],dec=row['DEC'],unit=(u.hourangle,u.deg))\n ra,dec = coord.to_string('hmsdms',sep=':',precision=2).split()\n row['RA'], row['DEC'] = ra, dec\n\n # Convert columns to appropriate dtypes\n table = convert_column_dtypes(table)\n\n # add 
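# The loop above normalizes RA/Dec strings via astropy's SkyCoord. Below is a
# minimal standalone sketch of that conversion, assuming the same astropy API
# already imported in this module; the sample coordinate is illustrative only.
from astropy.coordinates import SkyCoord
import astropy.units as u

def normalize_radec(ra, dec):
    # parse hour-angle/degree strings, re-emit as colon-separated sexagesimal
    coord = SkyCoord(ra=ra, dec=dec, unit=(u.hourangle, u.deg))
    return coord.to_string('hmsdms', sep=':', precision=2).split()

# e.g. normalize_radec('5 35 17.3', '-5 23 28') -> ['05:35:17.30', '-05:23:28.00']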
row metadata\n    for idx,leg in enumerate(legs):\n        try:\n            table.meta['Leg%i'%leg.meta['Leg']] = leg.meta\n        except KeyError:\n            table.meta['Leg%i'%(idx+1)] = leg.meta\n\n    return table\n\n\n    \n# Register Table class readers\nregistry.register_reader('utc-tab', Table, read_UTC_tab)\nregistry.register_reader('mis-tab', Table, read_MIS_file)\n\n\n\nif __name__ == '__main__':\n    #tab = Table.read('mis/201807_HA_IAGO.mis',format='mis-tab')\n    #tab.pprint()\n    from configparser import ConfigParser\n\n    cfg = ConfigParser()\n    cfg.read('DBmodels.cfg')\n    \n    tab = Table.read('/home/msgordo1/Documents/SOFIA/Flights/Cyc7/FORCAST/OC7/OC07G/201910_FO_SERIES_INIT/201910_FO_GAVIN.mis',\n                     format='mis-tab')\n    tab.pprint()\n    rows = MIS_table_to_DB(tab, cfg['MIS'])\n","sub_path":"dcs/MIS.py","file_name":"MIS.py","file_ext":"py","file_size_in_byte":11574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"611302099","text":"\ndef check_duplicate_seq(array):\n    \"\"\"Return True if the sequence contains no repeated items.\"\"\"\n    t_dict_map = {}\n    for n in range(0, len(array)):\n        tmp = array[n]\n        if t_dict_map.get(tmp) is None:\n            t_dict_map[tmp] = 1\n        else:\n            return False\n    return True\n\n\ndef sliding_windows(s):\n    first_flag = 0\n    second_flag = 0\n    longest = 0\n    longest_seq_char = ''\n    if longest_seq_char == '':\n        longest_seq_char = s[first_flag]\n        second_flag = 2\n    s_len = len(s)\n    while first_flag < s_len:\n        current = s[first_flag : second_flag]\n        temp_size = len(current)\n        if check_duplicate_seq(current) and temp_size > longest:\n            longest_seq_char = current\n            longest = temp_size\n        else:\n            first_flag += 1\n        second_flag += 1\n    return longest_seq_char\n\n\nif __name__ == '__main__':\n    S = 'abcdbananaqueenkingjackabcdef'\n    output = 'kingjac'\n    assert output == sliding_windows(S)\n\n    S = 'abcabcbb'\n    output = 'abc'\n    assert output == sliding_windows(S)\n","sub_path":"longest-sequential-string.py","file_name":"longest-sequential-string.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"573058996","text":"# coding=utf-8\nfrom django.conf.urls import url\nfrom views import Categories, Products, list_categories, ProductView\n\n\n\nurlpatterns = [\n    url(r'^$', Categories.as_view(), name='categories'),  # dispatch via a class-based view\n    url(r'^def/$', list_categories, name='categories_def'),  # alternatively, via a plain view function\n    # NOTE: 'slug' below is a placeholder group name; the original names are unknown\n    url(r'^(?P<slug>[-\\w]+)/$', Products.as_view(), name='products'),\n    url(r'^(?P<slug>[-\\w]+)/$', ProductView.as_view(), name='product'),\n]","sub_path":"product/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"472225632","text":"import logging\nimport time\nimport os\nimport shutil\nimport json\n\nimport torch\nimport numpy as np\n\nfrom .cuda_helper import cuda, Tensor\nfrom .anneal_helper import AnnealHelper, AnnealParameter\n\nclass BaseModel():\n\tdef __init__(self, param, net, optimizerList):\n\t\tself.param = param\n\t\tself.args = args = param.args\n\t\tself.param.other_weights = {}\n\n\t\tself.net = net\n\n\t\t_ = list(self.net.get_parameters_by_name())\n\t\tself.optimizerList = optimizerList\n\n\t\tself.now_batch = 0\n\t\tself.now_epoch = 0\n\t\tself.best_loss = 1e100\n\t\tself.checkpoint_list = []\n\n\t\tself.anneal_list = []\n\t\tfor key, v in args.items():\n\t\t\tif isinstance(v, AnnealParameter):\n\t\t\t\tif v[0] == \"hold\":\n\t\t\t\t\tself.param.other_weights[key] = v[1][\"value\"]\n\t\t\t\telif v[0] == 
\"anneal\":\n\t\t\t\t\tself.anneal_list.append(AnnealHelper(self, key, **v[1]))\n\t\t\t\t\tself.param.other_weights[key] = v[1][\"beginValue\"]\n\n\t\tif args.cuda:\n\t\t\tlogging.info(\"initializing cuda\")\n\t\t\tTensor(1)\n\t\t\tlogging.info(\"cuda initialized\")\n\n\t\tif args.restore is not None:\n\t\t\tif args.restore == \"last\":\n\t\t\t\tif not os.path.isfile(\"%s/checkpoint_list\" % args.model_dir):\n\t\t\t\t\traise ValueError(\"No Last checkpoint found\")\n\t\t\t\targs.restore = open(\"%s/checkpoint_list\" % args.model_dir).readlines()[0]\n\t\t\telif args.restore == \"best\":\n\t\t\t\tif not os.path.isfile(\"%s/checkpoint_list\" % args.model_dir):\n\t\t\t\t\traise ValueError(\"No best checkpoint found\")\n\t\t\t\tname = open(\"%s/checkpoint_list\" % args.model_dir).readlines()[1]\n\t\t\t\targs.restore = name + \"_best\"\n\t\t\tif os.path.isfile(\"%s/%s.model\" % (args.model_dir, args.restore)):\n\t\t\t\tlogging.info(\"loading checkpoint %s\", args.restore)\n\t\t\t\tcheckpoint = torch.load(\"%s/%s.model\" % (args.model_dir, args.restore), \\\n\t\t\t\t\t\tmap_location=lambda storage, loc: storage)\n\t\t\t\tdiff = args - checkpoint[\"args\"]\n\t\t\t\tif diff:\n\t\t\t\t\tlogging.info(\"Args differences\\n%s\", json.dumps(diff, indent=2))\n\t\t\t\tself.now_batch = checkpoint['now_batch']\n\t\t\t\tself.now_epoch = checkpoint['now_epoch']\n\t\t\t\tself.best_loss = checkpoint['best_loss']\n\t\t\t\tself.net.load_state_dict(checkpoint['weights'], args.load_exclude_set)\n\t\t\t\tself.param.other_weights = checkpoint['other_weights']\n\t\t\t\tfor name, optimizer in self.optimizerList.items():\n\t\t\t\t\tif checkpoint[name]['state'] and self.param.args.restore_optimizer:\n\t\t\t\t\t\toptimizer.load_state_dict(checkpoint[name])\n\t\t\t\t\t\tself.optimizerCuda(optimizer)\n\t\t\t\tlogging.info(\"loaded checkpoint at %d epochs, %d batchs\", self.now_epoch, self.now_batch)\n\t\t\telse:\n\t\t\t\tlogging.info(\"no checkpoint found at %s\", args.restore)\n\t\t\t\traise AssertionError(\"no checkpoint found\")\n\n\t\tfor key, v in args.items():\n\t\t\tif isinstance(v, AnnealParameter):\n\t\t\t\tif v[0] == \"set\":\n\t\t\t\t\tself.param.other_weights[key] = v[1][\"value\"]\n\t\t\t\telif v[0] == \"set&anneal\":\n\t\t\t\t\tself.anneal_list.append(AnnealHelper(self, key, 0, 0, **v[1]))\n\t\t\t\t\tself.param.other_weights[key] = v[1][\"startValue\"]\n\n\t\tif args.restore is not None and args.restoreCallback:\n\t\t\targs.restoreCallback(self)\n\t\t\tdel args['restoreCallback']\n\n\t\tdel args['load_exclude_set']\n\n\t\tcuda(self.net)\n\n\tdef optimizerCuda(self, optimizer):\n\t\tfor state in optimizer.state.values():\n\t\t\tfor k, v in state.items():\n\t\t\t\tif torch.is_tensor(v):\n\t\t\t\t\tstate[k] = cuda(v)\n\n\tdef updateOtherWeights(self):\n\t\tfor a in self.anneal_list:\n\t\t\ta.step()\n\n\tdef updateOver(self):\n\t\tfor a in self.anneal_list:\n\t\t\tif not a.over():\n\t\t\t\treturn False\n\t\treturn True\n\n\tdef save_checkpoint(self, is_best=False, filename=None):\n\t\targs = self.args\n\t\tif filename is None:\n\t\t\tfilename = \"%s_%s\" % (self.param.args.name, \\\n\t\t\t\t\ttime.strftime(\"%Y%m%d_%H%M%S\", time.localtime()))\n\t\tstate = {\\\n\t\t\t'now_epoch': self.now_epoch,\\\n\t\t\t'now_batch': self.now_batch,\\\n\t\t\t'best_loss': self.best_loss,\\\n\t\t\t'args': self.param.args,\\\n\t\t\t'weights': self.net.state_dict(),\\\n\t\t\t'other_weights': self.param.other_weights,\\\n\t\t}\n\t\tfor name, optimizer in self.optimizerList.items():\n\t\t\tstate[name] = optimizer.state_dict()\n\t\tif 
not os.path.exists(args.model_dir):\n\t\t\tos.makedirs(args.model_dir)\n\t\ttorch.save(state, \"%s/%s.model\" % (args.model_dir, filename))\n\n\t\topen(\"%s/checkpoint_list\" % args.model_dir, \"w\").write(filename + \"\\n\" + self.args.name)\n\n\t\tif self.now_epoch % self.param.args.checkpoint_steps == 0:\n\t\t\tself.checkpoint_list.append(filename)\n\t\t\tif len(self.checkpoint_list) > self.param.args.checkpoint_max_to_keep:\n\t\t\t\ttry:\n\t\t\t\t\tos.remove(\"%s/%s.model\" % (args.model_dir, self.checkpoint_list[0]))\n\t\t\t\texcept OSError:\n\t\t\t\t\tpass\n\t\t\t\tself.checkpoint_list.pop(0)\n\t\telse:\n\t\t\tif len(self.checkpoint_list) > 1:\n\t\t\t\ttry:\n\t\t\t\t\tos.remove(\"%s/%s.model\" % (args.model_dir, self.checkpoint_list[-1]))\n\t\t\t\texcept OSError:\n\t\t\t\t\tpass\n\t\t\t\tself.checkpoint_list.pop()\n\t\t\tself.checkpoint_list.append(filename)\n\n\t\tif is_best:\n\t\t\tshutil.copyfile(\"%s/%s.model\" % (args.model_dir, filename), \\\n\t\t\t\t'%s/%s_best.model' % (args.model_dir, self.param.args.name))\n\n\tdef checkgrad(self):\n\t\tlogging.info(\"checkgrad:\")\n\t\tfor name, p in self.net.named_parameters():\n\t\t\tif p.grad is not None:\n\t\t\t\tlogging.info(\"\\t%s\", name)\n\ndef get_mean(loss_arr, key):\n\tif key in loss_arr[0]:\n\t\treturn np.mean(list(map(lambda x: x[key].detach().cpu().numpy(), loss_arr)))\n\telse:\n\t\treturn 0\n\ndef storage_to_list(incoming):\n\tfor i, j in incoming.items():\n\t\tif \"tolist\" in dir(j):\n\t\t\tincoming[i] = j.tolist()\n\treturn incoming\n","sub_path":"models/seq2seq-pytorch/utils/model_helper.py","file_name":"model_helper.py","file_ext":"py","file_size_in_byte":5228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"191235212","text":"import socket\nimport os\n\n# host to listen on\nhost = \"192.168.192.25\"\n# create raw socket and bind to public interface\nif os.name == \"nt\":\n socket_protocol = socket.IPPROTO_IP\nelse:\n socket_protocol = socket.IPPROTO_ICMP\n\nsniffer = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket_protocol)\n\nsniffer.bind((host, 0))\n\n# include packet header\nsniffer.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)\n\n# when using windows, send ioctl to setup promiscuous mode\nif os.name == \"nt\":\n sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_ON)\n\n# receive single packet\nprint(sniffer.recvfrom(65565))\n\n# when using windows, turn off promiscuous mode\nif os.name == \"nt\":\n sniffer.ioctl(socket.SIO_RCVALL, socket.RCVALL_OFF)\n","sub_path":"Python/Networking/ParaSSH/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"560204961","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport data_loader as dl\nfrom keras.optimizers import SGD, Adam\nfrom keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img\nimport random\nfrom sklearn.preprocessing import label_binarize\nimport cnn_generator as cg\nimport os\nimport helper_functions as hf\nfrom sklearn.metrics import classification_report\nfrom keras.models import load_model\nimport scipy.misc as misc\nimport keras.backend.tensorflow_backend as KTF\nimport tensorflow as tf\nimport keras.backend as K\nimport sys\nimport argparse\n\n\n# def get_session(gpu_fraction=0.9):\n# # '''Assume that you have 6GB of GPU memory and want to allocate ~2GB'''\n# num_threads = os.environ.get('OMP_NUM_THREADS')\n# gpu_options = 
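# The raw-socket sniffer above prints the captured packet bytes unparsed. A
# minimal sketch of decoding the IPv4 header from those bytes, standard
# library only (field layout per RFC 791); offered as an illustration, not
# part of the original script.
import socket
import struct

def parse_ipv4_header(raw):
    # fixed 20-byte header: version/IHL, TOS, total length, id, flags/fragment,
    # TTL, protocol, checksum, source address, destination address
    fields = struct.unpack('!BBHHHBBH4s4s', raw[:20])
    return {
        'version': fields[0] >> 4,
        'ttl': fields[5],
        'protocol': fields[6],
        'src': socket.inet_ntoa(fields[8]),
        'dst': socket.inet_ntoa(fields[9]),
    }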
tf.GPUOptions(per_process_gpu_memory_fraction=gpu_fraction)\n# if num_threads:\n# return tf.Session(config=tf.ConfigProto(\n# gpu_options=gpu_options, intra_op_parallelism_threads=num_threads))\n# else:\n# return tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))\n# \n# KTF.set_session(get_session())\n\ndef get_args():\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--stage\", type=int, default=1)\n\tparser.add_argument(\"--experiment\", type=str, default='experiment-11.14')\n\tparser.add_argument(\"--img_size\", type=int, default=256)\n\tparser.add_argument(\"--input_size\", type=int, default=100)\n\tparser.add_argument(\"--nb_tr_sample\", type=int, default=100)\n\tparser.add_argument(\"--val_version\", type=int, default=16)\n\tparser.add_argument(\"--gpu\", type=str, default='0')\n\tparser.add_argument(\"--train_include\", type=int, default=0)\n\targs = parser.parse_args()\n\treturn args\n\nargs = get_args()\nprint(args)\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = args.gpu\n# stage = 1\n# experiment = 'experiment-11.14'\n# ku = 3\n# img_size = 256\n# input_size = 100\n# nb_tr_sample =100\n# val_version = 15\n\nstage = args.stage\nexperiment = args.experiment\ninput_size = args.input_size\nnb_tr_sample = args.nb_tr_sample\nval_version = args.val_version\ntrain_include =args.train_include\nimage_size = args.img_size\nmodel_root = args.experiment+'/' ## the model_root folder is the experiment name\n\n\n## data load and preprocess\t\nX_train,X_val,Y_train,Y_val=hf.load_random_data(val_version = val_version)\nif train_include == 1:\n\tX_val = np.concatenate([X_train, X_val], axis =0)\n\tY_val = np.concatenate([Y_train, Y_val], axis =0)\n\nimage_patches = hf.image_depatch(X_val, 100)\ndensity_patches = hf.image_depatch(Y_val, 100)\nshp = image_patches.shape\nimage_patch_arr = image_patches.reshape(shp[0],shp[1],shp[2],1)\ndensity_patch_arr = density_patches.reshape(shp[0],shp[1],shp[2],1)\n\nimport glob\n## metrics\nmean_abs_error1 = 0\nstd_abs_error1 = 0\nave_estimate1 = 0\nave_real1 = 0\nave_acc1 = 0\n\n## uncertain selection strategy\nmodel_folders = glob.glob(model_root+'stage-'+str(stage)+'/'+'round*')\nif len(model_folders)>0:\n\tall_map_ls = []\n\t# predictions\n\tfor folder in model_folders:\n\t\tK.clear_session()\n\t\tmodel_folder = folder+'/model.h5'\n\t# \tprint(model_folder)\n\t\tmodel = load_model(model_folder)\n\t\tpreds = model.predict(image_patch_arr).reshape(shp[0],shp[1],shp[2])\n\t\tpreds = preds/100\n\t\testimated_maps = hf.image_merge(preds,image_size)\n\t\tall_map_ls.append(estimated_maps)\n\n\tall_map_arr = np.array(all_map_ls)\n\trot_all_map = np.transpose(all_map_arr, (1,2,3,0))\n\tave_estimations = np.mean(rot_all_map, axis = 3)\n\testimated_counts = np.apply_over_axes(np.sum,ave_estimations,[1,2]).reshape(estimated_maps.shape[0])\n\treal_counts = np.apply_over_axes(np.sum,Y_val,[1,2]).reshape(estimated_maps.shape[0])\n\tmean_abs_error1 = np.mean(np.abs(estimated_counts-real_counts))\n\tstd_abs_error1 = np.std(np.abs(estimated_counts-real_counts))\n\tave_estimate1 = np.mean(estimated_counts)\n\tave_real1 = np.mean(real_counts)\n\tave_acc1 = (ave_real1-mean_abs_error1)/ave_real1\n\t# print(np.mean(estimated_counts),np.mean(real_counts))\n\tprint(mean_abs_error1,std_abs_error1)\n\tprint('image prediction accuracy-->'+str(ave_acc1))\n\n## random selection strategy\nmean_abs_error2 = 0\nstd_abs_error2 = 0\nave_estimate2 = 0\nave_real2 = 0\nave_acc2 = 0\nmodel_folders = glob.glob(model_root+'stage-'+str(stage)+'/'+'random_round*')\nif 
len(model_folders)>0:\n\tall_map_ls = []\n\t# predictions\n\tfor folder in model_folders:\n\t\tK.clear_session()\n\t\tmodel_folder = folder+'/model.h5'\n\t# \tprint(model_folder)\n\t\tmodel = load_model(model_folder)\n\t\tpreds = model.predict(image_patch_arr).reshape(shp[0],shp[1],shp[2])\n\t\tpreds = preds/100\n\t\testimated_maps = hf.image_merge(preds,image_size)\n\t\tall_map_ls.append(estimated_maps)\n\n\tall_map_arr = np.array(all_map_ls)\n\trot_all_map = np.transpose(all_map_arr, (1,2,3,0))\n\tave_estimations = np.mean(rot_all_map, axis = 3)\n\testimated_counts = np.apply_over_axes(np.sum,ave_estimations,[1,2]).reshape(estimated_maps.shape[0])\n\treal_counts = np.apply_over_axes(np.sum,Y_val,[1,2]).reshape(estimated_maps.shape[0])\n\tmean_abs_error2 = np.mean(np.abs(estimated_counts-real_counts))\n\tstd_abs_error2 = np.std(np.abs(estimated_counts-real_counts))\n\tave_estimate2 = np.mean(estimated_counts)\n\tave_real2 = np.mean(real_counts)\n\tave_acc2 = (ave_real2-mean_abs_error2)/ave_real2\n\t# print(np.mean(estimated_counts),np.mean(real_counts))\n\tprint(mean_abs_error2,std_abs_error2)\n\tprint('image prediction accuracy-->'+str(ave_acc2))\n\n## save the results to a csv file\nmetric_file = model_root + 'result.csv'\nimport csv\nif stage ==0:\n\twith open(metric_file, 'w') as csvfile:\n\t\tfieldnames = ['stage','average_count', 'average_ground', 'error_mean', 'error_std', 'accuracy']\n\t\twriter = csv.DictWriter(csvfile, fieldnames = fieldnames)\n\t\twriter.writeheader()\n\t\twriter.writerow({'stage':stage, 'average_count':ave_estimate1, 'average_ground': ave_real1, 'error_mean': mean_abs_error1, 'error_std': std_abs_error1, 'accuracy':ave_acc1})\n\t\twriter.writerow({'stage':stage, 'average_count':ave_estimate2, 'average_ground': ave_real2, 'error_mean': mean_abs_error2, 'error_std': std_abs_error2, 'accuracy':ave_acc2})\nelse:\n\twith open(metric_file, 'a') as csvfile:\n\t\tfieldnames = ['stage','average_count', 'average_ground', 'error_mean', 'error_std', 'accuracy']\n\t\twriter = csv.DictWriter(csvfile, fieldnames = fieldnames)\n\t\t# \t\twriter.writeheader()\n\t\twriter.writerow({'stage':stage, 'average_count':ave_estimate1, 'average_ground': ave_real1, 'error_mean': mean_abs_error1, 'error_std': std_abs_error1, 'accuracy':ave_acc1})\n\t\twriter.writerow({'stage':stage, 'average_count':ave_estimate2, 'average_ground': ave_real2, 'error_mean': mean_abs_error2, 'error_std': std_abs_error2, 'accuracy':ave_acc2})\n\t\t\n\n","sub_path":"dl-active/activeTest100new.py","file_name":"activeTest100new.py","file_ext":"py","file_size_in_byte":6545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"388083947","text":"import torch\n\nclass FNN(torch.nn.Module):\n    def __init__(self,input_size,hidden_size,output_size):\n        super(FNN,self).__init__()\n        # store the layer sizes; they are used to build the layers below\n        self.input_size = input_size\n        self.hidden_size = hidden_size\n        self.output_size = output_size\n        self.fc1 = torch.nn.Linear(self.input_size, self.hidden_size)\n        self.act1 = torch.nn.Tanh()  # Tanh activation\n        self.fc2 = torch.nn.Linear(self.hidden_size, self.output_size)\n        self.softmax = torch.nn.Softmax(dim=1)\n\n    def forward(self, inp):\n        fc1 = self.fc1(inp)\n        act1 = self.act1(fc1)\n        fc2 = self.fc2(act1)\n        output = self.softmax(fc2)\n        return output","sub_path":"Trash/00QuestionClassifier/001Trash/fnn.py","file_name":"fnn.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"1764091","text":"# $ python3 exercise_C.py\n\nimport numpy as np\nimport pandas as pd\nfrom rdkit import Chem\nfrom rdkit.Chem import 
Descriptors\nfrom rdkit.ML.Descriptors import MoleculeDescriptors\n\nclass RDKit_2D_descriptors:\n    def __init__(self, smiles):\n        self.mols = Chem.MolFromSmiles(smiles)\n        self.smiles = smiles\n\n    def compute_2D_desc(self):\n        # build a calculator from the full list of descriptor names\n        calc = MoleculeDescriptors.MolecularDescriptorCalculator([x[0] for x in Descriptors.descList])\n        # calculate all 2D descriptors\n        ds = calc.CalcDescriptors(self.mols)\n        # transform the tuple to a numpy array (for the later exercises)\n        return np.array(ds)\n\n    def compute_2D_desc_v2(self):\n        # build a calculator from the full list of descriptor names\n        calc = MoleculeDescriptors.MolecularDescriptorCalculator([x[0] for x in Descriptors.descList])\n        # calculate all 2D descriptors\n        ds = calc.CalcDescriptors(self.mols)\n        # a tuple is immutable, so convert to a list first\n        ds = list(ds)\n        # descList[40] == 'Ipc'\n        # use the averaged form to avoid an excessively large value\n        ds[40] = Descriptors.Ipc(self.mols, avg=True)\n        # transform the list to a numpy array (for the later exercises)\n        # ds = [ds[1], ds[17], ds[23], ds[26], ds[39], ds[55], ds[60], ds[65], ds[66], ds[77], ds[99], ds[121], ds[122]]\n        return np.array(ds)\n\ndef get_2D_desc():\n    # build a calculator from the full list of descriptor names\n    calc = MoleculeDescriptors.MolecularDescriptorCalculator([x[0] for x in Descriptors.descList])\n    # return the descriptor names\n    return calc.GetDescriptorNames()\n\nif __name__ == '__main__':\n    # validate the RDKit_2D_descriptors class\n    ibuprofen_smiles = 'CC(C)Cc1ccc(cc1)[C@@H](C)C(=O)O'\n    rdkit_2d_desc = RDKit_2D_descriptors(ibuprofen_smiles)\n    print(f'the number of 2D descriptors is {len(rdkit_2d_desc.compute_2D_desc())}')","sub_path":"exercise_C.py","file_name":"exercise_C.py","file_ext":"py","file_size_in_byte":1801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"223468281","text":"# import the base module used to prepare the LSTM model\nimport build_model as bm\nimport pandas as pd\nimport numpy as np\nfrom datetime import datetime\n\nPLACE = \"FSM\"\nTIME_INTERVAL = 150\nTIME_DIFFERENCE = 7 * 24 * 60 - 75\nSAMPLE_FREQUENCY = 5\nTIME_STEP = int(TIME_INTERVAL / SAMPLE_FREQUENCY) + 1\nEPOCH = 50\nBATCH_SIZE = 2018\n\nFILE_NAME = \"./speed_data/FSM/preprocessed_471_2017.csv\"\n\n# index values of months (used to select the training and test ranges)\nJAN = 1\nFEB = 2\nMAR = 3\nAPR = 4\nMAY = 5\nJUN = 6\nJUL = 7\nAUG = 8\nSEP = 9\nOCT = 10\nNOV = 11\nDEC = 12\n\n# read and scale the data\ndata = bm.read_data(FILE_NAME)\n#data = pd.concat([bm.read_data(\"preprocessed_471_2016.csv\"), data])\ndata['Scaled'], sc = bm.scale_data(data)\n\n# drop the Speed column holding the raw speed values (the scaled values are used instead)\ndata.drop(['Speed'], axis='columns', inplace=True)\n\n# merging another sensor's data into the main one\n#data_2 = bm.read_data(\"preprocessed_470.csv\")\n#data_2['Scaled_2'], sc = bm.scale_data(data_2, sc)\n#data_2.drop(['Speed'], axis = 'columns', inplace = True)\n#data = bm.merge_two_data(data_2, data)\n\n# adding more data from previous weeks\n#data_prev = data.shift(7*24*12)\n#data_prev_2 = data.shift(2*7*24*12)\n#data_prev = bm.merge_two_data(data_prev_2, data_prev)\n#data = bm.merge_two_data(data_prev, data)\n\n# change missing values (0) to NaN\ndata.replace(0, np.nan, inplace = True)\n\n# add one-hot columns to the data\ndata = bm.join_weekday_one_hot(data)\n#data = bm.join_daypart_one_hot(data)\n\n# prepare the sets\nfeatures = len(data.columns)\nx_features = features * TIME_STEP\n\nreframed = bm.series_to_supervised(data, TIME_INTERVAL, TIME_DIFFERENCE, SAMPLE_FREQUENCY)\n\ntrain = reframed[(reframed.index.month < 
JUN)&(reframed.index.month>FEB)]\ntest = reframed[reframed.index.month == JUN]\ntest = test[test.index.day < 10]\n\n#removing weekends from database\n#train = train[train.index.weekday < 5]\n#test = test[test.index.weekday < 5]\n\n\nx_train, y_train = train.values[:,:x_features],train.values[:,-1]\nx_test, y_test = test.values[:,:x_features],test.values[:,-1]\n\n#reshape the x's to 3D[sample, time_steps, features]\nx_train = x_train.reshape([x_train.shape[0], int(x_train.shape[1] / features),features])\nx_test = x_test.reshape([x_test.shape[0], int(x_test.shape[1] / features),features])\n\n#importing keras model and layers to construct LSTM model\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Flatten, LSTM, Dropout\n\n#initializing regression model\nregressor = Sequential()\n\n#adding layer(s) to model\nregressor.add(LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1], x_train.shape[2])))\nregressor.add(Dropout(0.2))\nregressor.add(LSTM(units=50, return_sequences=True ))\nregressor.add(Dropout(0.2))\nregressor.add(LSTM(units=33, return_sequences=True))\n\n\nregressor.add(Flatten())\nregressor.add(Dense(units=1))\n\n#compiling the model with mean_absolute_percentage_error and adam optimizer\nregressor.compile(optimizer='adam', loss='mean_absolute_percentage_error')\n\n#fitting model with training sets and validation set\nhistory = regressor.fit(x_train, y_train, epochs = EPOCH, batch_size=BATCH_SIZE, validation_data=(x_test, y_test))\nbm.save_val_loss_plot(history, PLACE+\"_epoch_history.csv\")\n\nresults = regressor.predict(x_test)\n\n#constructing estimation dataframe\nreal_values = pd.DataFrame(index = test.index, \n data = bm.inverse_scale(sc, y_test.reshape(-1, 1)),\n columns = ['Real'])\n\npredictions = pd.DataFrame(index = test.index,\n data = bm.inverse_scale(sc, results),\n columns = ['Predictions'])\n\npredictions = pd.concat([real_values, predictions], axis = 1)\n\n\n#constructing daily error dataframe\ndays = predictions.groupby([predictions.index.year, \n predictions.index.month, \n predictions.index.day]).count().index.values\n\n\nrush_hour_predictions = predictions[(predictions.index.hour > 15) & (predictions.index.hour < 22)]\ndaily_error = []\nrush_hour_error = []\n\nfor day in days:\n day_real = predictions[predictions.index.day == day[2]]['Real'].values\n day_pred = predictions[predictions.index.day == day[2]]['Predictions'].values\n daily_error.append(bm.mean_absolute_percentage_error(day_real, day_pred))\n\n rush_real = rush_hour_predictions[rush_hour_predictions.index.day == day[2]]['Real'].values\n rush_pred = rush_hour_predictions[rush_hour_predictions.index.day == day[2]]['Predictions'].values\n rush_hour_error.append(bm.mean_absolute_percentage_error(rush_real, rush_pred))\n\n\nfrom datetime import date\ndaily_error = np.array(daily_error).transpose()\nrush_hour_errors = np.array(rush_hour_error).transpose()\nprint(daily_error.shape)\nindexes = [date(day[0], day[1], day[2]).ctime() for day in days]\ndata = {'Daily Error': daily_error, \n 'Rush Hour Error': rush_hour_error}\n\nerrors = pd.DataFrame(index = indexes, data = data)\nerrors.index.name = 'Date'\n\n#saving 
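# `bm.mean_absolute_percentage_error` used above lives in the project's helper
# module, which is not shown here; the sketch below is a common NumPy
# formulation of MAPE, offered as an assumption of what it computes, not the
# project's exact implementation.
import numpy as np

def mean_absolute_percentage_error(y_true, y_pred):
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    # mean absolute relative error, expressed as a percentage
    return np.mean(np.abs((y_true - y_pred) / y_true)) * 100.0

# e.g. mean_absolute_percentage_error([100, 50], [110, 45]) -> 10.0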
everything\nregressor.save_weights(PLACE+\"_weights.h5\")\nerrors.to_csv(PLACE+\"_Daily_Errors.csv\")\npredictions.to_csv(PLACE+\"_Estimations.csv\")\nprint(PLACE)","sub_path":"model_results/time_step/FSM/FSM_150/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"647705728","text":"#https://www.crowdcast.io/e/list-comprehensions\n\n# If you are wrting an iterable into an iterable\n# my_favorite_numbers = [1,1,2,3,5,8,13]\n# doubled_numbers = []\n#\n# for n in my_favorite_numbers:\n# doubled_numbers.append(n* 2)\n# print(doubled_numbers)\n#\n#\n# #list Comprehension from for loop\n#\n# doubled_numbers = [n* 2 for n in my_favorite_numbers]\n# print(doubled_numbers)\n#\n\n\n# doubled_numbers = [n*2 for n in my_favorite_numbers]\n# print(doubled_numbers)\n\n# import math\n#\n# numbers = range (101)\n# #list(numbers)\n#\n# cubes_of_squares = []\n#\n# for n in numbers:\n# if math.sqrt(n).is_integer():\n# cubes_of_squares.append(n**3)\n#\n#\n# cubes_of_squares = [\n# n**3\n# for n in numbers\n# if math.sqrt(n).is_integer()\n# ]\n# print(cubes_of_squares)\n#\n# names = ['Jeffery','Nicole','Wifred','Ashley','Dorothy','Cynthia','Jared','Eve']\n#\n# vowel_names = []\n# for name in names:\n# if name[0].lower() in \"aeiou\":\n# vowel_names.append(name)\n#\n# vowel_names = [\n# name\n# for name in names\n# if name[0].lower() in \"aeiou\"\n# ]\n# print(vowel_names)\n\n# numbers = [1,2,3,4,5,6,7,8,9]\n# # squares = []\n# #sum_of_squares = 0\n# #\n# # for n in numbers:\n# # squares.append(n**2)\n# # sum_of_squares = sum(squares)\n#\n#\n# sum_of_squares = sum(n**2 for n in numbers)\n# print(sum_of_squares)\n\n# words = ['hi','hiya','he','hello']\n#\n# def superstring_is_not_in_list(item,some_list):\n# return not any(item in x for x in some_list)\n#\n#\n#\n# # new_words =[]\n# # w_in_x = False\n# # for w in words:\n# # if superstring_is_not_in_list(w,words):\n# # new_words.append(w)\n#\n# new_words = [\n# w\n# for w in words\n# if superstring_is_not_in_list(w,words)]\n#\n# print(new_words)\n\n#generators\n\n# numbers = [1,2,3,4]\n# # print(sum(numbers))\n#\n# g = (n ** 2 for n in numbers)\n# print(sum(g))\n\n#print(list(g))\n\n# matrix = [\n# [row * 3 + i for i in range(1,4)]\n# for row in range (4)\n# ]\n\n#print(matrix)\n\n# flatten = []\n#\n# for row in matrix:\n# for x in row:\n# flatten.append(x)\n\n#print(flatten)\n\n# flatten = [\n# x\n# for row in matrix\n# for x in row\n# ]\n# print(flatten)\n\n\n#dictionary comprehension\n\nfavorite_numbers = {\n 'george': 58,\n 'david' :983,\n 'matt': 314,\n 'margaret' : 48,\n 'judith': 6\n}\n\nsmall_favorites = {}\n\nfor name, number in favorite_numbers.items():\n if number < 100:\n favorite_numbers[name] = number\n\nsmall_favorites = {\n name : number\n for name, number in favorite_numbers.items()\n if number < 100\n\n}\nprint(small_favorites)","sub_path":"ListComprehension/.idea/Listcomprehension.py","file_name":"Listcomprehension.py","file_ext":"py","file_size_in_byte":2626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"196521796","text":"# -*- coding: utf-8 -*-\nimport os \n\n#Definim la funció build_database\ndef build_database(params):\n #Declarem la llista d'ImageIDs\n ImageIDs = []\n #Retorna una llista dels arxius al path introduit\n lista = os.walk(os.path.join(params['root'],params['database'],params['split'],'images')) \n #Bucle per omplir la llista amb els nombres ID 
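# build_database here gathers image IDs by walking root/database/split/images
# and stripping file extensions. Below is a compact pathlib sketch of that
# collection step, assuming the same directory layout; illustrative only, not
# part of the original module.
from pathlib import Path

def collect_image_ids(params):
    images_dir = Path(params['root'], params['database'], params['split'], 'images')
    # Path.stem mirrors os.path.splitext: the file name minus its extension
    return [p.stem for p in images_dir.iterdir() if p.is_file()]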
de les imatges sense la\n #extensió\n for root, dirs, files in lista:\n for fichero in files:\n (nombreFichero, extension) = os.path.splitext(fichero)\n ImageIDs.append(nombreFichero)\n #Guardem la longitud de ImageIDs en la variable i\n i=len(ImageIDs)\n #Creem el .txt que contindrà IDs de les imatges\n archi=open(os.path.join(params['root'],params['database'],params['split'],'ImageIDs.txt'),'w')\n #Bucle per omplir el .txt amb les IDs de les imatges\n for i in list(range(i)):\n if(i= 1:\n persons = ', '.join(\n list(queryset_range.all().values_list('last_name', flat=True))\n )\n raise serializers.ValidationError(\n f'Уже есть данные в этот период времени '\n f'(У сотрудников {persons}).'\n )\n # close prev appointment\n if queryset_open.exists():\n for open_param in queryset_open:\n if open_param.first_date < attrs['first_date']:\n open_param.last_date = attrs['first_date'] - timedelta(days=1)\n open_param.save()\n return attrs\n\n\nclass GroupTypesSerializer(serializers.Serializer):\n type = serializers.IntegerField()\n name = serializers.CharField()\n\n\nclass GroupSerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for group\n \"\"\"\n group_type = serializers.SerializerMethodField()\n kindergarten_name = serializers.SerializerMethodField()\n\n def __init__(self, *args, **kwargs):\n super(GroupSerializer, self).__init__(*args, **kwargs)\n date = self.context['request'].query_params.get('date', None)\n if date:\n self.date = dateutil.parser.parse(date)\n else:\n self.date = timezone.now().date()\n\n def get_group_type(self, group):\n return group.type(self.date)\n\n def get_kindergarten_name(self, group):\n return group.kindergarten.name\n\n class Meta:\n model = models.Group\n fields = (\n 'pk',\n 'name',\n 'kindergarten',\n 'kindergarten_name',\n 'position',\n 'short_term',\n 'is_nomad',\n 'group_type',\n )\n\n\nclass GroupTypeHistorySerializer(serializers.ModelSerializer):\n \"\"\"\n Serializer for group type\n \"\"\"\n name = serializers.SerializerMethodField()\n kindergarten = serializers.SerializerMethodField()\n\n def get_name(self, group_type):\n return group_type.get_type_display()\n\n def get_kindergarten(self, group_type):\n return group_type.group.kindergarten.pk\n\n class Meta:\n model = models.GroupTypeHistory\n fields = (\n 'pk',\n 'first_date',\n 'last_date',\n 'group',\n 'kindergarten',\n 'type',\n 'name'\n )\n\n def validate(self, attrs):\n \"\"\"\n Validate range dates\n \"\"\"\n queryset = models.GroupTypeHistory.objects.filter(group=attrs['group'])\n\n if self.instance:\n queryset = queryset.exclude(pk=self.instance.pk)\n queryset_open = queryset.filter(last_date=None)\n\n if attrs['last_date']:\n queryset_range = queryset.filter(\n Q(\n first_date__range=[attrs['first_date'], attrs['last_date']]\n ) | Q(\n last_date__range=[attrs['first_date'], attrs['last_date']]\n )\n )\n else:\n queryset_range = queryset.filter(last_date__gte=attrs['first_date'])\n\n if queryset_range.exists():\n if queryset_range.count() == 1:\n raise serializers.ValidationError(\n f'Уже есть данные в этот период времени ({queryset_range[0]} '\n f'конечная дата {queryset_range[0].last_date}).'\n )\n if queryset_range.count() >= 1:\n _list = ', '.join(\n list(queryset_range.all().values_list('name', flat=True))\n )\n raise serializers.ValidationError(\n f'Уже есть данные в этот период времени({_list}).'\n )\n # close prev appointment\n if queryset_open.exists():\n for open_param in queryset_open:\n if open_param.first_date < attrs['first_date']:\n open_param.last_date = 
attrs['first_date'] - timedelta(days=1)\n open_param.save()\n return attrs\n","sub_path":"apps/kindergartens/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":12567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"79235184","text":"import pickle\nimport uuid\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nimport time\n\nfrom util.batch import NumpyBatchGenerator, SampleMode\nfrom util.report import report, report_meal\nfrom util.load import load_to_batches\n\nfrom models.base import StackedLayers\nfrom models.linear import LinearMultiCharOutputLayer, LinearLayer\nfrom models.linear import LinearSingleCharOutputLayer\nfrom models.base import LinearReshapeLayer, InputLayer, DropOutLayer\nfrom models.conv import ConvLayer\n\nTEST = False\noverfitting_criteria = 0.3\nlow_increment_criteria = 0.3\n\nif TEST:\n MIN_EPOCHS = 2\n MAX_EPOCHS = 2\n\n nepochs = MAX_EPOCHS\n dbargs = {\n 'path': '../generate/datatest.hdf5',\n }\n batchargs = {\n 'train_bsize': 10,\n 'report_bsize': 10,\n 'val_bsize': 50,\n }\nelse:\n MIN_EPOCHS = 10\n MAX_EPOCHS = 10\n\n nepochs = MAX_EPOCHS\n dbargs = {\n 'path': '../generate/data.hdf5',\n }\n batchargs = {\n 'train_bsize': 10,\n 'report_bsize': 10,\n 'val_bsize': 10,\n }\n\npkeep = 0.7\ntrain_gen, report_gen, val_gen = load_to_batches(dbargs, batchargs)\n\n\ndef stop_criterion(epoch, train_hist, test_hist):\n if epoch < MIN_EPOCHS:\n return False\n\n if epoch >= MAX_EPOCHS:\n return True\n\n min_val_cost = np.min([t['loss_avg'] for t in test_hist])\n last_val_cost = test_hist[-1]['loss_avg']\n\n if last_val_cost > (1 + overfitting_criteria) * min_val_cost:\n return True\n\n k = 5\n kcost = [t['loss_avg'] for t in train_hist[:-k]]\n mean_train_cost = np.mean(kcost)\n min_train_cost = np.min(kcost)\n\n if min_train_cost > (1 - low_increment_criteria) * mean_train_cost:\n return True\n\n return False\n\ndef variable_summaries(var):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n summary = []\n\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n summary.append(tf.summary.scalar('mean', mean))\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))\n \n summary.append(tf.summary.scalar('stddev', stddev))\n summary.append(tf.summary.scalar('max', tf.reduce_max(var)))\n summary.append(tf.summary.scalar('min', tf.reduce_min(var)))\n summary.append(tf.summary.histogram('histogram', var))\n\n return summary\n\n\ndef run(model_spec, model_name, max_learning_rate, min_learning_rate):\n model_id = uuid.uuid4()\n print('start training {}...'.format(model_id))\n print('lrate {} -> {}'.format(max_learning_rate, min_learning_rate))\n\n with tf.Session(graph=model_spec.g) as sess:\n\n #summary = []\n # for l,layer in enunmerate(model_spec.layers):\n # if isinstance(layer, ConvLayer):\n\n # with tf.name_scope('conv{}.W'.format(l)):\n # summary += variable_summaries(layer.conv['W'])\n\n # with tf.name_scope('conv{}.b'.format(l)):\n # summary += variable_summaries(layer.conv['b'])\n\n model = model_spec.model\n\n with tf.name_scope('backprop'):\n learn_step = tf.placeholder(tf.float32)\n learning_rate = max_learning_rate + (min_learning_rate - max_learning_rate) * (learn_step/(nepochs -1))\n\n #train = [tf.train.AdamOptimizer(learning_rate).minimize(x) for x in model.loss]\n train = [tf.train.AdamOptimizer(learning_rate).minimize(x) for x in model.xent]\n #train = 
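# The `learning_rate` tensor above interpolates linearly from
# max_learning_rate at epoch 0 down to min_learning_rate at the final epoch.
# The same formula in plain Python, for clarity:
def lr_at_epoch(epoch, nepochs, max_lr, min_lr):
    # epoch 0 -> max_lr; epoch nepochs - 1 -> min_lr; linear in between
    return max_lr + (min_lr - max_lr) * (epoch / (nepochs - 1))

# e.g. lr_at_epoch(0, 10, 1e-3, 1e-4) -> 0.001, and
# lr_at_epoch(9, 10, 1e-3, 1e-4) -> 0.0001 (up to float rounding)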
[tf.train.GradientDescentOptimizer(learning_rate).minimize(x) for x in model.xent]\n\n\n #summary.append(tf.summary.scalar('lrate', learning_rate))\n\n #writer = tf.summary.FileWriter('./experimento/{}'.format(model_id), graph)\n #sum_merged = tf.summary.merge(summary + model_spec.model.summ_ext)\n\n\n\n sess.run(tf.global_variables_initializer())\n def report(batch_gen):\n nw = 0 # Acerto palavra completa\n ls = np.zeros([5]) # loss/char\n nc = np.zeros([5]) #acerto/char\n for batch in batch_gen.gen_batches():\n _nw, _ls, _nc = sess.run([model.nacc, model.loss, model.cnacc], feed_dict=model_spec.food(batch))\n nw += _nw\n ls += _ls\n nc += _nc\n\n SIZE = batch_gen.max_seek * batch_gen.batch_size\n nw /= SIZE\n ls /= SIZE\n nc /= SIZE\n\n keys = ['wacc'] + ['loss_{}'.format(i) for i in range(5)] + ['loss_avg'] + ['acc_{}'.format(i) for i in range(5)] + ['wprob']\n vals = [nw] + list(ls) + [sum(ls)/5.0] + list(nc) + [np.prod(nc)]\n return {k:v for k,v in zip(keys, vals)}\n\n\n train_dict = report(train_gen)\n train_dict.update({'train_time': 0, 'total_time': 0, 'epoch': 0})\n\n test_dict = report(val_gen)\n test_dict.update({'train_time': 0, 'total_time': 0, 'epoch': 0})\n\n train_list = [train_dict]\n test_list = [test_dict]\n\n for epoch in range(nepochs):\n #print(sess.run(learning_rate, feed_dict={learn_step: epoch}))\n\n tinit = time.time()\n for i, batch in enumerate(train_gen.gen_batches()):\n food = model_spec.food(batch, pkeep=pkeep)\n food.update({learn_step: epoch})\n sess.run(train, feed_dict=food)\n\n #if i > 250:\n # break\n\n #_summary = sess.run(sum_merged, feed_dict=food)\n #writer.add_summary(_summary, epoch)\n\n\n tdelta_train = time.time() - tinit\n\n train_dict = report(train_gen)\n test_dict = report(val_gen)\n \n tdelta_total = time.time() - tinit\n\n #print('TRAIN:')\n #print(train_dict)\n #print('TEST:')\n #print(test_dict)\n print('J:{:.4f}/{:.4f} P:{:.4f}/{:.4f} P[0]:{:.4f}/{:.4f}'.format(\n train_dict['loss_avg'],\n test_dict['loss_avg'],\n train_dict['wacc'],\n test_dict['wacc'],\n train_dict['acc_0'],\n test_dict['acc_0']\n ))\n\n train_dict.update({'train_time': tdelta_train, 'total_time': tdelta_total, 'epoch': epoch + 1})\n test_dict.update({'train_time': tdelta_train, 'total_time': tdelta_total, 'epoch': epoch + 1})\n\n train_list.append(train_dict)\n test_list.append(test_dict)\n\n if stop_criterion(epoch, train_list, test_list):\n break\n\n\n df_train = pd.DataFrame(train_list)\n df_test = pd.DataFrame(test_list)\n\n train_report = {\n 'lrange': (min_learning_rate, max_learning_rate),\n 'model': model_spec.desc(),\n 'model_size': model_spec.total_size(),\n 'model_name': model_name,\n 'df_train': df_train,\n 'df_test': df_test,\n }\n\n print('model:')\n print(train_report['model'])\n print('model total size: {}'.format(train_report['model_size']))\n\n print('Train:')\n print(df_train[['loss_avg', 'wacc', 'wprob']])\n\n print('Test:')\n print(df_test[['loss_avg', 'wacc', 'wprob']])\n\n\n file_name = '{}.pkl'.format(model_id)\n with open(file_name, 'bw') as f:\n pickle.dump(train_report, f)\n\n\ndef experiment_lrate(model_func):\n #for lrate in [0.1, 0.01, 0.001, 0.0001, 0.00001]:\n for lrate in [1e-3, 1e-4]:\n max_learning_rate = lrate\n min_learning_rate = lrate\n graph = tf.Graph()\n model_spec = model_func(graph)\n run(model_spec, model_func.__name__, max_learning_rate, min_learning_rate)\n for l, desc in zip(model_spec.layers, model_spec.desc()):\n print('layer: {}, size: {}'.format(desc, l.num_parameters()))\n\n\ndef RMch(graph):\n return 
StackedLayers(\n InputLayer([None, 50, 200, 3], [None, 5, 36], graph, 'input'),\n LinearReshapeLayer(graph, 'reshape'),\n LinearMultiCharOutputLayer(5, graph, 'classificador'),\n )\n\n\ndef RMchD(graph):\n return StackedLayers(\n InputLayer([None, 50, 200, 3], [None, 5, 36], graph, 'input'),\n LinearReshapeLayer(graph, 'reshape'),\n DropOutLayer(graph, 'dropout1'),\n LinearMultiCharOutputLayer(5, graph, 'classificador'),\n )\n\ndef C5o6RMch(graph):\n return StackedLayers(\n InputLayer([None, 50, 200, 3], [None, 5, 36], graph, 'input'),\n ConvLayer([5, 5, 3, 6], 2, graph, 'conv1'),\n LinearReshapeLayer(graph, 'reshape'),\n LinearMultiCharOutputLayer(5, graph, 'classificador'),\n )\n\ndef C5o6RMchD(graph):\n return StackedLayers(\n InputLayer([None, 50, 200, 3], [None, 5, 36], graph, 'input'),\n ConvLayer([5, 5, 3, 6], 2, graph, 'conv1'),\n LinearReshapeLayer(graph, 'reshape'),\n DropOutLayer(graph, 'dropout1'),\n LinearMultiCharOutputLayer(5, graph, 'classificador'),\n )\n\ndef C5o6RMchMax(graph):\n return StackedLayers(\n InputLayer([None, 50, 200, 3], [None, 5, 36], graph, 'input'),\n ConvLayer([5, 5, 3, 6], 2, graph, 'conv1', True),\n LinearReshapeLayer(graph, 'reshape'),\n LinearMultiCharOutputLayer(5, graph, 'classificador'),\n )\n\ndef C5o6C5o12RMchD(graph):\n return StackedLayers(\n InputLayer([None, 50, 200, 3], [None, 5, 36], graph, 'input'),\n ConvLayer([5, 5, 3, 6], 1, graph, 'conv1'),\n DropOutLayer(graph, 'dropout1'),\n ConvLayer([5, 5, 6, 12], 2, graph, 'conv2'),\n DropOutLayer(graph, 'dropout2'),\n LinearReshapeLayer(graph, 'reshape'),\n LinearMultiCharOutputLayer(5, graph, 'classificador'),\n )\n\ndef C5o6C5o12RMch(graph):\n return StackedLayers(\n InputLayer([None, 50, 200, 3], [None, 5, 36], graph, 'input'),\n ConvLayer([5, 5, 3, 6], 1, graph, 'conv1'),\n ConvLayer([5, 5, 6, 12], 2, graph, 'conv2'),\n LinearReshapeLayer(graph, 'reshape'),\n LinearMultiCharOutputLayer(5, graph, 'classificador'),\n )\n\ndef C5o6C5o12RMchMax(graph):\n return StackedLayers(\n InputLayer([None, 50, 200, 3], [None, 5, 36], graph, 'input'),\n ConvLayer([5, 5, 3, 6], 1, graph, 'conv1'),\n ConvLayer([5, 5, 6, 12], 2, graph, 'conv2', True),\n LinearReshapeLayer(graph, 'reshape'),\n LinearMultiCharOutputLayer(5, graph, 'classificador'),\n )\n\ndef C5o6C5o12Rfl100Mch(graph):\n return StackedLayers(\n InputLayer([None, 50, 200, 3], [None, 5, 36], graph, 'input'),\n ConvLayer([5, 5, 3, 6], 1, graph, 'conv1'),\n ConvLayer([5, 5, 6, 12], 2, graph, 'conv2'),\n LinearReshapeLayer(graph, 'reshape'),\n LinearLayer(100, graph, 'dense1'),\n LinearMultiCharOutputLayer(5, graph, 'classificador'),\n )\n\ndef C5o6C5o12Rfl100MchMax(graph):\n return StackedLayers(\n InputLayer([None, 50, 200, 3], [None, 5, 36], graph, 'input'),\n ConvLayer([5, 5, 3, 6], 1, graph, 'conv1'),\n ConvLayer([5, 5, 6, 12], 2, graph, 'conv2', True),\n LinearReshapeLayer(graph, 'reshape'),\n LinearLayer(100, graph, 'dense1'),\n LinearMultiCharOutputLayer(5, graph, 'classificador'),\n )\n\ndef C5o6C5o12C5o36C5o36RMch(graph):\n return StackedLayers(\n InputLayer([None, 50, 200, 3], [None, 5, 36], graph, 'input'),\n ConvLayer([5, 5, 3, 6], 1, graph, 'conv1'),\n ConvLayer([5, 5, 6, 12], 2, graph, 'conv2'),\n ConvLayer([5, 5, 12, 36], 2, graph, 'conv3'),\n ConvLayer([5, 5, 36, 36], 2, graph, 'conv4'),\n LinearReshapeLayer(graph, 'reshape'),\n LinearMultiCharOutputLayer(5, graph, 'classificador'),\n )\n\ndef C5o6C5o12C5o36C5o36RMchD(graph):\n return StackedLayers(\n InputLayer([None, 50, 200, 3], [None, 5, 36], graph, 'input'),\n ConvLayer([5, 5, 
3, 6], 1, graph, 'conv1'),\n DropOutLayer(graph, 'dropout1'),\n ConvLayer([5, 5, 6, 12], 2, graph, 'conv2'),\n DropOutLayer(graph, 'dropout2'),\n ConvLayer([5, 5, 12, 36], 2, graph, 'conv4'),\n DropOutLayer(graph, 'dropout3'),\n ConvLayer([5, 5, 36, 36], 2, graph, 'conv5'),\n DropOutLayer(graph, 'dropout3'),\n LinearReshapeLayer(graph, 'reshape'),\n LinearMultiCharOutputLayer(5, graph, 'classificador'),\n )\n\n\ndef C5o6C5o12C5o36C5o36RMchMax(graph):\n return StackedLayers(\n InputLayer([None, 50, 200, 3], [None, 5, 36], graph, 'input'),\n ConvLayer([5, 5, 3, 6], 1, graph, 'conv1'),\n ConvLayer([5, 5, 6, 12], 2, graph, 'conv2', True),\n ConvLayer([5, 5, 12, 36], 2, graph, 'conv3', True),\n ConvLayer([5, 5, 36, 36], 2, graph, 'conv4', True),\n LinearReshapeLayer(graph, 'reshape'),\n LinearMultiCharOutputLayer(5, graph, 'classificador'),\n )\n\ndef C5o6C5o12C5o36C5o36Rfl100Mch(graph):\n return StackedLayers(\n InputLayer([None, 50, 200, 3], [None, 5, 36], graph, 'input'),\n ConvLayer([5, 5, 3, 6], 1, graph, 'conv1'),\n ConvLayer([5, 5, 6, 12], 2, graph, 'conv3'),\n ConvLayer([5, 5, 12, 36], 2, graph, 'conv4'),\n ConvLayer([5, 5, 36, 36], 2, graph, 'conv5'),\n LinearReshapeLayer(graph, 'reshape'),\n LinearLayer(100, graph, 'dense1'),\n LinearMultiCharOutputLayer(5, graph, 'classificador'),\n )\n\n\ndef C5o6C5o12C5o36C5o36Rfl100MchD(graph):\n return StackedLayers(\n InputLayer([None, 50, 200, 3], [None, 5, 36], graph, 'input'),\n ConvLayer([5, 5, 3, 6], 1, graph, 'conv1'),\n DropOutLayer(graph, 'dropout1'),\n ConvLayer([5, 5, 6, 12], 2, graph, 'conv2'),\n DropOutLayer(graph, 'dropout2'),\n ConvLayer([5, 5, 12, 36], 2, graph, 'conv3'),\n DropOutLayer(graph, 'dropout3'),\n ConvLayer([5, 5, 36, 36], 2, graph, 'conv4'),\n DropOutLayer(graph, 'dropout4'),\n LinearReshapeLayer(graph, 'reshape'),\n LinearLayer(100, graph, 'dense1'),\n LinearMultiCharOutputLayer(5, graph, 'classificador'),\n )\n\n\ndef C5o6C5o12C5o36C5o36Rfl100MchMax(graph):\n return StackedLayers(\n InputLayer([None, 50, 200, 3], [None, 5, 36], graph, 'input'),\n ConvLayer([5, 5, 3, 6], 1, graph, 'conv1'),\n ConvLayer([5, 5, 6, 12], 2, graph, 'conv2', True),\n ConvLayer([5, 5, 12, 36], 2, graph, 'conv3', True),\n ConvLayer([5, 5, 36, 36], 2, graph, 'conv4', True),\n LinearReshapeLayer(graph, 'reshape'),\n LinearLayer(100, graph, 'dense1'),\n LinearMultiCharOutputLayer(5, graph, 'classificador'),\n )\n\n\nimport sys\n\nmodels = [\n C5o6RMchMax,\n\n C5o6C5o12RMch,\n C5o6C5o12RMchMax,\n\n C5o6C5o12Rfl100Mch,\n C5o6C5o12Rfl100MchMax,\n\n C5o6C5o12C5o36C5o36RMch,\n C5o6C5o12C5o36C5o36RMchMax,\n\n C5o6C5o12C5o36C5o36Rfl100Mch,\n C5o6C5o12C5o36C5o36Rfl100MchMax,\n]\n\nfor m in models:\n experiment_lrate(m)\n\nsys.exit(0)\n\nmodels = [\n RMch,\n C5o6RMchD,\n C5o6C5o12Rfl100MchD,\n C5o6C5o12C5o36C5o36RMchD,\n]\n\n\nmax_learning_rate = 1e-3\nmin_learning_rate = 1e-4\n\nfor m in models:\n graph = tf.Graph()\n model_spec = m(graph)\n run(model_spec, m.__name__, max_learning_rate, min_learning_rate)\n\n\nsys.exit(0)\n","sub_path":"experiments/exp7.py","file_name":"exp7.py","file_ext":"py","file_size_in_byte":14995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"155863493","text":"# -*- coding: utf-8 -*-\r\nfrom deep_translator import GoogleTranslator\r\nfrom deep_translator import exceptions\r\nimport pyttsx3\r\nimport urllib\r\nimport PySimpleGUI as sg\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\n\r\nhdr = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like 
Gecko) Chrome/23.0.1271.64 Safari/537.11',\r\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\r\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\r\n 'Accept-Encoding': 'none',\r\n 'Accept-Language': 'en-US,en;q=0.8',\r\n 'Connection': 'keep-alive'}\r\nengine = pyttsx3.init()\r\n\r\n\r\ndef crawl(baseurl, starturl, HyperLinkText, depth):\r\n i = 0\r\n currenturl = starturl\r\n regex = r'[^('\r\n urllist = []\r\n while i)\n# or\ntree = MyNewickTree()\ntree.parse()\n# get the root\nroot = tree.root\n\"\"\"\n\nimport abc\nimport warnings\n# custom lib\nfrom . import tree_base\n\n\nclass NewickFormatError(RuntimeError):\n\tpass\n\n\nclass NewickTreeNodeBase(tree_base.TreeNodeBase, abc.ABC):\n\tdef __init__(self, start = None, end = None, *ka, **kw):\n\t\tsuper(NewickTreeNodeBase, self).__init__(*ka, **kw)\n\t\tself.start\t\t= start # position in buf\n\t\tself.end\t\t= end # position in buf\n\t\tself.__ready_for_next_node\t= True # intended to be used by parser only\n\t\treturn\n\n\t############################################################################\n\t# these methods should only be used during parsing\n\t# not encouraged for derived classes to override\n\tdef parser_add_child(self, child):\n\t\t\"\"\"\n\t\tadd a child during parsing; extra logic and checks are done than simply\n\t\tadding a node; these checks are related to format checking, while not\n\t\trequired in other cases;\n\t\t\"\"\"\n\t\tif (not isinstance(child, NewickTreeNodeBase))\\\n\t\t\tor (child.start is None):\n\t\t\traise ValueError(\"child must be NewickTreeNodeBase, its 'start' \"\\\n\t\t\t\t\"must be correctly set before adding as a child\")\n\t\t# in parsing, node and separator are necessary to come one after another\n\t\tif not self.__ready_for_next_node:\n\t\t\traise NewickFormatError(\"expected separator between two nodes at c: \"\n\t\t\t\t\"%d\" % child.start)\n\t\tself.add_child(child)\n\t\tself.__ready_for_next_node = False\n\t\treturn\n\n\tdef parser_add_separator(self):\n\t\t\"\"\"\n\t\tcalled when parser encounters a separator ',';\n\t\ta separator is expected between two nodes, otherwise raise error when\n\t\ttrying adding the second node;\n\t\t\"\"\"\n\t\tself.__ready_for_next_node = True\n\t\treturn\n\n\tdef parser_put_bare_text(self, pos, s):\n\t\t\"\"\"\n\t\tcalled when parser encounters a substring looks like bare text; bare\n\t\ttexts are any characters other than controlling chars '(', ')' and ',';\n\n\t\tparser_put_bare_text() will first check locally to find a correct node\n\t\twhich should accpet this data (or create one if necessary), then call\n\t\tthat node's handler_bare_text() to digest the string; this method is not\n\t\tencouraged to be overriden in derived classes;\n\n\t\tif a new node is created, its type is by default the same as type(self);\n\t\t\"\"\"\n\t\tif self.__ready_for_next_node:\n\t\t\t# in this case, parse the text as a new node and add as child\n\t\t\t# note by default the type of new node is same as type(self)\n\t\t\tnew = type(self)()\n\t\t\tnew.start, new.end = pos, pos + len(s)\n\t\t\tnew.handler_bare_text(s)\n\t\t\tself.parser_add_child(new)\n\t\t\treturn\n\t\telse:\n\t\t\t# safe to not use 'elif self.children' here\n\t\t\t# since if self.__ready_for_next_node is False, then there must be\n\t\t\t# at least one node in self.children\n\t\t\tself.children[-1].handler_bare_text(s)\n\t\treturn\n\n\t############################################################################\n\t# these methods should only be used during parsing\n\t# 
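# A minimal sketch of a concrete node type satisfying the handler_bare_text
# contract described in this module; the usual Newick 'name:branch_length'
# convention is assumed, and this subclass is illustrative, not part of the
# module itself.
class SimpleNewickNode(NewickTreeNodeBase):
    def handler_bare_text(self, s):
        # 'name:length' -> store both pieces; a bare 'name' leaves length unset
        name, _, length = s.partition(':')
        self.label = name or None
        self.branch_length = float(length) if length else None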
derived classes must override these methods (of base class)\n\t@abc.abstractmethod\n\tdef handler_bare_text(self, s) -> None:\n\t\t\"\"\"\n\t\thandler of parsing bare text as extra node information; in general all\n\t\tcharacters other than the sequence controlling '(', ')' and ',' are bare\n\t\ttexts; example:\n\t\t(foo,bar)baz -> 'foo', 'bar' and 'baz' are all bare texts;\n\n\t\tthis handler determines how these texts are parsed and stored as local\n\t\tnode information; note it is not the same as parser_put_bare_text()\n\t\tmethod, which also finds the correct node to put input bare text, though\n\t\teventually parser_put_bare_text() calls handler_bare_text() internally\n\t\ton the node where it finds to put these texts;\n\n\t\tunlike parser_put_bare_text() method, handler_bare_text() is encouraged\n\t\tto be overridden in derived classes; note that overriding method must\n\t\taccept calling signature (self, s), where s is the input text; return\n\t\tvalues of handler_bare_text(), if any, will all be discarded;\n\t\t\"\"\"\n\t\tpass\n\n\nclass NewickTreeBase(tree_base.TreeBase, tree_base.TreeParserAbstract):\n\tnode_type = NewickTreeNodeBase\n\n\tdef parse(self, s):\n\t\t\"\"\"\n\t\tparse the content of string s (in Newick format) as a tree; the tree\n\t\tinstance before parsing must be empty;\n\t\t\"\"\"\n\t\tif not isinstance(s, str):\n\t\t\traise TypeError(\"s must be str\")\n\t\tif self.root is not None:\n\t\t\traise RuntimeError(\"use parse() on a non-emptry tree is prohibited\")\n\t\t# create root node\n\t\troot = self.node_type()\n\t\tself.force_set_root(root)\n\t\t# parser local variables\n\t\troot.start = last_pos = 0 # last_pos is used to slice bare string\n\t\t# keep tracking the top node\n\t\t# top comes from the old approach using stack; since each node knows its\n\t\t# parent node (parent node is unique), then popping the stack is equiv.\n\t\t# to traverse upward to the parent;\n\t\ttop = root\n\t\t# parsing\n\t\tfor pos, c in enumerate(s):\n\t\t\tif c == \"(\":\n\t\t\t\t# encounter a group opening '(' means should initialize a new\n\t\t\t\t# parser and replace 'top' with it\n\t\t\t\t# OLD: push the stack\n\t\t\t\tif pos > last_pos:\n\t\t\t\t\tjunk_start = last_pos\n\t\t\t\t\twarnings.warn(\"junk '%s' discarded at c: %d:%d\"\\\n\t\t\t\t\t\t% (s[junk_start:pos], junk_start, pos))\n\t\t\t\t# open a new node\n\t\t\t\tnew_node = self.node_type(start = pos)\n\t\t\t\ttop.parser_add_child(new_node)\n\t\t\t\ttop = new_node\n\t\t\t\tlast_pos = pos + 1\n\t\t\telif c == \",\":\n\t\t\t\t# this may pass empty string to top.parser_put_bare_text(), and\n\t\t\t\t# same to all below parser_put_bare_text() calls\n\t\t\t\ttop.parser_put_bare_text(pos, s[last_pos:pos])\n\t\t\t\ttop.parser_add_separator()\n\t\t\t\tlast_pos = pos + 1\n\t\t\telif c == \")\":\n\t\t\t\t# encounter a group closing ')' means should finalize the top\n\t\t\t\t# parser and replace 'top' with its parent\n\t\t\t\t# OLD: pop the stack\n\t\t\t\tif top is root:\n\t\t\t\t\t# should not reach root before the buffer string exhausts\n\t\t\t\t\traise NewickFormatError(\"orphan ')' encountered at c: %d\"\\\n\t\t\t\t\t\t% pos)\n\t\t\t\t# finalize the top parser\n\t\t\t\ttop.end = pos + 1\n\t\t\t\ttop.parser_put_bare_text(pos, s[last_pos:pos])\n\t\t\t\ttop = top.parent\n\t\t\t\tlast_pos = pos + 1\n\t\t# check if top is root; if not, must have ummatched parenthese\n\t\t# OLD: check if stack is clean\n\t\tif top is not root:\n\t\t\traise NewickFormatError(\"expected ')' to close '(' at c: %d\"\\\n\t\t\t\t% 
top.start)\n\t\troot.parser_put_bare_text(pos, s[last_pos:])\n\t\troot.end = len(s)\n\t\treturn self\n\n\t@classmethod\n\tdef load_string(cls, s, *ka, **kw):\n\t\t\"\"\"\n\t\tload and parse a string as a tree; keyargs/keywords are passed to the\n\t\ttree factory/initializer;\n\t\t\"\"\"\n\t\t# create tree\n\t\ttree = cls(*ka, **kw)\n\t\ttree.parse(s)\n\t\treturn tree\n","sub_path":"tree/newick_parser_lite.py","file_name":"newick_parser_lite.py","file_ext":"py","file_size_in_byte":6958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"381124166","text":"\"\"\" API Tests \"\"\"\nfrom __future__ import unicode_literals\n\nimport random\nimport unittest\nfrom collections import OrderedDict\nfrom copy import copy\nfrom tempfile import mkstemp\n\nfrom httmock import HTTMock, urlmatch\nfrom six import text_type\nfrom six.moves.urllib.parse import parse_qsl, urlparse\nfrom wordpress.api import API\nfrom wordpress.auth import OAuth\nfrom wordpress.helpers import StrUtils, UrlUtils\n\n\nclass BasicAuthTestcases(unittest.TestCase):\n def setUp(self):\n self.base_url = \"http://localhost:8888/wp-api/\"\n self.api_name = 'wc-api'\n self.api_ver = 'v3'\n self.endpoint = 'products/26'\n self.signature_method = \"HMAC-SHA1\"\n\n self.consumer_key = \"ck_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n self.consumer_secret = \"cs_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n self.api_params = dict(\n url=self.base_url,\n consumer_key=self.consumer_key,\n consumer_secret=self.consumer_secret,\n basic_auth=True,\n api=self.api_name,\n version=self.api_ver,\n query_string_auth=False,\n )\n\n def test_endpoint_url(self):\n api = API(\n **self.api_params\n )\n endpoint_url = api.requester.endpoint_url(self.endpoint)\n endpoint_url = api.auth.get_auth_url(endpoint_url, 'GET')\n self.assertEqual(\n endpoint_url,\n UrlUtils.join_components([\n self.base_url, self.api_name, self.api_ver, self.endpoint\n ])\n )\n\n def test_query_string_endpoint_url(self):\n query_string_api_params = dict(**self.api_params)\n query_string_api_params.update(dict(query_string_auth=True))\n api = API(\n **query_string_api_params\n )\n endpoint_url = api.requester.endpoint_url(self.endpoint)\n endpoint_url = api.auth.get_auth_url(endpoint_url, 'GET')\n expected_endpoint_url = '%s?consumer_key=%s&consumer_secret=%s' % (\n self.endpoint, self.consumer_key, self.consumer_secret)\n expected_endpoint_url = UrlUtils.join_components(\n [self.base_url, self.api_name, self.api_ver, expected_endpoint_url]\n )\n self.assertEqual(\n endpoint_url,\n expected_endpoint_url\n )\n endpoint_url = api.requester.endpoint_url(self.endpoint)\n endpoint_url = api.auth.get_auth_url(endpoint_url, 'GET')\n\n\nclass OAuthTestcases(unittest.TestCase):\n\n def setUp(self):\n self.base_url = \"http://localhost:8888/wordpress/\"\n self.api_name = 'wc-api'\n self.api_ver = 'v3'\n self.endpoint = 'products/99'\n self.signature_method = \"HMAC-SHA1\"\n self.consumer_key = \"ck_681c2be361e415519dce4b65ee981682cda78bc6\"\n self.consumer_secret = \"cs_b11f652c39a0afd3752fc7bb0c56d60d58da5877\"\n\n self.wcapi = API(\n url=self.base_url,\n consumer_key=self.consumer_key,\n consumer_secret=self.consumer_secret,\n api=self.api_name,\n version=self.api_ver,\n signature_method=self.signature_method\n )\n\n self.rfc1_api_url = 'https://photos.example.net/'\n self.rfc1_consumer_key = 'dpf43f3p2l4k3l03'\n self.rfc1_consumer_secret = 'kd94hf93k423kf44'\n self.rfc1_oauth_token = 'hh5s93j4hdidpola'\n self.rfc1_signature_method = 
'HMAC-SHA1'\n self.rfc1_callback = 'http://printer.example.com/ready'\n self.rfc1_api = API(\n url=self.rfc1_api_url,\n consumer_key=self.rfc1_consumer_key,\n consumer_secret=self.rfc1_consumer_secret,\n api='',\n version='',\n callback=self.rfc1_callback,\n wp_user='',\n wp_pass='',\n oauth1a_3leg=True\n )\n self.rfc1_request_method = 'POST'\n self.rfc1_request_target_url = 'https://photos.example.net/initiate'\n self.rfc1_request_timestamp = '137131200'\n self.rfc1_request_nonce = 'wIjqoS'\n self.rfc1_request_params = [\n ('oauth_consumer_key', self.rfc1_consumer_key),\n ('oauth_signature_method', self.rfc1_signature_method),\n ('oauth_timestamp', self.rfc1_request_timestamp),\n ('oauth_nonce', self.rfc1_request_nonce),\n ('oauth_callback', self.rfc1_callback),\n ]\n self.rfc1_request_signature = b'74KNZJeDHnMBp0EMJ9ZHt/XKycU='\n\n self.twitter_api_url = \"https://api.twitter.com/\"\n self.twitter_consumer_secret = \\\n \"kAcSOqF21Fu85e7zjz7ZN2U4ZRhfV3WpwPAoE3Z7kBw\"\n self.twitter_consumer_key = \"xvz1evFS4wEEPTGEFPHBog\"\n self.twitter_signature_method = \"HMAC-SHA1\"\n self.twitter_api = API(\n url=self.twitter_api_url,\n consumer_key=self.twitter_consumer_key,\n consumer_secret=self.twitter_consumer_secret,\n api='',\n version='1',\n signature_method=self.twitter_signature_method,\n )\n\n self.twitter_method = \"POST\"\n self.twitter_target_url = (\n \"https://api.twitter.com/1/statuses/update.json?\"\n \"include_entities=true\"\n )\n self.twitter_params_raw = [\n (\"status\", \"Hello Ladies + Gentlemen, a signed OAuth request!\"),\n (\"include_entities\", \"true\"),\n (\"oauth_consumer_key\", self.twitter_consumer_key),\n (\"oauth_nonce\", \"kYjzVBB8Y0ZFabxSWbWovY3uYSQ2pTgmZeNu2VS4cg\"),\n (\"oauth_signature_method\", self.twitter_signature_method),\n (\"oauth_timestamp\", \"1318622958\"),\n (\"oauth_token\",\n \"370773112-GmHxMAgYyLbNEtIKZeRNFsMKPR9EyMZeS9weJAEb\"),\n (\"oauth_version\", \"1.0\"),\n ]\n self.twitter_param_string = (\n r\"include_entities=true&\"\n r\"oauth_consumer_key=xvz1evFS4wEEPTGEFPHBog&\"\n r\"oauth_nonce=kYjzVBB8Y0ZFabxSWbWovY3uYSQ2pTgmZeNu2VS4cg&\"\n r\"oauth_signature_method=HMAC-SHA1&\"\n r\"oauth_timestamp=1318622958&\"\n r\"oauth_token=370773112-GmHxMAgYyLbNEtIKZeRNFsMKPR9EyMZeS9weJAEb&\"\n r\"oauth_version=1.0&\"\n r\"status=Hello%20Ladies%20%2B%20Gentlemen%2C%20a%20\"\n r\"signed%20OAuth%20request%21\"\n )\n self.twitter_signature_base_string = (\n r\"POST&\"\n r\"https%3A%2F%2Fapi.twitter.com%2F1%2Fstatuses%2Fupdate.json&\"\n r\"include_entities%3Dtrue%26\"\n r\"oauth_consumer_key%3Dxvz1evFS4wEEPTGEFPHBog%26\"\n r\"oauth_nonce%3DkYjzVBB8Y0ZFabxSWbWovY3uYSQ2pTgmZeNu2VS4cg%26\"\n r\"oauth_signature_method%3DHMAC-SHA1%26\"\n r\"oauth_timestamp%3D1318622958%26\"\n r\"oauth_token%3D370773112-\"\n r\"GmHxMAgYyLbNEtIKZeRNFsMKPR9EyMZeS9weJAEb%26\"\n r\"oauth_version%3D1.0%26\"\n r\"status%3DHello%2520Ladies%2520%252B%2520Gentlemen%252C%2520\"\n r\"a%2520signed%2520OAuth%2520request%2521\"\n )\n self.twitter_token_secret = 'LswwdoUaIvS8ltyTt5jkRh4J50vUPVVHtR2YPi5kE'\n self.twitter_signing_key = (\n 'kAcSOqF21Fu85e7zjz7ZN2U4ZRhfV3WpwPAoE3Z7kBw&'\n 'LswwdoUaIvS8ltyTt5jkRh4J50vUPVVHtR2YPi5kE'\n )\n self.twitter_oauth_signature = b'tnnArxj06cWHq44gCs1OSKk/jLY='\n\n self.lexev_consumer_key = 'your_app_key'\n self.lexev_consumer_secret = 'your_app_secret'\n self.lexev_callback = 'http://127.0.0.1/oauth1_callback'\n self.lexev_signature_method = 'HMAC-SHA1'\n self.lexev_version = '1.0'\n self.lexev_api = API(\n url='https://bitbucket.org/',\n api='api',\n 
version='1.0',\n consumer_key=self.lexev_consumer_key,\n consumer_secret=self.lexev_consumer_secret,\n signature_method=self.lexev_signature_method,\n callback=self.lexev_callback,\n wp_user='',\n wp_pass='',\n oauth1a_3leg=True\n )\n self.lexev_request_method = 'POST'\n self.lexev_request_url = \\\n 'https://bitbucket.org/api/1.0/oauth/request_token'\n self.lexev_request_nonce = '27718007815082439851427366369'\n self.lexev_request_timestamp = '1427366369'\n self.lexev_request_params = [\n ('oauth_callback', self.lexev_callback),\n ('oauth_consumer_key', self.lexev_consumer_key),\n ('oauth_nonce', self.lexev_request_nonce),\n ('oauth_signature_method', self.lexev_signature_method),\n ('oauth_timestamp', self.lexev_request_timestamp),\n ('oauth_version', self.lexev_version),\n ]\n self.lexev_request_signature = b\"iPdHNIu4NGOjuXZ+YCdPWaRwvJY=\"\n self.lexev_resource_url = (\n 'https://api.bitbucket.org/1.0/repositories/st4lk/'\n 'django-articles-transmeta/branches'\n )\n\n def test_get_sign_key(self):\n self.assertEqual(\n StrUtils.to_binary(\n self.wcapi.auth.get_sign_key(self.consumer_secret)),\n StrUtils.to_binary(\"%s&\" % self.consumer_secret)\n )\n\n self.assertEqual(\n StrUtils.to_binary(self.wcapi.auth.get_sign_key(\n self.twitter_consumer_secret, self.twitter_token_secret)),\n StrUtils.to_binary(self.twitter_signing_key)\n )\n\n def test_flatten_params(self):\n self.assertEqual(\n StrUtils.to_binary(UrlUtils.flatten_params(\n self.twitter_params_raw)),\n StrUtils.to_binary(self.twitter_param_string)\n )\n\n def test_sorted_params(self):\n # Example given in oauth.net:\n oauthnet_example_sorted = [\n ('a', '1'),\n ('c', 'hi%%20there'),\n ('f', '25'),\n ('f', '50'),\n ('f', 'a'),\n ('z', 'p'),\n ('z', 't')\n ]\n\n oauthnet_example = copy(oauthnet_example_sorted)\n random.shuffle(oauthnet_example)\n\n self.assertEqual(\n UrlUtils.sorted_params(oauthnet_example),\n oauthnet_example_sorted\n )\n\n def test_get_signature_base_string(self):\n twitter_param_string = OAuth.get_signature_base_string(\n self.twitter_method,\n self.twitter_params_raw,\n self.twitter_target_url\n )\n self.assertEqual(\n twitter_param_string,\n self.twitter_signature_base_string\n )\n\n def test_generate_oauth_signature(self):\n\n rfc1_request_signature = self.rfc1_api.auth.generate_oauth_signature(\n self.rfc1_request_method,\n self.rfc1_request_params,\n self.rfc1_request_target_url,\n '%s&' % self.rfc1_consumer_secret\n )\n self.assertEqual(\n text_type(rfc1_request_signature),\n text_type(self.rfc1_request_signature)\n )\n\n # TEST WITH RFC EXAMPLE 3 DATA\n\n # TEST WITH TWITTER DATA\n\n twitter_signature = self.twitter_api.auth.generate_oauth_signature(\n self.twitter_method,\n self.twitter_params_raw,\n self.twitter_target_url,\n self.twitter_signing_key\n )\n self.assertEqual(twitter_signature, self.twitter_oauth_signature)\n\n # TEST WITH LEXEV DATA\n\n lexev_request_signature = self.lexev_api.auth.generate_oauth_signature(\n method=self.lexev_request_method,\n params=self.lexev_request_params,\n url=self.lexev_request_url\n )\n self.assertEqual(lexev_request_signature, self.lexev_request_signature)\n\n def test_add_params_sign(self):\n endpoint_url = self.wcapi.requester.endpoint_url('products?page=2')\n\n params = OrderedDict()\n params[\"oauth_consumer_key\"] = self.consumer_key\n params[\"oauth_timestamp\"] = \"1477041328\"\n params[\"oauth_nonce\"] = \"166182658461433445531477041328\"\n params[\"oauth_signature_method\"] = self.signature_method\n params[\"oauth_version\"] = \"1.0\"\n 
params[\"oauth_callback\"] = 'localhost:8888/wordpress'\n\n signed_url = self.wcapi.auth.add_params_sign(\n \"GET\", endpoint_url, params)\n\n signed_url_params = parse_qsl(urlparse(signed_url).query)\n # self.assertEqual('page', signed_url_params[-1][0])\n self.assertIn('page', dict(signed_url_params))\n\n\nclass OAuth3LegTestcases(unittest.TestCase):\n def setUp(self):\n self.consumer_key = \"ck_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n self.consumer_secret = \"cs_XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX\"\n self.api = API(\n url=\"http://woo.test\",\n consumer_key=self.consumer_key,\n consumer_secret=self.consumer_secret,\n oauth1a_3leg=True,\n wp_user='test_user',\n wp_pass='test_pass',\n callback='http://127.0.0.1/oauth1_callback'\n )\n\n @urlmatch(path=r'.*wp-json.*')\n def woo_api_mock(*args, **kwargs):\n \"\"\" URL Mock \"\"\"\n return {\n 'status_code': 200,\n 'content': b\"\"\"\n {\n \"name\": \"Wordpress\",\n \"description\": \"Just another WordPress site\",\n \"url\": \"http://localhost:8888/wordpress\",\n \"home\": \"http://localhost:8888/wordpress\",\n \"namespaces\": [\n \"wp/v2\",\n \"oembed/1.0\",\n \"wc/v1\"\n ],\n \"authentication\": {\n \"oauth1\": {\n \"request\":\n \"http://localhost:8888/wordpress/oauth1/request\",\n \"authorize\":\n \"http://localhost:8888/wordpress/oauth1/authorize\",\n \"access\":\n \"http://localhost:8888/wordpress/oauth1/access\",\n \"version\": \"0.1\"\n }\n }\n }\n \"\"\"\n }\n\n @urlmatch(path=r'.*oauth.*')\n def woo_authentication_mock(*args, **kwargs):\n \"\"\" URL Mock \"\"\"\n return {\n 'status_code': 200,\n 'content':\n b\"\"\"oauth_token=XXXXXXXXXXXX&oauth_token_secret=YYYYYYYYYYYY\"\"\"\n }\n\n def test_get_sign_key(self):\n oauth_token_secret = \"PNW9j1yBki3e7M7EqB5qZxbe9n5tR6bIIefSMQ9M2pdyRI9g\"\n\n key = self.api.auth.get_sign_key(\n self.consumer_secret, oauth_token_secret)\n self.assertEqual(\n StrUtils.to_binary(key),\n StrUtils.to_binary(\"%s&%s\" %\n (self.consumer_secret, oauth_token_secret))\n )\n\n def test_auth_discovery(self):\n\n with HTTMock(self.woo_api_mock):\n # call requests\n authentication = self.api.auth.authentication\n self.assertEquals(\n authentication,\n {\n \"oauth1\": {\n \"request\":\n \"http://localhost:8888/wordpress/oauth1/request\",\n \"authorize\":\n \"http://localhost:8888/wordpress/oauth1/authorize\",\n \"access\":\n \"http://localhost:8888/wordpress/oauth1/access\",\n \"version\": \"0.1\"\n }\n }\n )\n\n def test_get_request_token(self):\n\n with HTTMock(self.woo_api_mock):\n authentication = self.api.auth.authentication\n self.assertTrue(authentication)\n\n with HTTMock(self.woo_authentication_mock):\n request_token, request_token_secret = \\\n self.api.auth.get_request_token()\n self.assertEquals(request_token, 'XXXXXXXXXXXX')\n self.assertEquals(request_token_secret, 'YYYYYYYYYYYY')\n\n def test_store_access_creds(self):\n _, creds_store_path = mkstemp(\n \"wp-api-python-test-store-access-creds.json\")\n api = API(\n url=\"http://woo.test\",\n consumer_key=self.consumer_key,\n consumer_secret=self.consumer_secret,\n oauth1a_3leg=True,\n wp_user='test_user',\n wp_pass='test_pass',\n callback='http://127.0.0.1/oauth1_callback',\n access_token='XXXXXXXXXXXX',\n access_token_secret='YYYYYYYYYYYY',\n creds_store=creds_store_path\n )\n api.auth.store_access_creds()\n\n with open(creds_store_path) as creds_store_file:\n self.assertEqual(\n creds_store_file.read(),\n ('{\"access_token\": \"XXXXXXXXXXXX\", '\n '\"access_token_secret\": \"YYYYYYYYYYYY\"}')\n )\n\n def 
test_retrieve_access_creds(self):\n _, creds_store_path = mkstemp(\n \"wp-api-python-test-store-access-creds.json\")\n with open(creds_store_path, 'w+') as creds_store_file:\n creds_store_file.write(\n ('{\"access_token\": \"XXXXXXXXXXXX\", '\n '\"access_token_secret\": \"YYYYYYYYYYYY\"}'))\n\n api = API(\n url=\"http://woo.test\",\n consumer_key=self.consumer_key,\n consumer_secret=self.consumer_secret,\n oauth1a_3leg=True,\n wp_user='test_user',\n wp_pass='test_pass',\n callback='http://127.0.0.1/oauth1_callback',\n creds_store=creds_store_path\n )\n\n api.auth.retrieve_access_creds()\n\n self.assertEqual(\n api.auth.access_token,\n 'XXXXXXXXXXXX'\n )\n\n self.assertEqual(\n api.auth.access_token_secret,\n 'YYYYYYYYYYYY'\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_auth.py","file_name":"test_auth.py","file_ext":"py","file_size_in_byte":17637,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"404582934","text":"#! /usr/bin/env python\n\n\"\"\"Calculate isoelectric points of polypeptides using methods from Bjellqvist et al. 1993 and 1994\n\"\"\" \n\npositive_pKs = {'Nterm': 7.5, 'K': 10.0, 'R': 12.0, 'H': 5.98} \nnegative_pKs = {'Cterm': 3.55, 'D': 4.05, 'E': 4.45, 'C': 9.0, 'Y': 10.0} \npKcterminal = {'D': 4.55, 'E': 4.75} \npKnterminal = {'A': 7.59, 'M': 7.0, 'S': 6.93, 'P': 8.36, 'T': 6.82, 'V': 7.44, 'E': 7.7} \ncharged_aas = ('K', 'R', 'H', 'D', 'E', 'C', 'Y') \n\n\nclass IsoelectricPoint(object):\n\tdef __init__(self, ProteinSequence, AminoAcidsContent):\n\t\tself.sequence = ProteinSequence\n\t\tself.charged_aas_content = self._select_charged(AminoAcidsContent)\n\n\tdef _select_charged(self, AminoAcidsContent):\n\t\tcharged = {}\n\t\tfor aa in charged_aas:\n\t\t\tcharged[aa] = float(AminoAcidsContent[aa])\n\t\tcharged['Nterm'] = 1.0\n\t\tcharged['Cterm'] = 1.0\n\t\treturn charged\n\n\tdef _chargeR(self, pH, pos_pKs, neg_pKs):\n\t\tPositiveCharge = 0.0\n\t\tfor aa, pK in pos_pKs.items():\n\t\t\tCR = 10**(pK-pH)\n\t\t\tpartial_charge = CR/(CR+1.0)\n\t\t\tPositiveCharge += self.charged_aas_content[aa] * partial_charge\n\n\t\tNegativeCharge = 0.0\n\t\tfor aa, pK in neg_pKs.items():\n\t\t\tCR = 10**(pH-pK)\n\t\t\tpartial_charge = CR/(CR+1.0)\n\t\t\tNegativeCharge += self.charged_aas_content[aa] * partial_charge\n\n\t\treturn PositiveCharge - NegativeCharge\n\n\tdef pi(self):\n\t\tpos_pKs = dict(positive_pKs)\n\t\tneg_pKs = dict(negative_pKs)\n\t\tnterm = self.sequence[0]\n\t\tcterm = self.sequence[-1]\n\t\tif nterm in pKnterminal:\n\t\t\tpos_pKs['Nterm'] = pKnterminal[nterm]\n\t\tif cterm in pKcterminal:\n\t\t\tneg_pKs['Cterm'] = pKcterminal[cterm]\n\n\t\tpH = 7.0\n\t\tCharge = self._chargeR(pH, pos_pKs, neg_pKs)\n\t\tif Charge > 0.0:\n\t\t\tpH1 = pH\n\t\t\tCharge1 = Charge\n\t\t\twhile Charge1 > 0.0:\n\t\t\t\tpH = pH1 + 1.0\n\t\t\t\tCharge = self._chargeR(pH, pos_pKs, neg_pKs)\n\t\t\t\tif Charge > 0.0:\n\t\t\t\t\tpH1 = pH\n\t\t\t\t\tCharge1 = Charge\n\t\t\t\telse:\n\t\t\t\t\tpH2 = pH\n\t\t\t\t\tCharge2 = Charge\n\t\t\t\t\tbreak\n\t\telse:\n\t\t\tpH2 = pH\n\t\t\tCharge2 = Charge\n\t\t\twhile Charge2 < 0.0:\n\t\t\t\tpH = pH2 - 1.0\n\t\t\t\tCharge = self._chargeR(pH, pos_pKs, neg_pKs)\n\t\t\t\tif Charge < 0.0:\n\t\t\t\t\tpH2 = pH\n\t\t\t\t\tCharge2 = Charge\n\t\t\t\telse:\n\t\t\t\t\tpH1 = pH\n\t\t\t\t\tCharge1 = Charge\n\t\t\t\t\tbreak\n\n\t\twhile pH2 - pH1 > 0.0001 and Charge != 0.0:\n\t\t\tpH = (pH1 + pH2) / 2.0\n\t\t\tCharge = self._chargeR(pH, pos_pKs, neg_pKs)\n\t\t\tif Charge > 0.0:\n\t\t\t\tpH1 
= pH\n\t\t\t\tCharge1 = Charge\n\t\t\telse:\n\t\t\t\tpH2 = pH\n\t\t\t\tCharge2 = Charge\n\n\t\treturn pH","sub_path":"Isoelectric_Point.py","file_name":"Isoelectric_Point.py","file_ext":"py","file_size_in_byte":2341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"650476254","text":"'''**********************\n* An Diankun (安典坤) \n* \n* Adapted from teacher Yang Qiang's (杨强) code; used to capture an employee's face when adding the employee\n* \n**********************'''\nimport cv2\nimport os\nimport numpy as np\nimport random\nfrom PyQt5.QtCore import pyqtSignal,Qt,QObject\n# path of the data file, relative to the data directory under this file's directory\ncurrent_dir = os.path.dirname(__file__)\n# build the absolute path of the data file (Chinese characters in paths are problematic on Windows; the system must be set to utf-8 encoding)\nmod_file = os.path.join(current_dir, \"data/haarcascade_frontalface_alt2.xml\")\n\n\nclass CaptureFacesAI(QObject):\n    sign_ReturnImg = pyqtSignal(np.ndarray)\n    \n    def __init__(self):\n        super(CaptureFacesAI, self).__init__()\n        # size of the captured images\n        self.imgsize = 60\n        # number of captured images\n        self.n = 0\n        # face detection object # due to the Chinese-character path issue a relative path was used directly; note this after installation\n        # self.harr = cv2.CascadeClassifier(\"capturefaces/data/haarcascade_frontalface_alt2.xml\")\n        self.harr = cv2.CascadeClassifier(mod_file)\n    \n    def relight(self, imgsrc, alpha=1, bias=0):\n        '''Adjust image brightness'''\n        imgsrc = imgsrc.astype(float)\n        imgsrc = imgsrc * alpha + bias\n        imgsrc[imgsrc < 0] = 0\n        imgsrc[imgsrc > 255] = 255\n        imgsrc = imgsrc.astype(np.uint8)\n        #print(imgsrc)\n        return imgsrc\n\n    def createdir(self, imgpath):\n        '''Create a directory to store the captured images'''\n        if not os.path.exists(imgpath):\n            os.makedirs(imgpath)\n        self.img_path = imgpath\n        self.n = 0 # restart the count when a new directory is created\n    \n    def handle_image(self, imgsrc):\n        '''Detect faces, apply grayscale processing, then save'''\n        # convert to grayscale\n        gray_img = cv2.cvtColor(imgsrc, cv2.COLOR_BGR2GRAY)\n        # detect faces\n        faces = self.harr.detectMultiScale(gray_img, 1.3, 5)\n\n        # process each face in a loop (resize, grayscale variation, save)\n        if len(faces) == 1: # if multiple faces are detected, do not process\n            for f_x, f_y, f_w, f_h in faces:\n                face = imgsrc[f_y:f_y+f_h, f_x:f_x+f_w]\n                face = cv2.resize(face, (self.imgsize, self.imgsize))\n                # apply grayscale (brightness) processing to the image, producing 3 variants (so the AI model trained later generalizes somewhat across grayscale levels)\n                face = self.relight(face, random.uniform(0.5, 1.5), random.randint(-50, 50))\n                #print(type(face))#numpy.ndarray\n                #cv2.imwrite(os.path.join(self.img_path, F\"{self.n:03d}_1.jpg\"), face)\n                self.sign_ReturnImg.emit(face)\n                \n                face = self.relight(face, random.uniform(0.5, 1.5), random.randint(-50, 50))\n                #cv2.imwrite(os.path.join(self.img_path, F\"{self.n:03d}_2.jpg\"), face)\n                self.sign_ReturnImg.emit(face)\n\n                face = self.relight(face, random.uniform(0.5, 1.5), random.randint(-50, 50))\n                #cv2.imwrite(os.path.join(self.img_path, F\"{self.n:03d}_3.jpg\"), face)\n                self.sign_ReturnImg.emit(face)\n                # mark the face\n                imgsrc = cv2.rectangle(imgsrc, (f_x, f_y), (f_x + f_w, f_y + f_h), (255, 0, 0), 1)\n            \n            self.n += 1\n        # return the marked image\n        #self.sign_ReturnImg.emit(face)\n        return imgsrc\n    ","sub_path":"finalVersion/version12/UiCodes/facialFeatureExtraction/capturefaces/CaptureFacesAIStaff.py","file_name":"CaptureFacesAIStaff.py","file_ext":"py","file_size_in_byte":3385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"594653916","text":"import time\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.by import By\r\n\r\ndriver = webdriver.Chrome(executable_path='D:\\Drivers\\chromedriver.exe')\r\n#driver = webdriver.Firefox(executable_path='D:\\Drivers\\geckodriver.exe')\r\n#driver = 
webdriver.Ie(executable_path='D:\\Drivers\\IEDriverServer.exe')\r\n\r\ndriver.set_page_load_timeout(10)\r\ndriver.get('https://13.126.51.129')\r\ndriver.maximize_window()\r\ndriver.refresh()\r\n\r\n#title, URL\r\nprint('Title: ',driver.title)\r\nprint('Current URL: ', driver.current_url)\r\n\r\n#Login\r\ndriver.find_element_by_id('id_username').send_keys('setup')\r\ndriver.find_element_by_id('id_password').send_keys('0f4e7352eb9995241')\r\ndriver.find_element_by_id('id_loginButton').click()\r\nprint('Login successful')\r\n\r\ntime.sleep(2)\r\ndriver.find_element_by_css_selector('button.c-dock__new-dashboard.Button').click()\r\none=driver.find_element_by_css_selector('input.qa-properties__name.c-input.c-field') #dashboard name field\r\nprint(one.is_displayed())\r\nprint(one.is_enabled())\r\ndriver.find_element_by_css_selector('button.Button.qa-properties__cancel').click()\r\n\r\n\r\n'''\r\ntime.sleep(3)\r\ndriver.get('https://www.google.com')\r\nprint('title: ', driver.title)\r\n\r\ntime.sleep(3)\r\ndriver.back()\r\ntime.sleep(3)\r\ndriver.forward()\r\n\r\ndriver.find_element_by_class_name(\"fa.fa-stack-1x.fa-exclamation\").click() #system settings gear\r\n\r\n#Create new dashboard\r\ndriver.implicitly_wait(5)\r\ndriver.find_element_by_css_selector('button.c-dock__new-dashboard.Button').click()\r\ndriver.find_element_by_css_selector('input.qa-properties__name.c-input.c-field').clear()\r\ndriver.find_element_by_css_selector('input.qa-properties__name.c-input.c-field').send_keys('AFourtech')\r\ndriver.implicitly_wait(5)\r\ndriver.find_element_by_css_selector('button.Button.Button-highlight.qa-properties__save').click()\r\nprint('New dashboard created successfully')\r\n'''\r\n\r\ntime.sleep(2)\r\ndriver.quit()","sub_path":"test1file.py","file_name":"test1file.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"326544528","text":"#!/usr/bin/python3\n\nfrom bs4 import BeautifulSoup\nimport requests\n\n\nurl = 'https://www.calc.ru/Bitcoin-k-rublyu-online.html'\npage = requests.get(url)\n#print(page.status_code)\nnews = []\nnew_news = []\n\nsoup = BeautifulSoup(page.text, \"html.parser\")\n#print(soup)\n\nnews = soup.findAll(class_='t18', style=\"font-size: 24px;\")\n#news = soup.findAll(style=\"font-size: 24px;\")\n#print(soup.class_='t18')\n\n#btc = news.findAll('b')\n#print(news)\n\nfor tag in soup.find_all(class_='t18'):\n print(tag.text[0:21])\n","sub_path":"python/btc.py","file_name":"btc.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"89169119","text":"#\n# Copyright (c) 2014-2017 Wind River Systems, Inc.\n#\n# SPDX-License-Identifier: Apache-2.0\n#\n\n\"\"\"\nBackup & Restore\n\"\"\"\n\nfrom __future__ import print_function\nimport copy\nimport filecmp\nimport fileinput\nimport os\nimport glob\nimport shutil\nimport stat\nimport subprocess\nimport tarfile\nimport tempfile\nimport textwrap\nimport time\n\nfrom fm_api import constants as fm_constants\nfrom fm_api import fm_api\nfrom sysinv.common import constants as sysinv_constants\n\nfrom controllerconfig.common import log\nfrom controllerconfig.common import constants\nfrom controllerconfig.common.exceptions import BackupFail\nfrom controllerconfig.common.exceptions import RestoreFail\nfrom controllerconfig.common.exceptions import KeystoneFail\nfrom controllerconfig.common.exceptions import SysInvFail\nfrom controllerconfig import 
openstack\nimport tsconfig.tsconfig as tsconfig\nfrom controllerconfig import utils\nfrom controllerconfig import sysinv_api as sysinv\nfrom six.moves import input\nfrom os import environ\n\nLOG = log.get_logger(__name__)\n\nDEVNULL = open(os.devnull, 'w')\nRESTORE_COMPLETE = \"restore-complete\"\nRESTORE_RERUN_REQUIRED = \"restore-rerun-required\"\n\n# Backup/restore related constants\nbackup_in_progress = tsconfig.BACKUP_IN_PROGRESS_FLAG\nrestore_in_progress = tsconfig.RESTORE_IN_PROGRESS_FLAG\nrestore_patching_complete = '/etc/platform/.restore_patching_complete'\nnode_is_patched = '/var/run/node_is_patched'\nkeyring_permdir = os.path.join('/opt/platform/.keyring', tsconfig.SW_VERSION)\nceph_permdir = os.path.join(tsconfig.CONFIG_PATH, 'ceph-config')\nldap_permdir = '/var/lib/openldap-data'\npatching_permdir = '/opt/patching'\npatching_repo_permdir = '/www/pages/updates'\nhome_permdir = '/home'\nextension_permdir = '/opt/extension'\npatch_vault_permdir = '/opt/patch-vault'\nmariadb_pod = 'mariadb-server-0'\n\nkube_config = environ.get('KUBECONFIG')\nif kube_config is None:\n kube_config = '/etc/kubernetes/admin.conf'\n\n\nkube_cmd_prefix = 'kubectl --kubeconfig=%s ' % kube_config\nkube_cmd_prefix += 'exec -i %s -n openstack -- bash -c ' % mariadb_pod\n\nmysql_prefix = '\\'exec mysql -uroot -p\"$MYSQL_ROOT_PASSWORD\" '\nmysqldump_prefix = '\\'exec mysqldump -uroot -p\"$MYSQL_ROOT_PASSWORD\" '\n\n\ndef get_backup_databases():\n \"\"\"\n Retrieve database lists for backup.\n :return: backup_databases and backup_database_skip_tables\n \"\"\"\n\n # Databases common to all configurations\n REGION_LOCAL_DATABASES = ('postgres', 'template1', 'sysinv',\n 'fm', 'barbican')\n REGION_SHARED_DATABASES = ('keystone',)\n\n # Indicates which tables have to be dropped for a certain database.\n DB_TABLE_SKIP_MAPPING = {\n 'fm': ('alarm',),\n 'dcorch': ('orch_job',\n 'orch_request',\n 'resource',\n 'subcloud_resource'), }\n\n if tsconfig.region_config == 'yes':\n BACKUP_DATABASES = REGION_LOCAL_DATABASES\n else:\n # Add additional databases for non-region configuration and for the\n # primary region in region deployments.\n BACKUP_DATABASES = REGION_LOCAL_DATABASES + REGION_SHARED_DATABASES\n\n # Add distributed cloud databases\n if tsconfig.distributed_cloud_role == \\\n sysinv_constants.DISTRIBUTED_CLOUD_ROLE_SYSTEMCONTROLLER:\n BACKUP_DATABASES += ('dcmanager', 'dcorch')\n\n # We generate the tables to be skipped for each database\n # mentioned in BACKUP_DATABASES. 
We explicitly list\n # skip tables in DB_TABLE_SKIP_MAPPING\n BACKUP_DB_SKIP_TABLES = dict(\n [[x, DB_TABLE_SKIP_MAPPING.get(x, ())] for x in BACKUP_DATABASES])\n\n return BACKUP_DATABASES, BACKUP_DB_SKIP_TABLES\n\n\ndef get_os_backup_databases():\n \"\"\"\n Retrieve openstack database lists from MariaDB for backup.\n :return: os_backup_databases\n \"\"\"\n\n skip_dbs = (\"Database\", \"information_schema\", \"performance_schema\",\n \"mysql\", \"horizon\", \"panko\", \"gnocchi\")\n\n try:\n db_cmd = kube_cmd_prefix + mysql_prefix + '-e\"show databases\" \\''\n\n proc = subprocess.Popen([db_cmd], shell=True,\n stdout=subprocess.PIPE, stderr=DEVNULL)\n\n os_backup_dbs = set(line[:-1] for line in proc.stdout\n if line[:-1] not in skip_dbs)\n\n proc.communicate()\n\n return os_backup_dbs\n\n except subprocess.CalledProcessError:\n raise BackupFail(\"Failed to get openstack databases from MariaDB.\")\n\n\ndef check_load_versions(archive, staging_dir):\n match = False\n try:\n member = archive.getmember('etc/build.info')\n archive.extract(member, path=staging_dir)\n match = filecmp.cmp('/etc/build.info', staging_dir + '/etc/build.info')\n shutil.rmtree(staging_dir + '/etc')\n except Exception as e:\n LOG.exception(e)\n raise RestoreFail(\"Unable to verify load version in backup file. \"\n \"Invalid backup file.\")\n\n if not match:\n LOG.error(\"Load version mismatch.\")\n raise RestoreFail(\"Load version of backup does not match the \"\n \"version of the installed load.\")\n\n\ndef get_subfunctions(filename):\n \"\"\"\n Retrieves the subfunctions from a platform.conf file.\n :param filename: file to retrieve subfunctions from\n :return: a list of the subfunctions or None if no subfunctions exist\n \"\"\"\n matchstr = 'subfunction='\n\n with open(filename, 'r') as f:\n for line in f:\n if matchstr in line:\n parsed = line.split('=')\n return parsed[1].rstrip().split(\",\")\n return\n\n\ndef check_load_subfunctions(archive, staging_dir):\n \"\"\"\n Verify that the subfunctions in the backup match the installed load.\n :param archive: backup archive\n :param staging_dir: staging directory\n :return: raises exception if the subfunctions do not match\n \"\"\"\n match = False\n backup_subfunctions = None\n try:\n member = archive.getmember('etc/platform/platform.conf')\n archive.extract(member, path=staging_dir)\n backup_subfunctions = get_subfunctions(staging_dir +\n '/etc/platform/platform.conf')\n shutil.rmtree(staging_dir + '/etc')\n if set(backup_subfunctions) ^ set(tsconfig.subfunctions):\n # The set of subfunctions do not match\n match = False\n else:\n match = True\n except Exception:\n LOG.exception(\"Unable to verify subfunctions in backup file\")\n raise RestoreFail(\"Unable to verify subfunctions in backup file. 
\"\n \"Invalid backup file.\")\n\n if not match:\n LOG.error(\"Subfunction mismatch - backup: %s, installed: %s\" %\n (str(backup_subfunctions), str(tsconfig.subfunctions)))\n raise RestoreFail(\"Subfunctions in backup load (%s) do not match the \"\n \"subfunctions of the installed load (%s).\" %\n (str(backup_subfunctions),\n str(tsconfig.subfunctions)))\n\n\ndef file_exists_in_archive(archive, file_path):\n \"\"\" Check if file exists in archive \"\"\"\n try:\n archive.getmember(file_path)\n return True\n\n except KeyError:\n LOG.info(\"File %s is not in archive.\" % file_path)\n return False\n\n\ndef filter_directory(archive, directory):\n for tarinfo in archive:\n if tarinfo.name.split('/')[0] == directory:\n yield tarinfo\n\n\ndef backup_etc_size():\n \"\"\" Backup etc size estimate \"\"\"\n try:\n total_size = utils.directory_get_size('/etc')\n return total_size\n except OSError:\n LOG.error(\"Failed to estimate backup etc size.\")\n raise BackupFail(\"Failed to estimate backup etc size\")\n\n\ndef backup_etc(archive):\n \"\"\" Backup etc \"\"\"\n try:\n archive.add('/etc', arcname='etc')\n\n except tarfile.TarError:\n LOG.error(\"Failed to backup etc.\")\n raise BackupFail(\"Failed to backup etc\")\n\n\ndef restore_etc_file(archive, dest_dir, etc_file):\n \"\"\" Restore etc file \"\"\"\n try:\n # Change the name of this file to remove the leading path\n member = archive.getmember('etc/' + etc_file)\n # Copy the member to avoid changing the name for future operations on\n # this member.\n temp_member = copy.copy(member)\n temp_member.name = os.path.basename(temp_member.name)\n archive.extract(temp_member, path=dest_dir)\n\n except tarfile.TarError:\n LOG.error(\"Failed to restore etc file.\")\n raise RestoreFail(\"Failed to restore etc file\")\n\n\ndef restore_etc_ssl_dir(archive, configpath=constants.CONFIG_WORKDIR):\n \"\"\" Restore the etc SSL dir \"\"\"\n\n def filter_etc_ssl_private(members):\n for tarinfo in members:\n if 'etc/ssl/private' in tarinfo.name:\n yield tarinfo\n\n if file_exists_in_archive(archive, 'config/server-cert.pem'):\n restore_config_file(\n archive, configpath, 'server-cert.pem')\n\n if file_exists_in_archive(archive, 'etc/ssl/private'):\n # NOTE: This will include all TPM certificate files if TPM was\n # enabled on the backed up system. 
However in that case, this\n # restoration is only done for the first controller and TPM\n # will need to be reconfigured once duplex controller (if any)\n # is restored.\n archive.extractall(path='/',\n members=filter_etc_ssl_private(archive))\n\n\ndef restore_ceph_external_config_files(archive, staging_dir):\n # Restore ceph-config.\n if file_exists_in_archive(archive, \"config/ceph-config\"):\n restore_config_dir(archive, staging_dir, 'ceph-config', ceph_permdir)\n\n # Copy the file to /etc/ceph.\n # There might be no files to copy, so don't check the return code.\n cp_command = ('cp -Rp ' + os.path.join(ceph_permdir, '*') +\n ' /etc/ceph/')\n subprocess.call(cp_command, shell=True)\n\n\ndef backup_config_size(config_permdir):\n \"\"\" Backup configuration size estimate \"\"\"\n try:\n return(utils.directory_get_size(config_permdir))\n\n except OSError:\n LOG.error(\"Failed to estimate backup configuration size.\")\n raise BackupFail(\"Failed to estimate backup configuration size\")\n\n\ndef backup_config(archive, config_permdir):\n \"\"\" Backup configuration \"\"\"\n try:\n # The config dir is versioned, but we're only grabbing the current\n # release\n archive.add(config_permdir, arcname='config')\n\n except tarfile.TarError:\n LOG.error(\"Failed to backup config.\")\n raise BackupFail(\"Failed to backup configuration\")\n\n\ndef restore_config_file(archive, dest_dir, config_file):\n \"\"\" Restore configuration file \"\"\"\n try:\n # Change the name of this file to remove the leading path\n member = archive.getmember('config/' + config_file)\n # Copy the member to avoid changing the name for future operations on\n # this member.\n temp_member = copy.copy(member)\n temp_member.name = os.path.basename(temp_member.name)\n archive.extract(temp_member, path=dest_dir)\n\n except tarfile.TarError:\n LOG.error(\"Failed to restore config file %s.\" % config_file)\n raise RestoreFail(\"Failed to restore configuration\")\n\n\ndef restore_configuration(archive, staging_dir):\n \"\"\" Restore configuration \"\"\"\n try:\n os.makedirs(constants.CONFIG_WORKDIR, stat.S_IRWXU | stat.S_IRGRP |\n stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH)\n except OSError:\n LOG.error(\"Failed to create config directory: %s\",\n constants.CONFIG_WORKDIR)\n raise RestoreFail(\"Failed to restore configuration files\")\n\n # Restore cgcs_config file from original installation for historical\n # purposes. Not used to restore the system as the information in this\n # file is out of date (not updated after original installation).\n restore_config_file(archive, constants.CONFIG_WORKDIR, 'cgcs_config')\n\n # Restore platform.conf file and update as necessary. 
The file will be\n # created in a temporary location and then moved into place when it is\n # complete to prevent access to a partially created file.\n restore_etc_file(archive, staging_dir, 'platform/platform.conf')\n temp_platform_conf_file = os.path.join(tsconfig.PLATFORM_CONF_PATH,\n 'platform.conf.temp')\n shutil.copyfile(os.path.join(staging_dir, 'platform.conf'),\n temp_platform_conf_file)\n install_uuid = utils.get_install_uuid()\n for line in fileinput.FileInput(temp_platform_conf_file, inplace=1):\n if line.startswith(\"INSTALL_UUID=\"):\n # The INSTALL_UUID must be updated to match the new INSTALL_UUID\n # which was generated when this controller was installed prior to\n # doing the restore.\n print(\"INSTALL_UUID=%s\" % install_uuid)\n elif line.startswith(\"management_interface=\") or \\\n line.startswith(\"oam_interface=\") or \\\n line.startswith(\"cluster_host_interface=\") or \\\n line.startswith(\"UUID=\"):\n # Strip out any entries that are host specific as the backup can\n # be done on either controller. The application of the\n # platform_conf manifest will add these back in.\n pass\n else:\n print(line, end='')\n fileinput.close()\n # Move updated platform.conf file into place.\n os.rename(temp_platform_conf_file, tsconfig.PLATFORM_CONF_FILE)\n\n # Kick tsconfig to reload the platform.conf file\n tsconfig._load()\n\n # Restore branding\n restore_config_dir(archive, staging_dir, 'branding', '/opt/branding/')\n\n # Restore banner customization\n restore_config_dir(archive, staging_dir, 'banner/etc', '/opt/banner')\n\n # Restore ssh configuration\n restore_config_dir(archive, staging_dir, 'ssh_config',\n constants.CONFIG_WORKDIR + '/ssh_config')\n\n # Configure hostname\n utils.configure_hostname('controller-0')\n\n # Restore hosts file\n restore_etc_file(archive, '/etc', 'hosts')\n restore_etc_file(archive, constants.CONFIG_WORKDIR, 'hosts')\n\n # Restore certificate files\n restore_etc_ssl_dir(archive)\n\n\ndef filter_pxelinux(archive):\n for tarinfo in archive:\n if tarinfo.name.find('config/pxelinux.cfg') == 0:\n yield tarinfo\n\n\ndef restore_dnsmasq(archive, config_permdir):\n \"\"\" Restore dnsmasq \"\"\"\n try:\n etc_files = ['hosts']\n\n perm_files = ['hosts',\n 'dnsmasq.hosts', 'dnsmasq.leases',\n 'dnsmasq.addn_hosts']\n\n for etc_file in etc_files:\n restore_config_file(archive, '/etc', etc_file)\n\n for perm_file in perm_files:\n restore_config_file(archive, config_permdir, perm_file)\n\n # Extract distributed cloud addn_hosts file if present in archive.\n if file_exists_in_archive(\n archive, 'config/dnsmasq.addn_hosts_dc'):\n restore_config_file(archive, config_permdir,\n 'dnsmasq.addn_hosts_dc')\n\n tmpdir = tempfile.mkdtemp(prefix=\"pxerestore_\")\n\n archive.extractall(tmpdir,\n members=filter_pxelinux(archive))\n\n if os.path.exists(tmpdir + '/config/pxelinux.cfg'):\n shutil.rmtree(config_permdir + 'pxelinux.cfg', ignore_errors=True)\n shutil.move(tmpdir + '/config/pxelinux.cfg', config_permdir)\n\n shutil.rmtree(tmpdir, ignore_errors=True)\n\n except (shutil.Error, subprocess.CalledProcessError, tarfile.TarError):\n LOG.error(\"Failed to restore dnsmasq config.\")\n raise RestoreFail(\"Failed to restore dnsmasq files\")\n\n\ndef backup_puppet_data_size(puppet_permdir):\n \"\"\" Backup puppet data size estimate \"\"\"\n try:\n return(utils.directory_get_size(puppet_permdir))\n\n except OSError:\n LOG.error(\"Failed to estimate backup puppet data size.\")\n raise BackupFail(\"Failed to estimate backup puppet data size\")\n\n\ndef 
backup_puppet_data(archive, puppet_permdir):\n \"\"\" Backup puppet data \"\"\"\n try:\n # The puppet dir is versioned, but we're only grabbing the current\n # release\n archive.add(puppet_permdir, arcname='hieradata')\n\n except tarfile.TarError:\n LOG.error(\"Failed to backup puppet data.\")\n raise BackupFail(\"Failed to backup puppet data\")\n\n\ndef restore_static_puppet_data(archive, puppet_workdir):\n \"\"\" Restore static puppet data \"\"\"\n try:\n member = archive.getmember('hieradata/static.yaml')\n archive.extract(member, path=os.path.dirname(puppet_workdir))\n\n member = archive.getmember('hieradata/secure_static.yaml')\n archive.extract(member, path=os.path.dirname(puppet_workdir))\n\n except tarfile.TarError:\n LOG.error(\"Failed to restore static puppet data.\")\n raise RestoreFail(\"Failed to restore static puppet data\")\n\n except OSError:\n pass\n\n\ndef restore_puppet_data(archive, puppet_workdir, controller_0_address):\n \"\"\" Restore puppet data \"\"\"\n try:\n member = archive.getmember('hieradata/system.yaml')\n archive.extract(member, path=os.path.dirname(puppet_workdir))\n\n member = archive.getmember('hieradata/secure_system.yaml')\n archive.extract(member, path=os.path.dirname(puppet_workdir))\n\n # Only restore controller-0 hieradata\n controller_0_hieradata = 'hieradata/%s.yaml' % controller_0_address\n member = archive.getmember(controller_0_hieradata)\n archive.extract(member, path=os.path.dirname(puppet_workdir))\n\n except tarfile.TarError:\n LOG.error(\"Failed to restore puppet data.\")\n raise RestoreFail(\"Failed to restore puppet data\")\n\n except OSError:\n pass\n\n\ndef backup_armada_manifest_size(armada_permdir):\n \"\"\" Backup armada manifest size estimate \"\"\"\n try:\n return(utils.directory_get_size(armada_permdir))\n\n except OSError:\n LOG.error(\"Failed to estimate backup armada manifest size.\")\n raise BackupFail(\"Failed to estimate backup armada manifest size\")\n\n\ndef backup_armada_manifest_data(archive, armada_permdir):\n \"\"\" Backup armada manifest data \"\"\"\n try:\n archive.add(armada_permdir, arcname='armada')\n\n except tarfile.TarError:\n LOG.error(\"Failed to backup armada manifest data.\")\n raise BackupFail(\"Failed to backup armada manifest data\")\n\n\ndef restore_armada_manifest_data(archive, armada_permdir):\n \"\"\" Restore armada manifest data \"\"\"\n try:\n shutil.rmtree(armada_permdir, ignore_errors=True)\n members = filter_directory(archive, 'armada')\n temp_members = list()\n # remove armada and armada/ from the member path since they are\n # extracted to armada_permdir: /opt/platform/armada/release\n for m in members:\n temp_member = copy.copy(m)\n lst = temp_member.name.split('armada/')\n if len(lst) > 1:\n temp_member.name = lst[1]\n temp_members.append(temp_member)\n archive.extractall(path=armada_permdir, members=temp_members)\n\n except (tarfile.TarError, OSError):\n LOG.error(\"Failed to restore armada manifest.\")\n shutil.rmtree(armada_permdir, ignore_errors=True)\n raise RestoreFail(\"Failed to restore armada manifest\")\n\n\ndef backup_keyring_size(keyring_permdir):\n \"\"\" Backup keyring size estimate \"\"\"\n try:\n return(utils.directory_get_size(keyring_permdir))\n\n except OSError:\n LOG.error(\"Failed to estimate backup keyring size.\")\n raise BackupFail(\"Failed to estimate backup keyring size\")\n\n\ndef backup_keyring(archive, keyring_permdir):\n \"\"\" Backup keyring configuration \"\"\"\n try:\n archive.add(keyring_permdir, arcname='.keyring')\n\n except tarfile.TarError:\n 
LOG.error(\"Failed to backup keyring.\")\n raise BackupFail(\"Failed to backup keyring configuration\")\n\n\ndef restore_keyring(archive, keyring_permdir):\n \"\"\" Restore keyring configuration \"\"\"\n try:\n shutil.rmtree(keyring_permdir, ignore_errors=False)\n members = filter_directory(archive, '.keyring')\n temp_members = list()\n # remove .keyring and .keyring/ from the member path since they are\n # extracted to keyring_permdir: /opt/platform/.keyring/release\n for m in members:\n temp_member = copy.copy(m)\n lst = temp_member.name.split('.keyring/')\n if len(lst) > 1:\n temp_member.name = lst[1]\n temp_members.append(temp_member)\n archive.extractall(path=keyring_permdir, members=temp_members)\n\n except (tarfile.TarError, shutil.Error):\n LOG.error(\"Failed to restore keyring.\")\n shutil.rmtree(keyring_permdir, ignore_errors=True)\n raise RestoreFail(\"Failed to restore keyring configuration\")\n\n\ndef prefetch_keyring(archive):\n \"\"\" Prefetch keyring configuration for manifest use \"\"\"\n keyring_tmpdir = '/tmp/.keyring'\n python_keyring_tmpdir = '/tmp/python_keyring'\n try:\n shutil.rmtree(keyring_tmpdir, ignore_errors=True)\n shutil.rmtree(python_keyring_tmpdir, ignore_errors=True)\n archive.extractall(\n path=os.path.dirname(keyring_tmpdir),\n members=filter_directory(archive,\n os.path.basename(keyring_tmpdir)))\n\n shutil.move(keyring_tmpdir + '/python_keyring', python_keyring_tmpdir)\n\n except (tarfile.TarError, shutil.Error):\n LOG.error(\"Failed to restore keyring.\")\n shutil.rmtree(keyring_tmpdir, ignore_errors=True)\n shutil.rmtree(python_keyring_tmpdir, ignore_errors=True)\n raise RestoreFail(\"Failed to restore keyring configuration\")\n\n\ndef cleanup_prefetched_keyring():\n \"\"\" Cleanup fetched keyring \"\"\"\n try:\n keyring_tmpdir = '/tmp/.keyring'\n python_keyring_tmpdir = '/tmp/python_keyring'\n\n shutil.rmtree(keyring_tmpdir, ignore_errors=True)\n shutil.rmtree(python_keyring_tmpdir, ignore_errors=True)\n\n except shutil.Error:\n LOG.error(\"Failed to cleanup keyring.\")\n raise RestoreFail(\"Failed to cleanup fetched keyring\")\n\n\ndef backup_ldap_size():\n \"\"\" Backup ldap size estimate \"\"\"\n try:\n total_size = 0\n\n proc = subprocess.Popen(\n ['slapcat -d 0 -F /etc/openldap/schema | wc -c'],\n shell=True, stdout=subprocess.PIPE)\n\n for line in proc.stdout:\n total_size = int(line)\n break\n\n proc.communicate()\n\n return total_size\n\n except subprocess.CalledProcessError:\n LOG.error(\"Failed to estimate backup ldap size.\")\n raise BackupFail(\"Failed to estimate backup ldap size\")\n\n\ndef backup_ldap(archive, staging_dir):\n \"\"\" Backup ldap configuration \"\"\"\n try:\n ldap_staging_dir = staging_dir + '/ldap'\n os.mkdir(ldap_staging_dir, 0o655)\n\n subprocess.check_call([\n 'slapcat', '-d', '0', '-F', '/etc/openldap/schema',\n '-l', (ldap_staging_dir + '/ldap.db')], stdout=DEVNULL)\n\n archive.add(ldap_staging_dir + '/ldap.db', arcname='ldap.db')\n\n except (OSError, subprocess.CalledProcessError, tarfile.TarError):\n LOG.error(\"Failed to backup ldap database.\")\n raise BackupFail(\"Failed to backup ldap configuration\")\n\n\ndef restore_ldap(archive, ldap_permdir, staging_dir):\n \"\"\" Restore ldap configuration \"\"\"\n try:\n ldap_staging_dir = staging_dir + '/ldap'\n archive.extract('ldap.db', path=ldap_staging_dir)\n\n utils.stop_lsb_service('openldap')\n\n subprocess.call(['rm', '-rf', ldap_permdir], stdout=DEVNULL)\n os.mkdir(ldap_permdir, 0o755)\n\n subprocess.check_call(['slapadd', '-F', '/etc/openldap/schema',\n 
'-l', ldap_staging_dir + '/ldap.db'],\n stdout=DEVNULL, stderr=DEVNULL)\n\n except (subprocess.CalledProcessError, OSError, tarfile.TarError):\n LOG.error(\"Failed to restore ldap database.\")\n raise RestoreFail(\"Failed to restore ldap configuration\")\n\n finally:\n utils.start_lsb_service('openldap')\n\n\ndef backup_mariadb_size():\n \"\"\" Backup MariaDB size estimate \"\"\"\n try:\n total_size = 0\n\n os_backup_dbs = get_os_backup_databases()\n\n # Backup data for databases.\n for db_elem in os_backup_dbs:\n\n db_cmd = kube_cmd_prefix + mysqldump_prefix\n db_cmd += ' %s\\' | wc -c' % db_elem\n\n proc = subprocess.Popen([db_cmd], shell=True,\n stdout=subprocess.PIPE, stderr=DEVNULL)\n\n total_size += int(proc.stdout.readline())\n proc.communicate()\n\n return total_size\n\n except subprocess.CalledProcessError:\n LOG.error(\"Failed to estimate MariaDB database size.\")\n raise BackupFail(\"Failed to estimate MariaDB database size\")\n\n\ndef backup_mariadb(archive, staging_dir):\n \"\"\" Backup MariaDB data \"\"\"\n try:\n mariadb_staging_dir = staging_dir + '/mariadb'\n os.mkdir(mariadb_staging_dir, 0o655)\n\n os_backup_dbs = get_os_backup_databases()\n\n # Backup data for databases.\n for db_elem in os_backup_dbs:\n db_cmd = kube_cmd_prefix + mysqldump_prefix\n db_cmd += ' %s\\' > %s/%s.sql.data' % (db_elem,\n mariadb_staging_dir, db_elem)\n\n subprocess.check_call([db_cmd], shell=True, stderr=DEVNULL)\n\n archive.add(mariadb_staging_dir, arcname='mariadb')\n\n except (OSError, subprocess.CalledProcessError, tarfile.TarError):\n LOG.error(\"Failed to backup MariaDB databases.\")\n raise BackupFail(\"Failed to backup MariaDB database.\")\n\n\ndef extract_mariadb_data(archive):\n \"\"\" Extract and store MariaDB data \"\"\"\n try:\n # We store MariaDB data in /opt/backups/mariadb for now.\n # After MariaDB service is up, we will populate the\n # database using these data.\n archive.extractall(path=constants.BACKUPS_PATH,\n members=filter_directory(archive, 'mariadb'))\n except (OSError, tarfile.TarError) as e:\n LOG.error(\"Failed to extract and store MariaDB data. Error: %s\", e)\n raise RestoreFail(\"Failed to extract and store MariaDB data.\")\n\n\ndef create_helm_overrides_directory():\n \"\"\"\n Create helm overrides directory\n During restore, application-apply will be done without\n first running application-upload where the helm overrides\n directory is created. 
So we need to create the helm overrides\n directory before running application-apply.\n \"\"\"\n try:\n os.mkdir(constants.HELM_OVERRIDES_PERMDIR, 0o755)\n except OSError:\n LOG.error(\"Failed to create helm overrides directory\")\n raise BackupFail(\"Failed to create helm overrides directory\")\n\n\ndef restore_mariadb():\n \"\"\"\n Restore MariaDB\n\n This function is called after MariaDB service is up\n \"\"\"\n try:\n mariadb_staging_dir = constants.BACKUPS_PATH + '/mariadb'\n # Restore data for databases.\n for data in glob.glob(mariadb_staging_dir + '/*.sql.data'):\n db_elem = data.split('/')[-1].split('.')[0]\n create_db = \"create database %s\" % db_elem\n\n # Create the database\n db_cmd = kube_cmd_prefix + mysql_prefix + '-e\"%s\" \\'' % create_db\n subprocess.check_call([db_cmd], shell=True, stderr=DEVNULL)\n\n # Populate data\n db_cmd = 'cat %s | ' % data\n db_cmd = db_cmd + kube_cmd_prefix + mysql_prefix\n db_cmd += '%s\\' ' % db_elem\n subprocess.check_call([db_cmd], shell=True, stderr=DEVNULL)\n\n shutil.rmtree(mariadb_staging_dir, ignore_errors=True)\n\n except (OSError, subprocess.CalledProcessError) as e:\n LOG.error(\"Failed to restore MariaDB data. Error: %s\", e)\n raise RestoreFail(\"Failed to restore MariaDB data.\")\n\n\ndef backup_postgres_size():\n \"\"\" Backup postgres size estimate \"\"\"\n try:\n total_size = 0\n\n # Backup roles, table spaces and schemas for databases.\n proc = subprocess.Popen([('sudo -u postgres pg_dumpall --clean ' +\n '--schema-only | wc -c')], shell=True,\n stdout=subprocess.PIPE, stderr=DEVNULL)\n\n for line in proc.stdout:\n total_size = int(line)\n break\n\n proc.communicate()\n\n # get backup database\n backup_databases, backup_db_skip_tables = get_backup_databases()\n\n # Backup data for databases.\n for _, db_elem in enumerate(backup_databases):\n\n db_cmd = 'sudo -u postgres pg_dump --format=plain --inserts '\n db_cmd += '--disable-triggers --data-only %s ' % db_elem\n\n for _, table_elem in enumerate(backup_db_skip_tables[db_elem]):\n db_cmd += '--exclude-table=%s ' % table_elem\n\n db_cmd += '| wc -c'\n\n proc = subprocess.Popen([db_cmd], shell=True,\n stdout=subprocess.PIPE, stderr=DEVNULL)\n\n for line in proc.stdout:\n total_size += int(line)\n break\n\n proc.communicate()\n\n return total_size\n\n except subprocess.CalledProcessError:\n LOG.error(\"Failed to estimate backup database size.\")\n raise BackupFail(\"Failed to estimate backup database size\")\n\n\ndef backup_postgres(archive, staging_dir):\n \"\"\" Backup postgres configuration \"\"\"\n try:\n postgres_staging_dir = staging_dir + '/postgres'\n os.mkdir(postgres_staging_dir, 0o655)\n\n # Backup roles, table spaces and schemas for databases.\n subprocess.check_call([('sudo -u postgres pg_dumpall --clean ' +\n '--schema-only' +\n '> %s/%s' % (postgres_staging_dir,\n 'postgres.sql.config'))],\n shell=True, stderr=DEVNULL)\n\n # get backup database\n backup_databases, backup_db_skip_tables = get_backup_databases()\n\n # Backup data for databases.\n for _, db_elem in enumerate(backup_databases):\n\n db_cmd = 'sudo -u postgres pg_dump --format=plain --inserts '\n db_cmd += '--disable-triggers --data-only %s ' % db_elem\n\n for _, table_elem in enumerate(backup_db_skip_tables[db_elem]):\n db_cmd += '--exclude-table=%s ' % table_elem\n\n db_cmd += '> %s/%s.sql.data' % (postgres_staging_dir, db_elem)\n\n subprocess.check_call([db_cmd], shell=True, stderr=DEVNULL)\n\n archive.add(postgres_staging_dir, arcname='postgres')\n\n except (OSError, subprocess.CalledProcessError, 
tarfile.TarError):\n LOG.error(\"Failed to backup postgres databases.\")\n raise BackupFail(\"Failed to backup database configuration\")\n\n\ndef restore_postgres(archive, staging_dir):\n \"\"\" Restore postgres configuration \"\"\"\n try:\n postgres_staging_dir = staging_dir + '/postgres'\n archive.extractall(path=staging_dir,\n members=filter_directory(archive, 'postgres'))\n\n utils.start_service(\"postgresql\")\n\n # Restore roles, table spaces and schemas for databases.\n subprocess.check_call([\"sudo\", \"-u\", \"postgres\", \"psql\", \"-f\",\n postgres_staging_dir +\n '/postgres.sql.config', \"postgres\"],\n stdout=DEVNULL, stderr=DEVNULL)\n\n # Restore data for databases.\n for data in glob.glob(postgres_staging_dir + '/*.sql.data'):\n db_elem = data.split('/')[-1].split('.')[0]\n subprocess.check_call([\"sudo\", \"-u\", \"postgres\", \"psql\", \"-f\",\n data, db_elem],\n stdout=DEVNULL)\n\n except (OSError, subprocess.CalledProcessError, tarfile.TarError) as e:\n LOG.error(\"Failed to restore postgres databases. Error: %s\", e)\n raise RestoreFail(\"Failed to restore database configuration\")\n\n finally:\n utils.stop_service('postgresql')\n\n\ndef filter_config_dir(archive, directory):\n for tarinfo in archive:\n if tarinfo.name.find('config/' + directory) == 0:\n yield tarinfo\n\n\ndef restore_config_dir(archive, staging_dir, config_dir, dest_dir):\n \"\"\" Restore configuration directory if it exists \"\"\"\n try:\n archive.extractall(staging_dir,\n members=filter_config_dir(archive, config_dir))\n\n # Copy files from backup to dest dir\n if (os.path.exists(staging_dir + '/config/' + config_dir) and\n os.listdir(staging_dir + '/config/' + config_dir)):\n subprocess.call([\"mkdir\", \"-p\", dest_dir])\n\n try:\n for f in glob.glob(\n staging_dir + '/config/' + config_dir + '/*'):\n subprocess.check_call([\"cp\", \"-p\", f, dest_dir])\n except IOError:\n LOG.warning(\"Failed to copy %s files\" % config_dir)\n\n except (subprocess.CalledProcessError, tarfile.TarError):\n LOG.info(\"No custom %s config was found during restore.\" % config_dir)\n\n\ndef backup_std_dir_size(directory):\n \"\"\" Backup standard directory size estimate \"\"\"\n try:\n return utils.directory_get_size(directory)\n\n except OSError:\n LOG.error(\"Failed to estimate backup size for %s\" % directory)\n raise BackupFail(\"Failed to estimate backup size for %s\" % directory)\n\n\ndef backup_std_dir(archive, directory):\n \"\"\" Backup standard directory \"\"\"\n try:\n archive.add(directory, arcname=os.path.basename(directory))\n\n except tarfile.TarError:\n LOG.error(\"Failed to backup %s\" % directory)\n raise BackupFail(\"Failed to backup %s\" % directory)\n\n\ndef restore_std_dir(archive, directory):\n \"\"\" Restore standard directory \"\"\"\n try:\n shutil.rmtree(directory, ignore_errors=True)\n # Verify that archive contains this directory\n try:\n archive.getmember(os.path.basename(directory))\n except KeyError:\n LOG.error(\"Archive does not contain directory %s\" % directory)\n raise RestoreFail(\"Invalid backup file - missing directory %s\" %\n directory)\n archive.extractall(\n path=os.path.dirname(directory),\n members=filter_directory(archive, os.path.basename(directory)))\n\n except (shutil.Error, tarfile.TarError):\n LOG.error(\"Failed to restore %s\" % directory)\n raise RestoreFail(\"Failed to restore %s\" % directory)\n\n\ndef configure_loopback_interface(archive):\n \"\"\" Restore and apply configuration for loopback interface \"\"\"\n utils.remove_interface_config_files()\n 
restore_etc_file(\n archive, utils.NETWORK_SCRIPTS_PATH,\n 'sysconfig/network-scripts/' + utils.NETWORK_SCRIPTS_LOOPBACK)\n utils.restart_networking()\n\n\ndef backup_ceph_crush_map(archive, staging_dir):\n \"\"\" Backup ceph crush map \"\"\"\n try:\n ceph_staging_dir = os.path.join(staging_dir, 'ceph')\n os.mkdir(ceph_staging_dir, 0o655)\n crushmap_file = os.path.join(ceph_staging_dir,\n sysinv_constants.CEPH_CRUSH_MAP_BACKUP)\n subprocess.check_call(['ceph', 'osd', 'getcrushmap',\n '-o', crushmap_file], stdout=DEVNULL,\n stderr=DEVNULL)\n archive.add(crushmap_file, arcname='ceph/' +\n sysinv_constants.CEPH_CRUSH_MAP_BACKUP)\n except Exception as e:\n LOG.error('Failed to backup ceph crush map. Reason: {}'.format(e))\n raise BackupFail('Failed to backup ceph crush map')\n\n\ndef restore_ceph_crush_map(archive):\n \"\"\" Restore ceph crush map \"\"\"\n if not file_exists_in_archive(archive, 'ceph/' +\n sysinv_constants.CEPH_CRUSH_MAP_BACKUP):\n return\n\n try:\n crush_map_file = 'ceph/' + sysinv_constants.CEPH_CRUSH_MAP_BACKUP\n if file_exists_in_archive(archive, crush_map_file):\n member = archive.getmember(crush_map_file)\n # Copy the member to avoid changing the name for future\n # operations on this member.\n temp_member = copy.copy(member)\n temp_member.name = os.path.basename(temp_member.name)\n archive.extract(temp_member,\n path=sysinv_constants.SYSINV_CONFIG_PATH)\n\n except tarfile.TarError as e:\n LOG.error('Failed to restore crush map file. Reason: {}'.format(e))\n raise RestoreFail('Failed to restore crush map file')\n\n\ndef check_size(archive_dir):\n \"\"\"Check if there is enough space to create backup.\"\"\"\n backup_overhead_bytes = 1024 ** 3 # extra GB for staging directory\n\n backup_size = (backup_overhead_bytes +\n backup_etc_size() +\n backup_config_size(tsconfig.CONFIG_PATH) +\n backup_puppet_data_size(constants.HIERADATA_PERMDIR) +\n backup_keyring_size(keyring_permdir) +\n backup_ldap_size() +\n backup_postgres_size() +\n backup_std_dir_size(home_permdir) +\n backup_std_dir_size(patching_permdir) +\n backup_std_dir_size(patching_repo_permdir) +\n backup_std_dir_size(extension_permdir) +\n backup_std_dir_size(patch_vault_permdir) +\n backup_armada_manifest_size(constants.ARMADA_PERMDIR) +\n backup_std_dir_size(constants.HELM_CHARTS_PERMDIR) +\n backup_mariadb_size()\n )\n\n archive_dir_free_space = \\\n utils.filesystem_get_free_space(archive_dir)\n\n if backup_size > archive_dir_free_space:\n print(\"Archive directory (%s) does not have enough free \"\n \"space (%s), estimated backup size is %s.\" %\n (archive_dir, utils.print_bytes(archive_dir_free_space),\n utils.print_bytes(backup_size)))\n\n raise BackupFail(\"Not enough free space for backup.\")\n\n\ndef backup(backup_name, archive_dir, clone=False):\n \"\"\"Backup configuration.\"\"\"\n\n if not os.path.isdir(archive_dir):\n raise BackupFail(\"Archive directory (%s) not found.\" % archive_dir)\n\n if not utils.is_active(\"management-ip\"):\n raise BackupFail(\n \"Backups can only be performed from the active controller.\")\n\n if os.path.isfile(backup_in_progress):\n raise BackupFail(\"Backup already in progress.\")\n else:\n open(backup_in_progress, 'w')\n\n fmApi = fm_api.FaultAPIs()\n entity_instance_id = \"%s=%s\" % (fm_constants.FM_ENTITY_TYPE_HOST,\n sysinv_constants.CONTROLLER_HOSTNAME)\n fault = fm_api.Fault(alarm_id=fm_constants.FM_ALARM_ID_BACKUP_IN_PROGRESS,\n alarm_state=fm_constants.FM_ALARM_STATE_SET,\n entity_type_id=fm_constants.FM_ENTITY_TYPE_HOST,\n entity_instance_id=entity_instance_id,\n 
severity=fm_constants.FM_ALARM_SEVERITY_MINOR,\n reason_text=(\"System Backup in progress.\"),\n # operational\n alarm_type=fm_constants.FM_ALARM_TYPE_7,\n # congestion\n probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_8,\n proposed_repair_action=(\"No action required.\"),\n service_affecting=False)\n\n fmApi.set_fault(fault)\n\n staging_dir = None\n system_tar_path = None\n warnings = ''\n try:\n os.chdir('/')\n\n if not clone:\n check_size(archive_dir)\n\n print (\"\\nPerforming backup (this might take several minutes):\")\n staging_dir = tempfile.mkdtemp(dir=archive_dir)\n\n system_tar_path = os.path.join(archive_dir,\n backup_name + '_system.tgz')\n system_archive = tarfile.open(system_tar_path, \"w:gz\")\n\n step = 1\n total_steps = 16\n\n # Step 1: Backup etc\n backup_etc(system_archive)\n utils.progress(total_steps, step, 'backup etc', 'DONE')\n step += 1\n\n # Step 2: Backup configuration\n backup_config(system_archive, tsconfig.CONFIG_PATH)\n utils.progress(total_steps, step, 'backup configuration', 'DONE')\n step += 1\n\n # Step 3: Backup puppet data\n backup_puppet_data(system_archive, constants.HIERADATA_PERMDIR)\n utils.progress(total_steps, step, 'backup puppet data', 'DONE')\n step += 1\n\n # Step 4: Backup armada data\n backup_armada_manifest_data(system_archive, constants.ARMADA_PERMDIR)\n utils.progress(total_steps, step, 'backup armada data', 'DONE')\n step += 1\n\n # Step 5: Backup helm charts data\n backup_std_dir(system_archive, constants.HELM_CHARTS_PERMDIR)\n utils.progress(total_steps, step, 'backup helm charts', 'DONE')\n step += 1\n\n # Step 6: Backup keyring\n backup_keyring(system_archive, keyring_permdir)\n utils.progress(total_steps, step, 'backup keyring', 'DONE')\n step += 1\n\n # Step 7: Backup ldap\n backup_ldap(system_archive, staging_dir)\n utils.progress(total_steps, step, 'backup ldap', 'DONE')\n step += 1\n\n # Step 8: Backup postgres\n backup_postgres(system_archive, staging_dir)\n utils.progress(total_steps, step, 'backup postgres', 'DONE')\n step += 1\n\n # Step 9: Backup mariadb\n backup_mariadb(system_archive, staging_dir)\n utils.progress(total_steps, step, 'backup mariadb', 'DONE')\n step += 1\n\n # Step 10: Backup home\n backup_std_dir(system_archive, home_permdir)\n utils.progress(total_steps, step, 'backup home directory', 'DONE')\n step += 1\n\n # Step 11: Backup patching\n if not clone:\n backup_std_dir(system_archive, patching_permdir)\n utils.progress(total_steps, step, 'backup patching', 'DONE')\n step += 1\n\n # Step 12: Backup patching repo\n if not clone:\n backup_std_dir(system_archive, patching_repo_permdir)\n utils.progress(total_steps, step, 'backup patching repo', 'DONE')\n step += 1\n\n # Step 13: Backup extension filesystem\n backup_std_dir(system_archive, extension_permdir)\n utils.progress(total_steps, step, 'backup extension filesystem '\n 'directory', 'DONE')\n step += 1\n\n # Step 14: Backup patch-vault filesystem\n if os.path.exists(patch_vault_permdir):\n backup_std_dir(system_archive, patch_vault_permdir)\n utils.progress(total_steps, step, 'backup patch-vault filesystem '\n 'directory', 'DONE')\n step += 1\n\n # Step 15: Backup ceph crush map\n backup_ceph_crush_map(system_archive, staging_dir)\n utils.progress(total_steps, step, 'backup ceph crush map', 'DONE')\n step += 1\n\n # Step 16: Create archive\n system_archive.close()\n utils.progress(total_steps, step, 'create archive', 'DONE')\n step += 1\n\n except Exception:\n if system_tar_path and os.path.isfile(system_tar_path):\n os.remove(system_tar_path)\n\n 
raise\n finally:\n fmApi.clear_fault(fm_constants.FM_ALARM_ID_BACKUP_IN_PROGRESS,\n entity_instance_id)\n os.remove(backup_in_progress)\n if staging_dir:\n shutil.rmtree(staging_dir, ignore_errors=True)\n\n system_msg = \"System backup file created\"\n if not clone:\n system_msg += \": \" + system_tar_path\n\n print(system_msg)\n if warnings != '':\n print(\"WARNING: The following problems occurred:\")\n print(textwrap.fill(warnings, 80))\n\n\ndef create_restore_runtime_config(filename):\n \"\"\" Create any runtime parameters needed for Restore.\"\"\"\n config = {}\n # We need to re-enable Openstack password rules, which\n # were previously disabled while the controller manifests\n # were applying during a Restore\n config['classes'] = ['keystone::security_compliance']\n utils.create_manifest_runtime_config(filename, config)\n\n\ndef restore_system(backup_file, include_storage_reinstall=False, clone=False):\n \"\"\"Restoring system configuration.\"\"\"\n\n if (os.path.exists(constants.CGCS_CONFIG_FILE) or\n os.path.exists(tsconfig.CONFIG_PATH) or\n os.path.exists(constants.INITIAL_CONFIG_COMPLETE_FILE)):\n print(textwrap.fill(\n \"Configuration has already been done. \"\n \"A system restore operation can only be done \"\n \"immediately after the load has been installed.\", 80))\n print('')\n raise RestoreFail(\"System configuration already completed\")\n\n if not os.path.isabs(backup_file):\n raise RestoreFail(\"Backup file (%s) not found. Full path is \"\n \"required.\" % backup_file)\n\n if os.path.isfile(restore_in_progress):\n raise RestoreFail(\"Restore already in progress.\")\n else:\n open(restore_in_progress, 'w')\n\n # Add newline to console log for install-clone scenario\n newline = clone\n staging_dir = None\n\n try:\n try:\n with open(os.devnull, \"w\") as fnull:\n subprocess.check_call([\"vgdisplay\", \"cgts-vg\"],\n stdout=fnull,\n stderr=fnull)\n except subprocess.CalledProcessError:\n LOG.error(\"The cgts-vg volume group was not found\")\n raise RestoreFail(\"Volume groups not configured\")\n\n print(\"\\nRestoring system (this will take several minutes):\")\n # Use /scratch for the staging dir for now,\n # until /opt/backups is available\n staging_dir = tempfile.mkdtemp(dir='/scratch')\n # Permission change required or postgres restore fails\n subprocess.call(['chmod', 'a+rx', staging_dir], stdout=DEVNULL)\n os.chdir('/')\n\n step = 1\n total_steps = 26\n\n # Step 1: Open archive and verify installed load matches backup\n try:\n archive = tarfile.open(backup_file)\n except tarfile.TarError as e:\n LOG.exception(e)\n raise RestoreFail(\"Error opening backup file. 
Invalid backup \"\n \"file.\")\n check_load_versions(archive, staging_dir)\n check_load_subfunctions(archive, staging_dir)\n utils.progress(total_steps, step, 'open archive', 'DONE', newline)\n step += 1\n\n # Patching is potentially a multi-phase step.\n # If the controller is impacted by patches from the backup,\n # it must be rebooted before continuing the restore.\n # If this is the second pass through, we can skip over this.\n if not os.path.isfile(restore_patching_complete) and not clone:\n # Step 2: Restore patching\n restore_std_dir(archive, patching_permdir)\n utils.progress(total_steps, step, 'restore patching', 'DONE',\n newline)\n step += 1\n\n # Step 3: Restore patching repo\n restore_std_dir(archive, patching_repo_permdir)\n utils.progress(total_steps, step, 'restore patching repo', 'DONE',\n newline)\n step += 1\n\n # Step 4: Apply patches\n try:\n subprocess.check_output([\"sw-patch\", \"install-local\"])\n except subprocess.CalledProcessError:\n LOG.error(\"Failed to install patches\")\n raise RestoreFail(\"Failed to install patches\")\n utils.progress(total_steps, step, 'install patches', 'DONE',\n newline)\n step += 1\n\n open(restore_patching_complete, 'w')\n\n # If the controller was impacted by patches, we need to reboot.\n if os.path.isfile(node_is_patched):\n if not clone:\n print(\"\\nThis controller has been patched. \" +\n \"A reboot is required.\")\n print(\"After the reboot is complete, \" +\n \"re-execute the restore command.\")\n while True:\n user_input = input(\n \"Enter 'reboot' to reboot controller: \")\n if user_input == 'reboot':\n break\n LOG.info(\"This controller has been patched. Rebooting now\")\n print(\"\\nThis controller has been patched. Rebooting now\\n\\n\")\n time.sleep(5)\n os.remove(restore_in_progress)\n if staging_dir:\n shutil.rmtree(staging_dir, ignore_errors=True)\n subprocess.call(\"reboot\")\n\n else:\n # We need to restart the patch controller and agent, since\n # we setup the repo and patch store outside its control\n with open(os.devnull, \"w\") as devnull:\n subprocess.call(\n [\"systemctl\",\n \"restart\",\n \"sw-patch-controller-daemon.service\"],\n stdout=devnull, stderr=devnull)\n subprocess.call(\n [\"systemctl\",\n \"restart\",\n \"sw-patch-agent.service\"],\n stdout=devnull, stderr=devnull)\n if clone:\n # No patches were applied, return to cloning code\n # to run validation code.\n return RESTORE_RERUN_REQUIRED\n else:\n # Add the skipped steps\n step += 3\n\n if os.path.isfile(node_is_patched):\n # If we get here, it means the node was patched by the user\n # AFTER the restore applied patches and rebooted, but didn't\n # reboot.\n # This means the patch lineup no longer matches what's in the\n # backup, but we can't (and probably shouldn't) prevent that.\n # However, since this will ultimately cause the node to fail\n # the goenabled step, we can fail immediately and force the\n # user to reboot.\n print (\"\\nThis controller has been patched, but not rebooted.\")\n print (\"Please reboot before continuing the restore process.\")\n raise RestoreFail(\"Controller node patched without rebooting\")\n\n # Flag can now be cleared\n if os.path.exists(restore_patching_complete):\n os.remove(restore_patching_complete)\n\n # Prefetch keyring\n prefetch_keyring(archive)\n\n # Step 5: Restore configuration\n restore_configuration(archive, staging_dir)\n # In AIO SX systems, the loopback interface is used as the management\n # interface. 
However, the application of the interface manifest will\n # not configure the necessary addresses on the loopback interface (see\n # apply_network_config.sh for details). So, we need to configure the\n # loopback interface here.\n if tsconfig.system_mode == sysinv_constants.SYSTEM_MODE_SIMPLEX:\n configure_loopback_interface(archive)\n # Write the simplex flag\n utils.write_simplex_flag()\n utils.progress(total_steps, step, 'restore configuration', 'DONE',\n newline)\n step += 1\n\n # Step 6: Apply restore bootstrap manifest\n controller_0_address = utils.get_address_from_hosts_file(\n 'controller-0')\n restore_static_puppet_data(archive, constants.HIERADATA_WORKDIR)\n try:\n utils.apply_manifest(controller_0_address,\n sysinv_constants.CONTROLLER,\n 'bootstrap',\n constants.HIERADATA_WORKDIR)\n except Exception as e:\n LOG.exception(e)\n raise RestoreFail(\n 'Failed to apply bootstrap manifest. '\n 'See /var/log/puppet/latest/puppet.log for details.')\n\n utils.progress(total_steps, step, 'apply bootstrap manifest', 'DONE',\n newline)\n step += 1\n\n # Step 7: Restore puppet data\n restore_puppet_data(archive, constants.HIERADATA_WORKDIR,\n controller_0_address)\n utils.progress(total_steps, step, 'restore puppet data', 'DONE',\n newline)\n step += 1\n\n # Step 8: Persist configuration\n utils.persist_config()\n utils.progress(total_steps, step, 'persist configuration', 'DONE',\n newline)\n step += 1\n\n # Step 9: Apply controller manifest\n try:\n utils.apply_manifest(controller_0_address,\n sysinv_constants.CONTROLLER,\n 'controller',\n constants.HIERADATA_PERMDIR)\n except Exception as e:\n LOG.exception(e)\n raise RestoreFail(\n 'Failed to apply controller manifest. '\n 'See /var/log/puppet/latest/puppet.log for details.')\n utils.progress(total_steps, step, 'apply controller manifest', 'DONE',\n newline)\n step += 1\n\n # Step 10: Apply runtime controller manifests\n restore_filename = os.path.join(staging_dir, 'restore.yaml')\n create_restore_runtime_config(restore_filename)\n try:\n utils.apply_manifest(controller_0_address,\n sysinv_constants.CONTROLLER,\n 'runtime',\n constants.HIERADATA_PERMDIR,\n runtime_filename=restore_filename)\n except Exception as e:\n LOG.exception(e)\n raise RestoreFail(\n 'Failed to apply runtime controller manifest. 
'\n 'See /var/log/puppet/latest/puppet.log for details.')\n utils.progress(total_steps, step,\n 'apply runtime controller manifest', 'DONE',\n newline)\n step += 1\n\n # Move the staging dir under /opt/backups, now that it's setup\n shutil.rmtree(staging_dir, ignore_errors=True)\n staging_dir = tempfile.mkdtemp(dir=constants.BACKUPS_PATH)\n # Permission change required or postgres restore fails\n subprocess.call(['chmod', 'a+rx', staging_dir], stdout=DEVNULL)\n\n # Step 11: Apply banner customization\n utils.apply_banner_customization()\n utils.progress(total_steps, step, 'apply banner customization', 'DONE',\n newline)\n step += 1\n\n # Step 12: Restore dnsmasq and pxeboot config\n restore_dnsmasq(archive, tsconfig.CONFIG_PATH)\n utils.progress(total_steps, step, 'restore dnsmasq', 'DONE', newline)\n step += 1\n\n # Step 13: Restore keyring\n restore_keyring(archive, keyring_permdir)\n utils.progress(total_steps, step, 'restore keyring', 'DONE', newline)\n step += 1\n\n # Step 14: Restore ldap\n restore_ldap(archive, ldap_permdir, staging_dir)\n utils.progress(total_steps, step, 'restore ldap', 'DONE', newline)\n step += 1\n\n # Step 15: Restore postgres\n restore_postgres(archive, staging_dir)\n utils.progress(total_steps, step, 'restore postgres', 'DONE', newline)\n step += 1\n\n # Step 16: Extract and store mariadb data\n extract_mariadb_data(archive)\n utils.progress(total_steps, step, 'extract mariadb', 'DONE', newline)\n step += 1\n\n # Step 17: Restore ceph crush map\n restore_ceph_crush_map(archive)\n utils.progress(total_steps, step, 'restore ceph crush map', 'DONE',\n newline)\n step += 1\n\n # Step 18: Restore home\n restore_std_dir(archive, home_permdir)\n utils.progress(total_steps, step, 'restore home directory', 'DONE',\n newline)\n step += 1\n\n # Step 19: Restore extension filesystem\n restore_std_dir(archive, extension_permdir)\n utils.progress(total_steps, step, 'restore extension filesystem '\n 'directory', 'DONE', newline)\n step += 1\n\n # Step 20: Restore patch-vault filesystem\n if file_exists_in_archive(archive,\n os.path.basename(patch_vault_permdir)):\n restore_std_dir(archive, patch_vault_permdir)\n utils.progress(total_steps, step, 'restore patch-vault filesystem '\n 'directory', 'DONE', newline)\n\n step += 1\n\n # Step 21: Restore external ceph configuration files.\n restore_ceph_external_config_files(archive, staging_dir)\n utils.progress(total_steps, step, 'restore CEPH external config',\n 'DONE', newline)\n step += 1\n\n # Step 22: Restore Armada manifest\n restore_armada_manifest_data(archive, constants.ARMADA_PERMDIR)\n utils.progress(total_steps, step, 'restore armada manifest',\n 'DONE', newline)\n step += 1\n\n # Step 23: Restore Helm charts\n restore_std_dir(archive, constants.HELM_CHARTS_PERMDIR)\n utils.progress(total_steps, step, 'restore helm charts',\n 'DONE', newline)\n step += 1\n\n # Step 24: Create Helm overrides directory\n create_helm_overrides_directory()\n utils.progress(total_steps, step, 'create helm overrides directory',\n 'DONE', newline)\n step += 1\n\n # Step 25: Shutdown file systems\n archive.close()\n shutil.rmtree(staging_dir, ignore_errors=True)\n utils.shutdown_file_systems()\n utils.progress(total_steps, step, 'shutdown file systems', 'DONE',\n newline)\n step += 1\n\n # Step 26: Recover services\n utils.mtce_restart()\n utils.mark_config_complete()\n time.sleep(120)\n\n for service in ['sysinv-conductor', 'sysinv-inv']:\n if not utils.wait_sm_service(service):\n raise RestoreFail(\"Services have failed to 
initialize.\")\n\n utils.progress(total_steps, step, 'recover services', 'DONE', newline)\n step += 1\n\n if tsconfig.system_mode != sysinv_constants.SYSTEM_MODE_SIMPLEX:\n\n print(\"\\nRestoring node states (this will take several minutes):\")\n\n with openstack.OpenStack() as client:\n # On ceph setups storage nodes take about 90 seconds\n # to become locked. Setting the timeout to 120 seconds\n # for such setups\n lock_timeout = 60\n storage_hosts = sysinv.get_hosts(client.admin_token,\n client.conf['region_name'],\n personality='storage')\n if storage_hosts:\n lock_timeout = 120\n\n failed_lock_host = False\n skip_hosts = ['controller-0']\n if not include_storage_reinstall:\n if storage_hosts:\n install_uuid = utils.get_install_uuid()\n for h in storage_hosts:\n skip_hosts.append(h.name)\n\n # Update install_uuid on the storage node\n client.sysinv.ihost.update_install_uuid(\n h.uuid,\n install_uuid)\n\n skip_hosts_count = len(skip_hosts)\n\n # Wait for nodes to be identified as disabled before attempting\n # to lock hosts. Even if after 3 minute nodes are still not\n # identified as disabled, we still continue the restore.\n if not client.wait_for_hosts_disabled(\n exempt_hostnames=skip_hosts,\n timeout=180):\n LOG.info(\"At least one node is not in a disabling state. \"\n \"Continuing.\")\n\n print(\"\\nLocking nodes:\")\n try:\n failed_hosts = client.lock_hosts(skip_hosts,\n utils.progress,\n timeout=lock_timeout)\n # Don't power off nodes that could not be locked\n if len(failed_hosts) > 0:\n skip_hosts.append(failed_hosts)\n\n except (KeystoneFail, SysInvFail) as e:\n LOG.exception(e)\n failed_lock_host = True\n\n if not failed_lock_host:\n print(\"\\nPowering-off nodes:\")\n try:\n client.power_off_hosts(skip_hosts,\n utils.progress,\n timeout=60)\n except (KeystoneFail, SysInvFail) as e:\n LOG.exception(e)\n # this is somehow expected\n\n if failed_lock_host or len(skip_hosts) > skip_hosts_count:\n if include_storage_reinstall:\n print(textwrap.fill(\n \"Failed to lock at least one node. \" +\n \"Please lock the unlocked nodes manually.\", 80\n ))\n else:\n print(textwrap.fill(\n \"Failed to lock at least one node. \" +\n \"Please lock the unlocked controller-1 or \" +\n \"worker nodes manually.\", 80\n ))\n\n if not clone:\n print(textwrap.fill(\n \"Before continuing to the next step in the restore, \" +\n \"please ensure all nodes other than controller-0 \" +\n \"and storage nodes, if they are not being \" +\n \"reinstalled, are powered off. Please refer to the \" +\n \"system administration guide for more details.\", 80\n ))\n\n finally:\n os.remove(restore_in_progress)\n if staging_dir:\n shutil.rmtree(staging_dir, ignore_errors=True)\n cleanup_prefetched_keyring()\n\n fmApi = fm_api.FaultAPIs()\n entity_instance_id = \"%s=%s\" % (fm_constants.FM_ENTITY_TYPE_HOST,\n sysinv_constants.CONTROLLER_HOSTNAME)\n fault = fm_api.Fault(\n alarm_id=fm_constants.FM_ALARM_ID_BACKUP_IN_PROGRESS,\n alarm_state=fm_constants.FM_ALARM_STATE_MSG,\n entity_type_id=fm_constants.FM_ENTITY_TYPE_HOST,\n entity_instance_id=entity_instance_id,\n severity=fm_constants.FM_ALARM_SEVERITY_MINOR,\n reason_text=(\"System Restore complete.\"),\n # other\n alarm_type=fm_constants.FM_ALARM_TYPE_0,\n # unknown\n probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_UNKNOWN,\n proposed_repair_action=(\"\"),\n service_affecting=False)\n\n fmApi.set_fault(fault)\n\n if utils.get_system_type() == sysinv_constants.TIS_AIO_BUILD:\n print(\"\\nApplying worker manifests for %s. 
\" %\n (utils.get_controller_hostname()))\n print(\"Node will reboot on completion.\")\n\n sysinv.do_worker_config_complete(utils.get_controller_hostname())\n\n # show in-progress log on console every 30 seconds\n # until self reboot or timeout\n\n time.sleep(30)\n for i in range(1, 10):\n print(\"worker manifest apply in progress ... \")\n time.sleep(30)\n\n raise RestoreFail(\"Timeout running worker manifests, \"\n \"reboot did not occur\")\n\n return RESTORE_COMPLETE\n","sub_path":"controllerconfig/controllerconfig/controllerconfig/backup_restore.py","file_name":"backup_restore.py","file_ext":"py","file_size_in_byte":64251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"273385714","text":"import numpy as np\n\n\ndef map_adaptation(gmm, data, max_iterations=300, likelihood_threshold=1e-20, relevance_factor=16):\n N = data.shape[0]\n D = data.shape[1]\n K = gmm.n_components\n\n mu_new = np.zeros((K, D))\n n_k = np.zeros((K, 1))\n\n mu_k = gmm.means_\n cov_k = gmm.covariances_\n pi_k = gmm.weights_\n\n old_likelihood = gmm.score(data)\n new_likelihood = 0\n iterations = 0\n while abs(old_likelihood - new_likelihood) > likelihood_threshold and iterations < max_iterations:\n iterations += 1\n old_likelihood = new_likelihood\n z_n_k = gmm.predict_proba(data)\n n_k = np.sum(z_n_k, axis=0)\n for i in range(K):\n temp = np.zeros((1, D))\n for n in range(N):\n temp += z_n_k[n][i] * data[n, :]\n mu_new[i] = (1 / max(n_k[i], 1e-20)) * temp\n\n adaptation_coefficient = n_k / (n_k + relevance_factor)\n for k in range(K):\n mu_k[k] = (adaptation_coefficient[k] * mu_new[k]) + ((1 - adaptation_coefficient[k]) * mu_k[k])\n gmm.means_ = mu_k\n\n log_likelihood = gmm.score(data)\n new_likelihood = log_likelihood\n #print(log_likelihood)\n return gmm\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"340565122","text":"import itertools\nimport math\nimport random\nimport time\n\n\nclass City:\n nr_miasta = 0\n X_cord = 0\n Y_cord = 0\n\n def __init__(self, nr_miasta):\n self.nr_miasta = nr_miasta\n self.X_cord = random.randint(0, 40)\n self.Y_cord = random.randint(0, 20)\n\n\n# Generate all possible paths whose length equals the number of nodes\ndef generate_node_permutations(node_list):\n node_permutations = list(itertools.permutations(node_list, len(node_list)))\n return node_permutations\n\n\n# Helper that would check whether a path is a duplicate (isomorphic) and drop it from the list\n# def check_is_isomorphic(node_permutations):\n# for node_set in node_permutations:\n# check_list = \"\"\n# for node in node_set:\n# check_list = check_list + str(node.nr_miasta)\n# print(check_list)\n# if check_list ==\n\n\n# Compute the distance between two nodes.\ndef distance_between_nodes(c1, c2):\n dist = math.hypot(c2.X_cord - c1.X_cord, c2.Y_cord - c1.Y_cord)\n return float(dist)\n\n\n# Compute the length of a path and return its value. Uses distance_between_nodes\ndef path_length(node_set):\n distance_sum = 0\n for i in range(0, len(node_set) - 1):\n distance_sum = distance_sum + distance_between_nodes(node_set[i], node_set[i + 1])\n return distance_sum\n\n\n# Find the shortest path. Uses path_length\ndef find_shortest_distance(node_permutations):\n min_distance = None\n best_set = None\n for node_set in node_permutations:\n current_distance = path_length(node_set)\n if min_distance is None or current_distance < min_distance:\n min_distance = current_distance\n best_set = node_set\n print(\"Shortest distance: \", round(min_distance, 2))\n return best_set\n\n\n# node_amount - number of nodes to generate\n# node_list - list of generated nodes\n# node_permutations - list of all possible connections\ndef main():\n node_amount = input(\"Enter the number of cities: \")\n while node_amount != \"0\":\n start_time = time.time()\n node_amount = int(node_amount)\n node_list = list()\n\n # Generate node_amount cities with random coordinates\n for i in range(0, node_amount):\n node_list.append(City(i))\n\n # Generate all possible paths\n node_permutations = generate_node_permutations(node_list)\n # check_is_isomorphic(node_permutations) # disabled: the helper above is commented out\n print(\"Permutations amount: \", len(node_permutations))\n # Find the shortest path\n find_shortest_distance(node_permutations)\n elapsed_time = time.time() - start_time\n print(\"Elapsed time: \", elapsed_time)\n print()\n node_amount = input(\"Enter the number of cities: \")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"PSI/Lab_01/BruteforceMethod.py","file_name":"BruteforceMethod.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"493371445","text":"import pytest\n\nfrom conftest import client\n\npytestmark = pytest.mark.asyncio\n\n\nasync def test_get_ng_song():\n await client.get_ng_song(1)\n\n\nasync def test_search_songs():\n assert await client.search_page_songs(\"Panda Eyes\")\n\n\nasync def test_search_users():\n assert await client.search_page_users(\"CreoMusic\")\n\n\nasync def test_get_user_songs():\n assert await client.get_page_user_songs(\"Xtrullor\")\n","sub_path":"tests/test_ng.py","file_name":"test_ng.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"215328987","text":"import sys\n\nimport pexpect\nfrom telegram import Bot\n\nimport config\n\nSWITCH = '192.168.2.2'\nPORT = 8\n\n\ndef disable_port(switch_ip, port):\n \"\"\"\n Disable a port on a switch.\n :param switch_ip: IP of the switch\n :param port: port to disable\n :return: `True` on success, `False` on failure\n \"\"\"\n try:\n p = pexpect.spawn('telnet %s' % switch_ip, timeout=2)\n except Exception:\n return False\n\n p.logfile = sys.stdout\n p.expect('UserName:')\n p.sendline('admin')\n p.expect('PassWord:')\n p.sendline('XXXXXXXX')\n p.expect('#')\n p.sendline('config ports %d state disable' % port)\n p.sendline('logout')\n return True\n\n\nif __name__ == '__main__':\n bot = Bot(token=config.token)\n if disable_port(SWITCH, PORT):\n bot.send_message(config.on_duty_chat, 'Port %d on switch %s has been disabled' % (PORT, SWITCH))\n else:\n bot.send_message(config.on_duty_chat, 'Switch %s is unreachable' % SWITCH)\n","sub_path":"zabbix_notifier.py","file_name":"zabbix_notifier.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"576390885","text":"#!/usr/bin/python\nimport numpy as np\nfrom scipy.ndimage.filters import gaussian_filter1d\nimport os\nimport ConfigParser\nfrom sklearn.externals import joblib\nimport
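A usage sketch for the `map_adaptation()` routine in `model.py` above: fit a background `GaussianMixture` (the UBM, in speaker-verification terms), then adapt its means to enrollment data. The synthetic arrays and component count are illustrative assumptions; only `map_adaptation` itself comes from the source, and the import assumes `model.py` is on the path.

```python
import numpy as np
from sklearn.mixture import GaussianMixture
from model import map_adaptation  # the routine defined in model.py above

rng = np.random.RandomState(0)
background = rng.randn(500, 4)        # data for the universal background model
enrollment = rng.randn(100, 4) + 0.5  # data from the target source

ubm = GaussianMixture(n_components=8, random_state=0).fit(background)
adapted = map_adaptation(ubm, enrollment, relevance_factor=16)
print(adapted.means_.shape)           # (8, 4): only the means were adapted
```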
ipdb\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n# read the current file path\nfile_path = os.path.dirname(__file__)\n# read model cfg file\ncp_models = ConfigParser.SafeConfigParser()\ncp_models.read(os.path.join(file_path, '../cfg/models.cfg'))\n\n# load param\ndatasets_path = os.path.join(file_path, cp_models.get('datasets', 'path'))\nnum_alpha_candidate = cp_models.getint('phase', 'num_phaseCandidate')\ntask_name_path = os.path.join(datasets_path, 'pkl/task_name_list.pkl')\ntask_name = joblib.load(task_name_path)\nsigma = cp_models.getint('filter', 'sigma')\npromp_set = joblib.load(os.path.join(datasets_path, 'pkl/promp_set.pkl'))\nmethod = 'promp'\ndatasets_raw = joblib.load(os.path.join(datasets_path, 'pkl/'+method+'_datasets_raw.pkl'))\n\n# read datasets cfg file\ncp_datasets = ConfigParser.SafeConfigParser()\ncp_datasets.read(os.path.join(datasets_path, './info/cfg/datasets.cfg'))\n# read datasets params\ndata_index_sec = cp_datasets.items('index_17')\ndata_index = [map(int, task[1].split(',')) for task in data_index_sec]\n\n\ndef main():\n task_id = 0\n test_index = 8\n obs_ratio = 0.4\n\n # read test data\n obs_data_dict = datasets_raw[task_id][test_index]\n\n left_joints = obs_data_dict['left_joints']\n obs_data = left_joints\n timestamp = obs_data_dict['stamp']\n gt_time = np.copy(timestamp)\n # filter the data\n obs_data = gaussian_filter1d(obs_data.T, sigma=sigma).T\n # preprocessing for the data\n obs_data_post_arr = promp_set[0].min_max_scaler.transform(obs_data)\n\n # choose the data\n start_idx = 40\n ratio = 5\n num_obs = int(len(timestamp)*obs_ratio)\n num_obs -= num_obs % ratio\n obs_data_post_arr = obs_data_post_arr[start_idx:num_obs:ratio, :]\n timestamp = timestamp[start_idx:num_obs:ratio]\n viapoint = obs_data[start_idx:num_obs:ratio, :]\n viapoint_time = np.copy(timestamp)\n\n # phase estimation\n print('Phase estimating...')\n alpha_max_list = []\n for promp in promp_set:\n alpha_temp = promp.alpha_candidate(num_alpha_candidate)\n # ipdb.set_trace()\n idx_max = promp.estimate_alpha(alpha_temp, obs_data_post_arr, timestamp)\n alpha_max_list.append(alpha_temp[idx_max]['candidate'])\n promp.set_alpha(alpha_temp[idx_max]['candidate'])\n\n # task recognition\n print('Adding via points in each trained model...')\n for task_idx, promp in enumerate(promp_set):\n for idx in range(len(timestamp)):\n # ipdb.set_trace()\n promp.add_viapoint(timestamp[idx] / alpha_max_list[task_idx], obs_data_post_arr[idx, :])\n promp.param_update(unit_update=True)\n # promp.promps[task_idx].plot_prior()\n # promp.promps[task_idx].plot_nUpdated()\n # plt.legend()\n # plt.show()\n # ipdb.set_trace()\n print('Computing the likelihood for each model under observations...')\n\n # # task recognition\n # print('Adding via points in each trained model...')\n # for task_idx, promp in enumerate(promp_set):\n # promp.add_viapoint(1.0, obs_data_post_arr)\n # promp.param_update(unit_update=True)\n # print('Computing the likelihood for each model under observations...')\n\n prob_task = []\n for promp in promp_set:\n prob_task_temp = promp.prob_obs()\n prob_task.append(prob_task_temp)\n idx_max_prob = np.argmax(prob_task)\n print('The max fit model index is task %s' % task_name[idx_max_prob])\n \n # robot motion generation\n traj_full = []\n for promp_id, promp in enumerate(promp_set):\n # ipdb.set_trace()\n [traj_time, traj] = promp.gen_real_traj(alpha_max_list[promp_id])\n traj = promp.min_max_scaler.inverse_transform(traj) \n traj_full.append(traj)\n\n # save the 
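The script around this point smooths each trajectory with `gaussian_filter1d` before phase estimation (it imports the deprecated `scipy.ndimage.filters` path; current SciPy exposes the same function from `scipy.ndimage`, and the script transposes so the filter runs along the time axis). A self-contained look at what that preprocessing step does on toy data:

```python
import numpy as np
from scipy.ndimage import gaussian_filter1d  # newer home of the same function

t = np.linspace(0.0, 1.0, 200)
noisy = np.sin(2 * np.pi * t) + 0.2 * np.random.RandomState(0).randn(200)
smooth = gaussian_filter1d(noisy, sigma=5)   # the script reads sigma from models.cfg
print(round(noisy.std(), 3), round(smooth.std(), 3))  # high-frequency spread shrinks
```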
conditional result\n print('Saving the post ProMPs...')\n joblib.dump(promp_set, os.path.join(datasets_path, 'pkl/'+method+'_post_offline.pkl'))\n # save the robot traj\n print('Saving the robot traj...')\n joblib.dump([traj_full, obs_data_dict, viapoint, gt_time, viapoint_time], os.path.join(datasets_path, 'pkl/'+method+'_traj_offline.pkl'))\n\n\nif __name__ == '__main__':\n main()","sub_path":"motion_generation/promp_movement_gen.py","file_name":"promp_movement_gen.py","file_ext":"py","file_size_in_byte":4314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"118291673","text":"MAX_COLUMNS = 10\nMIN_COLUMNS = 2\n\nLOWER = 33\nUPPER = 127\n\nchar = input(\"Enter a character:\\t\")\nprint(\"The ASCII code for {} is {}\".format(char, ord(char)))\nnumber = int(input(\"Enter a number between {} and {}:\".format(LOWER, UPPER)))\nwhile number < LOWER or number > UPPER:\n number = int(input(\"Enter a number between {} and {}:\".format(LOWER, UPPER)))\nprint(\"The character for {} is {}\".format(number, chr(number)))\n\nfor value in range(LOWER, UPPER + 1):\n print(\"{:3} {:>4}\".format(value, chr(value)))\n\ncolumns = int(input(\"Enter number of columns:\\t\"))\nwhile columns < MIN_COLUMNS or columns > MAX_COLUMNS:\n print(\"Please use a value between {} and {}\".format(MIN_COLUMNS, MAX_COLUMNS))\n columns = int(input(\"Enter number of columns:\\t\"))\n\nnumber_of_values = UPPER - LOWER + 1\nrows = number_of_values // columns\n\nprint(\"Version 1: Horizontal then vertical ordering\")\nvalue = LOWER\nfor row in range(rows):\n for column in range(columns):\n print(\"{:6} {:>2}\".format(value, chr(value)), end=\"\")\n value += 1\n print()\nstarting_value = value\nfor value in range(starting_value, UPPER + 1):\n print(\"{:6} {:>2}\".format(value, chr(value)), end=\"\")\nprint(\"\\n\")\n\nprint(\"Version 2: Vertical then horizontal ordering\")\n# iterate through rows\nfor row in range(rows + 1):\n starting_value = LOWER + row\n value = starting_value\n # print all column values not including the last one (-1)\n for column in range(columns - 1):\n value_to_print = value + (column * rows)\n print(\"{:6} {:>2}\".format(value_to_print, chr(value_to_print)), end=\"\")\n value += 1\n\n # last column may not exist so handle separately\n # having the if statement outside the for loop means we don't do it every column\n # so it is more efficient (we can't avoid doing it every row AFAIK)\n value_to_print = value + ((column + 1) * rows)\n if value_to_print <= UPPER:\n print(\"{:6} {:>2}\".format(value_to_print, chr(value_to_print)), end=\"\")\n print()","sub_path":"Week 2/ascii_table.py","file_name":"ascii_table.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"191517983","text":"import pandas as pd\nimport numpy as np\n\n\ndef map_for_dict_Gender(Gender):\n dict_Gender = {'Male': 0, 'Female': 1}\n res = dict_Gender.get(Gender)\n return res\n\n\ndef map_for_dict_MariStat(MariStat):\n dict_MariStat = {'Other': 0, 'Alone': 1}\n res = dict_MariStat.get(MariStat)\n return res\n\n\ndef f_VehUsage_Professional(VehUsage):\n if VehUsage == 'Professional':\n VehUsage_Professional = 1\n else:\n VehUsage_Professional = 0\n return VehUsage_Professional\n\n\ndef f_VehUsage_Private_trip_to_office(VehUsage):\n if VehUsage == 'Private+trip to office':\n VehUsage_Private_trip_to_office = 1\n else:\n VehUsage_Private_trip_to_office = 0\n return VehUsage_Private_trip_to_office\n\n\ndef
f_VehUsage_Private(VehUsage):\n if VehUsage == 'Private':\n VehUsage_Private = 1\n else:\n VehUsage_Private = 0\n return VehUsage_Private\n\n\ndef f_VehUsage_Professional_run(VehUsage):\n if VehUsage == 'Professional run':\n VehUsage_Professional_run = 1\n else:\n VehUsage_Professional_run = 0\n return VehUsage_Professional_run\n\n\ndef process_input(json_input):\n columns_list = ['LicAge', 'Gender', 'MariStat', 'DrivAge', 'HasKmLimit', 'BonusMalus',\n 'OutUseNb', 'RiskArea', 'VehUsage_Private',\n 'VehUsage_Private+trip to office', 'VehUsage_Professional',\n 'VehUsage_Professional run', 'SocioCateg_CSP1', 'SocioCateg_CSP2',\n 'SocioCateg_CSP3', 'SocioCateg_CSP4', 'SocioCateg_CSP5',\n 'SocioCateg_CSP6', 'SocioCateg_CSP7', 'DrivAgeSq']\n\n LicAge = json_input[\"LicAge\"]\n Gender = map_for_dict_Gender(json_input[\"Gender\"])\n MariStat = map_for_dict_MariStat(json_input[\"MariStat\"])\n DrivAge = json_input[\"DrivAge\"]\n HasKmLimit = json_input[\"HasKmLimit\"]\n BonusMalus = json_input[\"BonusMalus\"]\n OutUseNb = json_input[\"OutUseNb\"]\n RiskArea = json_input[\"RiskArea\"]\n VehUsg_Private = f_VehUsage_Private(json_input[\"VehUsage\"])\n VehUsg_Private_trip_to_office = f_VehUsage_Private_trip_to_office(json_input[\"VehUsage\"])\n VehUsg_Professional = f_VehUsage_Professional(json_input[\"VehUsage\"])\n VehUsg_Professional_run = f_VehUsage_Professional_run(json_input[\"VehUsage\"])\n\n CSP1 = 0\n CSP2 = 0\n CSP3 = 0\n CSP4 = 0\n CSP5 = 0\n CSP6 = 0\n CSP7 = 0\n\n DrivAgeSq = json_input[\"DrivAge\"] ** 2\n\n data_list = [(LicAge, Gender, MariStat, DrivAge,\n HasKmLimit, BonusMalus, OutUseNb, RiskArea,\n VehUsg_Private, VehUsg_Private_trip_to_office,\n VehUsg_Professional, VehUsg_Professional_run,\n CSP1, CSP2, CSP3, CSP4, CSP5, CSP6, CSP7,\n DrivAgeSq)]\n\n print(data_list)\n\n df = pd.DataFrame.from_records(data_list, columns=columns_list)\n\n return df\n","sub_path":"Задания/lessons/lesson9/venv/process_data.py","file_name":"process_data.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"156852072","text":"\"\"\"This is an example base experiment combining multiple modalities and running an \nexperiment to predict a diagnosis or cognitive score.\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\ntry:\n import alz.tadpole.tadpole_processing_helpers as helpers\nexcept ImportError:\n import tadpole_processing_helpers as helpers\nimport pickle\nimport sklearn\nfrom sklearn.svm import SVC, SVR\nfrom sklearn.model_selection import GridSearchCV\n\npickle_path = \"out/merged_data.pkl\"\n\nwith open(pickle_path, \"rb\") as pickle_file:\n merged_dataset = pickle.load(pickle_file)\n\n\ndef get_data_for_experiment_with_modalities(modalities, target_modality, target_column, data, columns_to_drop=None, verbose=False):\n \"\"\"Clean and return the data with the particular modalities and desired target\n \n Args:\n modalities (list): The list of modalities to use as features\n target_modality (str): The name of the modality with the desired target\n target_column (str): The name of the column within the target\n data (dict): The saved dataset\n columns_to_ignore (list, optional): Any columns to drop from the dataframe before cleaning the data. Defaults to None.\n verbose (bool, optional): Whether to display verbose print messages. 
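The four `f_VehUsage_*` indicator functions above are hand-written one-hot encoders. A compact dict-comprehension sketch that yields the same 0/1 columns; the helper name and level list are mine, with the category strings copied from the source:

```python
VEH_USAGE_LEVELS = ['Private', 'Private+trip to office',
                    'Professional', 'Professional run']

def one_hot_veh_usage(veh_usage):
    # One key per known level: 1 for the matching level, 0 elsewhere
    return {'VehUsage_' + level: int(veh_usage == level)
            for level in VEH_USAGE_LEVELS}

print(one_hot_veh_usage('Professional run'))
```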
Defaults to False.\n \n Returns:\n tuple: The features and targets cleaned up for experimentation\n \"\"\"\n\n df = pd.concat([pd.DataFrame.from_records(data[modality]).T for modality in modalities+[target_modality]], axis=1)\n\n if columns_to_drop is not None:\n df = df.drop(columns_to_drop, axis=1)\n\n if verbose:\n print(df.shape)\n print(\"Row analysis of NaNs\")\n print(df.isna().sum(axis=0))\n\n df = df.dropna(axis=0, how=\"all\")\n if verbose:\n print(df.isna().sum(axis=0))\n print(df.shape)\n print(\"Column analysis of NaNs\")\n print(df.isna().sum(axis=1))\n\n df = df.dropna(axis=1, how=\"all\")\n if verbose:\n print(df.isna().sum(axis=1))\n print(df.shape)\n\n df = df.dropna(axis=0, how=\"any\")\n if verbose:\n print(df.isna().sum().sum())\n print(df.shape)\n\n targets = df[[target_column]]\n if target_column == \"DX\":\n targets = targets[\"DX\"].apply(convert_string_to_integer_diagnosis)\n \n\n features = df.drop(target_column, axis=1)\n\n return features, targets\n\n\n# # Ignore Ecog Columns\n# ecog_columns = [column for column in df.columns if column.startswith(\"Ecog\")]\n# df = df.drop(ecog_columns, axis=1)\n\n# # Ignore RAVLT columns\n# ravlt_columns = [column for column in df.columns if column.startswith(\"RAVLT\")]\n# df = df.drop(ravlt_columns, axis=1)\n\n# Select rows that don't have NA for certain cognitive tests\n# important_columns = [\"DX\", \"ADAS11\", \"MMSE\"]\n# df = df.dropna(subset=important_columns)\n\n\n\n\ndef convert_string_to_integer_diagnosis(string_diagnosis):\n \"\"\"Convert string diagnosis to integer for machine learning prediction\n \n Args:\n string_diagnosis (str): The string diagnosis\n \n Returns:\n int: An integer code for each diagnosis (NL - 0, MCI - 1, Dementia - 2)\n \"\"\"\n if string_diagnosis == 'MCI to Dementia':\n return 2\n if string_diagnosis == 'NL':\n return 0\n if string_diagnosis == 'Dementia':\n return 2\n if string_diagnosis == 'MCI to NL':\n return 0\n if string_diagnosis == 'Dementia to MCI':\n return 1\n if string_diagnosis == 'NL to MCI':\n return 1\n if string_diagnosis == 'NL to Dementia':\n return 2\n if string_diagnosis == 'MCI':\n return 1\n return None\n\n\ndef run_base_experiment(features, targets, parameters, model, scoring):\n \"\"\"Run an experiment with a model and calculate the mean and standard deviation for the score\n \n Args:\n features (iterable): The features to use to fit the movel\n targets (iterable): The targets to try and predict with the model\n parameters (dict): The parameter grid to search\n model (sklearn.Estimator): The sklearn estimator to instantiate\n scoring (str): The scoring method to use (see sklearn documentation)\n \n Returns:\n tuple: mean and standard deviation results for the experiment\n \"\"\"\n clf = GridSearchCV(model(), parameters, scoring=scoring, cv=10, n_jobs=-1)\n clf.fit(features, targets)\n mean = clf.best_score_\n stddev = clf.cv_results_['std_test_score'][clf.best_index_]\n\n return mean, stddev\n\n\ndef main():\n\n modalities_to_use = [\"MRI_FSX_SV_labeled_latest\", \"snp_labeled\"]\n features, targets = get_data_for_experiment_with_modalities(modalities_to_use, \"diagnosis_labeled_latest\", \"DX\", merged_dataset, verbose=True)\n parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}\n run_base_experiment(features, targets, parameters, SVC, \"f1_weighted\")\n\n\n modalities_to_use = [\"MRI_FSX_SV_labeled_latest\", \"snp_labeled\"]\n features, targets = get_data_for_experiment_with_modalities(modalities_to_use, \"ADAS11_labeled_latest\", \"ADAS11\", merged_dataset, 
verbose=True)\n parameters = {'C':[1, 10], 'epsilon': [0.1, 0.2]}\n print(f\"Will run experiment with {features.shape[0]} samples.\")\n mean, stddev = run_base_experiment(features, targets, parameters, SVR, \"neg_mean_squared_error\")\n print(f\"mean: {mean}, stddev: {stddev}\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"alz/alz/tadpole/tadpole_base_experiment.py","file_name":"tadpole_base_experiment.py","file_ext":"py","file_size_in_byte":5361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"233851519","text":"#!/usr/bin/python\n\"\"\" PN CLI VRRP L3 \"\"\"\n\n#\n# This file is part of Ansible\n#\n# Ansible is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Ansible is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Ansible. If not, see .\n#\n\nfrom ansible.module_utils.basic import AnsibleModule\nimport shlex\n\nDOCUMENTATION = \"\"\"\n---\nmodule: pn_ztp_vrrp_l3\nauthor: 'Pluribus Networks (devops@pluribusnetworks.com)'\nshort_description: CLI command to configure VRRP - Layer 3 Setup\ndescription: Virtual Router Redundancy Protocol (VRRP) - Layer 3 Setup\noptions:\n pn_cliusername:\n description:\n - Provide login username if user is not root.\n required: False\n type: str\n pn_clipassword:\n description:\n - Provide login password if user is not root.\n required: False\n type: str\n pn_spine_list:\n description:\n - Specify list of Spine hosts\n required: False\n type: list\n pn_leaf_list:\n description:\n - Specify list of leaf hosts\n required: False\n type: list\n pn_csv_data:\n description:\n - String containing vrrp data parsed from csv file.\n required: False\n type: str\n\"\"\"\n\nEXAMPLES = \"\"\"\n - name: VRRP L3 setup\n pn_ztp_vrrp_l3:\n pn_cliusername: \"{{ USERNAME }}\"\n pn_clipassword: \"{{ PASSWORD }}\"\n pn_spine_list: \"{{ groups['spine'] }}\"\n pn_leaf_list: \"{{ groups['leaf'] }}\"\n pn_csv_data: \"{{ lookup('file', '{{ csv_file }}') }}\"\n\"\"\"\n\nRETURN = \"\"\"\nstdout:\n description: The set of responses for each command.\n returned: always\n type: str\nchanged:\n description: Indicates whether the CLI caused changes on the target.\n returned: always\n type: bool\nfailed:\n description: Indicates whether or not the execution failed on the target.\n returned: always\n type: bool\n\"\"\"\n\nCHANGED_FLAG = []\n\n\ndef pn_cli(module):\n \"\"\"\n Method to generate the cli portion to launch the Netvisor cli.\n :param module: The Ansible module to fetch username and password.\n :return: The cli string for further processing\n \"\"\"\n username = module.params['pn_cliusername']\n password = module.params['pn_clipassword']\n\n if username and password:\n cli = '/usr/bin/cli --quiet --user %s:%s ' % (username, password)\n else:\n cli = '/usr/bin/cli --quiet '\n\n return cli\n\n\ndef run_cli(module, cli):\n \"\"\"\n Method to execute the cli command on the target node(s) and returns the\n output.\n :param module: The Ansible module to fetch input parameters.\n :param cli: The complete cli string to be executed on the target node(s).\n :return: Output/Error 
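The `run_base_experiment()` helper above reads the best mean score and its per-fold standard deviation out of a fitted `GridSearchCV`. The same extraction on a small synthetic problem, so the pattern can be run without the TADPOLE data (sizes and grid are illustrative):

```python
from sklearn.datasets import make_classification
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

X, y = make_classification(n_samples=200, n_features=10, random_state=0)
search = GridSearchCV(SVC(), {'kernel': ('linear', 'rbf'), 'C': [1, 10]},
                      scoring='f1_weighted', cv=5)
search.fit(X, y)
mean = search.best_score_
stddev = search.cv_results_['std_test_score'][search.best_index_]
print(round(mean, 3), round(stddev, 3))
```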
or Success msg depending upon the response from cli.\n \"\"\"\n cli = shlex.split(cli)\n rc, out, err = module.run_command(cli)\n results = []\n if out:\n return out\n\n if err:\n json_msg = {'switch': '', 'output': u'Operation Failed: {}'.format(str(cli))}\n results.append(json_msg)\n module.exit_json(\n unreachable=False,\n failed=True,\n exception='',\n summary=results,\n task='CLI commands to configure L3 VRRP zero touch provisioning',\n stderr=err.strip(),\n msg='L3 VRRP configuration failed',\n changed=False\n )\n else:\n return 'Success'\n\n\ndef get_vrouter_name(module, switch_name):\n \"\"\"\n Method to return name of the vrouter.\n :param module: The Ansible module to fetch input parameters.\n :param switch_name: Name of the switch for which to find the vrouter.\n :return: Vrouter name.\n \"\"\"\n cli = pn_cli(module)\n cli += ' vrouter-show location ' + switch_name\n cli += ' format name no-show-headers '\n return run_cli(module, cli).split()[0]\n\n\ndef create_vlan(module, vlan_id, switch):\n \"\"\"\n Method to create vlans.\n :param module: The Ansible module to fetch input parameters.\n :param vlan_id: vlan id to be created.\n :param switch: Name of the switch on which vlan creation will be executed.\n :return: String describing if vlan got created or if it already exists.\n \"\"\"\n global CHANGED_FLAG\n cli = pn_cli(module)\n clicopy = cli\n cli += ' vlan-show format id no-show-headers '\n existing_vlan_ids = run_cli(module, cli).split()\n existing_vlan_ids = list(set(existing_vlan_ids))\n\n if vlan_id not in existing_vlan_ids:\n cli = clicopy\n cli += ' vlan-create id %s scope fabric ' % vlan_id\n run_cli(module, cli)\n CHANGED_FLAG.append(True)\n return ' %s: Vlan id %s with scope fabric created successfully \\n' % (\n switch, vlan_id\n )\n\n else:\n return ' %s: Vlan id %s with scope fabric already exists \\n' % (\n switch, vlan_id\n )\n\n\ndef create_vrouter(module, switch, vrrp_id, vnet_name):\n \"\"\"\n Method to create vrouter and assign vrrp_id to the switches.\n :param module: The Ansible module to fetch input parameters.\n :param switch: The switch name on which vrouter will be created.\n :param vrrp_id: The vrrp_id to be assigned.\n :param vnet_name: The name of the vnet for vrouter creation.\n :return: String describing if vrouter got created or if it already exists.\n \"\"\"\n global CHANGED_FLAG\n output = ''\n vrouter_name = str(switch) + '-vrouter'\n cli = pn_cli(module)\n cli += ' switch ' + switch\n clicopy = cli\n\n # Check if vrouter already exists\n cli += ' vrouter-show format name no-show-headers '\n existing_vrouter_names = run_cli(module, cli).split()\n\n # If vrouter doesn't exists then create it\n if vrouter_name not in existing_vrouter_names:\n cli = clicopy\n cli += ' vrouter-create name %s vnet %s hw-vrrp-id %s enable ' % (\n vrouter_name, vnet_name, vrrp_id)\n run_cli(module, cli)\n output = ' %s: Created vrouter with name %s \\n' % (switch, vrouter_name)\n CHANGED_FLAG.append(True)\n else:\n cli = clicopy\n cli += ' vrouter-show name ' + vrouter_name\n cli += ' format hw-vrrp-id no-show-headers'\n hw_vrrp_id = run_cli(module, cli).split()[0]\n\n if hw_vrrp_id != vrrp_id:\n cli = clicopy\n cli += ' vrouter-modify name %s hw-vrrp-id %s ' % (vrouter_name,\n vrrp_id)\n run_cli(module, cli)\n CHANGED_FLAG.append(True)\n\n return output\n\n\ndef create_vrouter_interface(module, switch, ip, vlan_id, vrrp_id,\n ip_count, vrrp_priority):\n \"\"\"\n Method to add vrouter interface and assign IP to it along with\n vrrp_id and vrrp_priority.\n :param 
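`run_cli()` above tokenizes the assembled command with `shlex.split` before handing it to `module.run_command`; a quick illustration of why that is safer than `str.split` for quoted arguments (the command string here is made up):

```python
import shlex

cmd = 'vrouter-create name sw1-vrouter vnet "fab one-global" enable'
print(cmd.split())       # naive: breaks the quoted vnet name into two tokens
print(shlex.split(cmd))  # shell-style: keeps 'fab one-global' as one argument
```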
module: The Ansible module to fetch input parameters.\n :param switch: The switch name on which interfaces will be created.\n :param ip: IP address to be assigned to vrouter interface.\n :param vlan_id: vlan_id to be assigned.\n :param vrrp_id: vrrp_id to be assigned.\n :param vrrp_priority: priority to be given(110 for active switch).\n :param ip_count: The value of fourth octet in the ip\n :return: String describing if vrouter interface got added or not.\n \"\"\"\n global CHANGED_FLAG\n vrouter_name = get_vrouter_name(module, switch)\n ip_addr = ip.split('.')\n fourth_octet = ip_addr[3].split('/')\n subnet = fourth_octet[1]\n\n static_ip = ip_addr[0] + '.' + ip_addr[1] + '.' + ip_addr[2] + '.'\n ip_vip = static_ip + '1' + '/' + subnet\n ip2 = static_ip + ip_count + '/' + subnet\n\n cli = pn_cli(module)\n clicopy = cli\n cli += ' vrouter-interface-show vlan %s ip %s ' % (vlan_id, ip2)\n cli += ' format switch no-show-headers '\n existing_vrouter = run_cli(module, cli).split()\n existing_vrouter = list(set(existing_vrouter))\n\n if vrouter_name not in existing_vrouter:\n cli = clicopy\n cli += ' switch ' + switch\n cli += ' vrouter-interface-add vrouter-name ' + vrouter_name\n cli += ' ip ' + ip2\n cli += ' vlan %s if data ' % vlan_id\n run_cli(module, cli)\n output = ' %s: Added vrouter interface with ip %s to %s \\n' % (\n switch, ip2, vrouter_name\n )\n CHANGED_FLAG.append(True)\n else:\n output = ' %s: Vrouter interface %s already exists for %s \\n' % (\n switch, ip2, vrouter_name\n )\n\n cli = clicopy\n cli += ' vrouter-interface-show vrouter-name %s ip %s vlan %s ' % (\n vrouter_name, ip2, vlan_id\n )\n cli += ' format nic no-show-headers '\n eth_port = run_cli(module, cli).split()\n eth_port.remove(vrouter_name)\n\n cli = clicopy\n cli += ' vrouter-interface-show vlan %s ip %s vrrp-primary %s ' % (\n vlan_id, ip_vip, eth_port[0]\n )\n cli += ' format switch no-show-headers '\n existing_vrouter = run_cli(module, cli).split()\n existing_vrouter = list(set(existing_vrouter))\n\n if vrouter_name not in existing_vrouter:\n cli = clicopy\n cli += ' switch ' + switch\n cli += ' vrouter-interface-add vrouter-name ' + vrouter_name\n cli += ' ip ' + ip_vip\n cli += ' vlan %s if data vrrp-id %s ' % (vlan_id, vrrp_id)\n cli += ' vrrp-primary %s vrrp-priority %s ' % (eth_port[0],\n vrrp_priority)\n run_cli(module, cli)\n output += ' %s: Added vrouter interface with ip %s to %s \\n' % (\n switch, ip_vip, vrouter_name\n )\n CHANGED_FLAG.append(True)\n\n else:\n output += ' %s: Vrouter interface %s already exists for %s \\n' % (\n switch, ip_vip, vrouter_name\n )\n\n return output\n\n\ndef create_cluster(module, switch, name, node1, node2):\n \"\"\"\n Method to create a cluster between two switches.\n :param module: The Ansible module to fetch input parameters.\n :param switch: Name of the local switch.\n :param name: The name of the cluster to create.\n :param node1: First node of the cluster.\n :param node2: Second node of the cluster.\n :return: The output of run_cli() method.\n \"\"\"\n global CHANGED_FLAG\n cli = pn_cli(module)\n clicopy = cli\n cli += ' switch %s cluster-show format name no-show-headers ' % node1\n cluster_list = run_cli(module, cli).split()\n if name not in cluster_list:\n cli = clicopy\n cli += ' switch %s cluster-create name %s ' % (switch, name)\n cli += ' cluster-node-1 %s cluster-node-2 %s ' % (node1, node2)\n if 'Success' in run_cli(module, cli):\n CHANGED_FLAG.append(True)\n return ' %s: %s created successfully \\n' % (switch, name)\n else:\n return ' %s: %s already 
exists \\n' % (switch, name)\n\n\ndef create_vrouter_without_vrrp(module, switch, vnet_name):\n \"\"\"\n Method to create vrouter without assigning vrrp id to it.\n :param module: The Ansible module to fetch input parameters.\n :param switch: The switch name on which vrouter will be created.\n :param vnet_name: The name of the vnet for vrouter creation.\n :return: String describing if vrouter got created or if it already exists.\n \"\"\"\n global CHANGED_FLAG\n vrouter_name = str(switch) + '-vrouter'\n cli = pn_cli(module)\n cli += ' switch ' + switch\n clicopy = cli\n\n # Check if vrouter already exists\n cli += ' vrouter-show format name no-show-headers '\n existing_vrouter_names = run_cli(module, cli).split()\n\n # If vrouter doesn't exists then create it\n if vrouter_name not in existing_vrouter_names:\n cli = clicopy\n cli += ' vrouter-create name %s vnet %s ' % (vrouter_name, vnet_name)\n run_cli(module, cli)\n output = ' %s: Created vrouter with name %s \\n' % (switch, vrouter_name)\n CHANGED_FLAG.append(True)\n else:\n output = ' %s: Vrouter with name %s already exists \\n' % (switch,\n vrouter_name)\n\n return output\n\n\ndef configure_vrrp_for_non_cluster_leafs(module, ip, non_cluster_leaf, vlan_id):\n \"\"\"\n Method to configure vrrp for non-cluster switches.\n :param module: The Ansible module to fetch input parameters.\n :param ip: IP address for the default gateway\n :param non_cluster_leaf: Name of non-cluster leaf switch.\n :param vlan_id: The vlan id to be assigned.\n :return: String describing whether interfaces got added or not.\n \"\"\"\n global CHANGED_FLAG\n vrouter_name = get_vrouter_name(module, non_cluster_leaf)\n\n ip_addr = ip.split('.')\n fourth_octet = ip_addr[3].split('/')\n subnet = fourth_octet[1]\n\n static_ip = ip_addr[0] + '.' + ip_addr[1] + '.' 
+ ip_addr[2] + '.'\n ip_gateway = static_ip + '1' + '/' + subnet\n\n cli = pn_cli(module)\n clicopy = cli\n cli += ' vrouter-interface-show ip %s vlan %s ' % (ip_gateway, vlan_id)\n cli += ' format switch no-show-headers '\n existing_vrouter = run_cli(module, cli).split()\n existing_vrouter = list(set(existing_vrouter))\n\n if vrouter_name not in existing_vrouter:\n cli = clicopy\n cli += 'switch ' + non_cluster_leaf\n cli += ' vrouter-interface-add vrouter-name ' + vrouter_name\n cli += ' vlan ' + vlan_id\n cli += ' ip ' + ip_gateway\n run_cli(module, cli)\n CHANGED_FLAG.append(True)\n return ' %s: Added vrouter interface with ip %s on %s \\n' % (\n non_cluster_leaf, ip_gateway, vrouter_name\n )\n\n else:\n return ' %s: Vrouter interface %s already exists on %s \\n' % (\n non_cluster_leaf, ip_gateway, vrouter_name\n )\n\n\ndef configure_vrrp_for_clustered_switches(module, vrrp_id, vrrp_ip,\n active_switch, vlan_id, switch_list):\n \"\"\"\n Method to configure vrrp interfaces for clustered leaf switches.\n :param module: The Ansible module to fetch input parameters.\n :param vrrp_id: The vrrp_id to be assigned.\n :param vrrp_ip: The vrrp_ip to be assigned.\n :param active_switch: The name of the active switch.\n :param vlan_id: vlan id to be assigned.\n :param switch_list: List of clustered switches.\n :return: The output of the configuration.\n \"\"\"\n node1 = switch_list[0]\n node2 = switch_list[1]\n name = (node1 + '-to-' + node2 + '-cluster')[:59]\n host_count = 1\n\n output = create_cluster(module, node2, name, node1, node2)\n output += create_vlan(module, vlan_id, node2)\n\n vnet_name = get_global_vnet_name(module)\n\n for switch in switch_list:\n output += create_vrouter(module, switch, vrrp_id, vnet_name)\n\n for switch in switch_list:\n host_count += 1\n vrrp_priority = '110' if switch == active_switch else '100'\n output += create_vrouter_interface(module, switch, vrrp_ip, vlan_id,\n vrrp_id, str(host_count),\n vrrp_priority)\n\n return output\n\n\ndef configure_vrrp_for_non_clustered_switches(module, vlan_id, ip,\n non_cluster_leaf):\n \"\"\"\n Method to configure VRRP for non clustered leafs.\n :param module: The Ansible module to fetch input parameters.\n :param vlan_id: vlan id to be assigned.\n :param ip: Ip address to be assigned.\n :param non_cluster_leaf: Name of non-clustered leaf switch.\n :return: Output string of configuration.\n \"\"\"\n vnet_name = get_global_vnet_name(module)\n output = create_vrouter_without_vrrp(module, non_cluster_leaf, vnet_name)\n output += create_vlan(module, vlan_id, non_cluster_leaf)\n output += configure_vrrp_for_non_cluster_leafs(module, ip,\n non_cluster_leaf, vlan_id)\n return output\n\n\ndef configure_vrrp(module, csv_data):\n \"\"\"\n Method to configure VRRP L3.\n :param module: The Ansible module to fetch input parameters.\n :param csv_data: String containing vrrp data passed from csv file.\n :return: Output string of configuration.\n \"\"\"\n output = ''\n vnet_name = get_global_vnet_name(module)\n for switch in module.params['pn_spine_list']:\n output += create_vrouter_without_vrrp(module, switch, vnet_name)\n\n csv_data = csv_data.replace(\" \", \"\")\n csv_data_list = csv_data.split('\\n')\n # Parse csv file data and configure VRRP.\n for row in csv_data_list:\n elements = row.split(',')\n switch_list = []\n vlan_id = elements[0]\n vrrp_ip = elements[1]\n leaf_switch_1 = str(elements[2])\n if len(elements) > 5:\n leaf_switch_2 = str(elements[3])\n vrrp_id = elements[4]\n active_switch = str(elements[5])\n 
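The interface helpers above derive the `.1` gateway/VIP address by splitting the dotted quad by hand. A sketch of the same derivation using the stdlib `ipaddress` module, which also validates the input; the function name and example subnet are mine:

```python
import ipaddress

def gateway_for(ip_with_prefix, host_octet=1):
    # '10.10.100.7/24' -> '10.10.100.1/24'
    iface = ipaddress.ip_interface(ip_with_prefix)
    gateway = iface.network.network_address + host_octet
    return '%s/%s' % (gateway, iface.network.prefixlen)

print(gateway_for('10.10.100.7/24'))
```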
switch_list.append(leaf_switch_1)\n switch_list.append(leaf_switch_2)\n output += configure_vrrp_for_clustered_switches(module, vrrp_id,\n vrrp_ip,\n active_switch,\n vlan_id,\n switch_list)\n\n else:\n output += configure_vrrp_for_non_clustered_switches(module, vlan_id,\n vrrp_ip,\n leaf_switch_1)\n\n return output\n\n\ndef get_global_vnet_name(module):\n \"\"\"\n Method to get global vnet name, required for vrouters creation.\n :param module: The Ansible module to fetch input parameters.\n :return: Global vnet name.\n \"\"\"\n cli = pn_cli(module)\n cli += ' fabric-node-show format fab-name no-show-headers '\n fabric_name = list(set(run_cli(module, cli).split()))[0]\n return str(fabric_name) + '-global'\n\n\ndef main():\n \"\"\" This section is for arguments parsing \"\"\"\n module = AnsibleModule(\n argument_spec=dict(\n pn_cliusername=dict(required=False, type='str'),\n pn_clipassword=dict(required=False, type='str', no_log=True),\n pn_spine_list=dict(required=False, type='list'),\n pn_leaf_list=dict(required=False, type='list'),\n pn_csv_data=dict(required=True, type='str'),\n )\n )\n\n global CHANGED_FLAG\n message = configure_vrrp(module, module.params['pn_csv_data'])\n\n # Exit the module and return the required JSON.\n message_string = message\n results = []\n switch_list = module.params['pn_spine_list'] + module.params['pn_leaf_list']\n for switch in switch_list:\n replace_string = switch + ': '\n\n for line in message_string.splitlines():\n if replace_string in line:\n json_msg = {'switch' : switch , 'output' : (line.replace(replace_string, '')).strip() }\n results.append(json_msg)\n\n # Exit the module and return the required JSON.\n module.exit_json(\n unreachable=False,\n msg = 'L3 VRRP configuration executed successfully',\n summary=results,\n exception='',\n failed=False,\n changed=True if True in CHANGED_FLAG else False,\n task='CLI commands to configure L3 VRRP zero touch provisioning'\n )\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"ansible/library/pn_ztp_vrrp_l3_json.py","file_name":"pn_ztp_vrrp_l3_json.py","file_ext":"py","file_size_in_byte":19536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"379287335","text":"from datetime import datetime\nimport logging\nimport os, sys\nimport glob\nimport subprocess\n\nfrom vbench.api import collect_benchmarks\n\nfrom benchmarks.utils import cd\n\nBASEDIR = os.path.dirname(os.path.realpath(__file__))\n\nlog = logging.getLogger('vb')\nlog.setLevel(logging.INFO)\n\nwith cd(os.path.join(BASEDIR, 'benchmarks')):\n filenames = glob.glob(\"vb*.py\")\n names = [filename[:-3] for filename in filenames]\n print(names)\n benchmarks = collect_benchmarks(names)\n\nlog.info(\"Initializing settings\")\n\nREPO_PATH = os.path.join(BASEDIR, 'buhmm')\nREPO_URL = 'git://github.com/chebee7i/buhmm.git'\nREPO_BROWSE = 'https://github.com/chebee7i/buhmm'\nDB_PATH = os.path.join(BASEDIR, 'db/benchmarks.db')\nTMP_DIR = os.path.join(BASEDIR, 'tmp')\n\n# Assure corresponding directories existence\nfor s in (REPO_PATH, os.path.dirname(DB_PATH), TMP_DIR):\n if not os.path.exists(s):\n os.makedirs(s)\n\nBRANCHES = ['master']\n\nPREPARE = \"\"\"\ngit clean -dfx\n\"\"\"\n\nBUILD = \"\"\"\npython setup.py build_ext --inplace\n\"\"\"\n\nSTART_DATE = datetime(2014, 10, 30)\nRST_BASE = 'source'\n\n\ndependencies = []\n\nDESCRIPTION = \"\"\n\nHARDWARE = \"\"\"\nResults were collected on the following machine:\n\n - {uname}\n - CPU: {cpu}\n - Memory: {mem}\n - {dist}\n - Python 
{python}\n\n``lscpu`` output::\n\n{lscpu}\n\n\"\"\"\ntry:\n subs = {}\n p = subprocess.Popen(['uname', '-srmpio'], stdout=subprocess.PIPE)\n out, err = p.communicate()\n subs['uname'] = out.strip()\n\n p = subprocess.Popen('cat /proc/cpuinfo | grep --color=no \"model name\"',\n shell=True, stdout=subprocess.PIPE)\n out, err = p.communicate()\n out = out.split('\\n')[0]\n out = out.split(':')[1].strip()\n subs['cpu'] = out\n\n p = subprocess.Popen('cat /proc/meminfo | grep --color=no MemTotal',\n shell=True, stdout=subprocess.PIPE)\n out, err = p.communicate()\n out = out.split('\\n')[0]\n out = out.split(':')[1].strip()\n subs['mem'] = out\n\n try:\n p = subprocess.Popen('cat /etc/lsb-release',\n shell=True, stdout=subprocess.PIPE)\n out, err = p.communicate()\n # Grab last line\n out = out.strip().split('\\n')[-1]\n # Take away quotes in content after equals sign.\n out = out.split('=')[1][1:-1]\n subs['dist'] = out\n except:\n subs['dist'] = ''\n\n subs['python'] = \"{}.{}.{}\".format(*sys.version_info[:3])\n\n p = subprocess.Popen('lscpu', shell=True, stdout=subprocess.PIPE)\n out, err = p.communicate()\n subs['lscpu'] = ' '+ '\\n '.join(out.split('\\n'))\n\nexcept:\n pass\nelse:\n if subs:\n HARDWARE = HARDWARE.format(**subs)\n DESCRIPTION += HARDWARE\n\n filename = os.path.join(BASEDIR, 'db', 'hardware.txt')\n if os.path.isfile(filename):\n with open(filename) as f:\n data = f.read()\n if data != HARDWARE:\n msg = 'Database exists and was created using different hardware.'\n print(msg)\n print(\"\\nEXISTING:\\n{0}\".format(data))\n print(\"\\nCURRENT:\\n{0}\".format(HARDWARE))\n else:\n with open(filename, 'w') as f:\n f.write(HARDWARE)\n","sub_path":"suite.py","file_name":"suite.py","file_ext":"py","file_size_in_byte":3140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"95400124","text":"\"\"\"\nRead an audio file, convert it to JSON or protobuf format, and send it via RabbitMQ\n\"\"\"\nimport pika\nimport audio_pb2 as pb\nimport soundfile\nimport json\n### Connect to the RabbitMQ server ###\nconnection = pika.BlockingConnection(pika.ConnectionParameters(\"localhost\"))\n\n### Create an AMQP channel ###\nchannel = connection.channel()\n\n### Declare the queue ###\nchannel.queue_declare(queue=\"泡沫\")\n\n### Read the audio and convert it to a protobuf message ###\naudio = pb.Audio()\nfile_path = r\"D:\\PyCharm_Code\\Python\\audio\\sox\\GEM.wav\"\nwith open(file_path, 'rb') as f:\n # print(f.read())\n audio.data = f.read()\n\n\nmessage = audio.SerializeToString() # serialize this message to a binary string\n\n### Send the message ###\nchannel.basic_publish(exchange='',\n routing_key='泡沫',\n body=message)\nprint(\"[x] Sent the audio!\")\n\n### Close the connection to RabbitMQ ###\n# connection.close()","sub_path":"audio/EXP01/3.MQ_Audio_Send.py","file_name":"3.MQ_Audio_Send.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"152408937","text":"import json\nimport os\nfrom typing import Dict, Text\n\nimport requests\nfrom collections import Counter\nfrom io import BytesIO\nfrom urllib import request\nfrom urllib.error import HTTPError\n\nimport imagehash\nfrom PIL import Image\nfrom PIL.Image import DecompressionBombError\n\nfrom redditrepostsleuth.core.exception import ImageConversioinException\nfrom redditrepostsleuth.core.logging import log\nfrom redditrepostsleuth.core.db.databasemodels import Post\n\n\ndef generate_img_by_post(post: Post) -> Image:\n \"\"\"\n Generate the image files provided from Imgur.
We pass the data straight from the request into PIL.Image\n \"\"\"\n\n try:\n img = generate_img_by_url(post.url)\n except (ImageConversioinException) as e:\n log.error('Failed to convert image %s. Error: %s (%s)', post.id, str(e), str(post))\n return None\n\n return img if img else None\n\ndef generate_img_by_url(url: str) -> Image:\n\n req = request.Request(\n url,\n data=None,\n headers={\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.47 Safari/537.36'\n }\n )\n\n try:\n response = request.urlopen(req, timeout=10)\n img = Image.open(BytesIO(response.read()))\n except (HTTPError, ConnectionError, OSError, DecompressionBombError, UnicodeEncodeError) as e:\n log.exception('Failed to convert image %s. Error: %s ', url, str(e))\n raise ImageConversioinException(str(e))\n\n return img if img else None\n\ndef generate_img_by_file(path: str) -> Image:\n\n try:\n img = Image.open(path)\n except (HTTPError, ConnectionError, OSError, DecompressionBombError, UnicodeEncodeError) as e:\n log.exception('Failed to convert image %s. Error: %s ', path, str(e))\n raise ImageConversioinException(str(e))\n\n return img if img else None\n\ndef set_image_hashes(post: Post, hash_size: int = 16) -> Post:\n log.debug('%s - Hashing image post %s', os.getpid(), post.post_id)\n try:\n img = generate_img_by_url(post.url)\n except ImageConversioinException as e:\n raise\n\n try:\n dhash_h = imagehash.dhash(img, hash_size=hash_size)\n dhash_v = imagehash.dhash_vertical(img, hash_size=hash_size)\n ahash = imagehash.average_hash(img, hash_size=hash_size)\n post.dhash_h = str(dhash_h)\n post.dhash_v = str(dhash_v)\n post.ahash = str(ahash)\n except Exception as e:\n # TODO: Specific exception\n log.exception('Error creating hash', exc_info=True)\n raise\n\n return post\n\ndef get_image_hashes(url: Text, hash_size: int = 16) -> Dict:\n result = {\n 'dhash_h': None,\n 'dhash_v': None,\n 'ahash': None,\n }\n log.debug('Hashing image %s', url)\n img = generate_img_by_url(url)\n try:\n dhash_h = imagehash.dhash(img, hash_size=hash_size)\n dhash_v = imagehash.dhash_vertical(img, hash_size=hash_size)\n ahash = imagehash.average_hash(img, hash_size=hash_size)\n result['dhash_h'] = str(dhash_h)\n result['dhash_v'] = str(dhash_v)\n result['ahash'] = str(ahash)\n except Exception as e:\n # TODO: Specific exception\n log.exception('Error creating hash', exc_info=True)\n raise\n\n return result\n\ndef set_image_hashes_api(post: Post, api_url: str) -> Post:\n \"\"\"\n Call an external API to create image hashes.\n This allows us to offload bandwidth to another server. 
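The endpoint is assumed to accept a GET request with a ``url`` query parameter and to reply with a JSON body carrying ``dhash_h``, ``dhash_v`` and ``ahash`` keys, mirroring get_image_hashes() above. 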
In the current case, a Digital Ocean load balancer sits in front of the hashing API.\n    :param post: Post to hash\n    :param api_url: API URL to call\n    :return: Dict of hashes\n    \"\"\"\n    log.debug('Hashing image post using api %s', post.post_id)\n    r = requests.get(api_url, params={'url': post.url})\n    if r.status_code != 200:\n        log.error('Bad status code from DO API %s', r.status_code)\n        raise ImageConversioinException('Bad response from DO API')\n\n    hashes = json.loads(r.text)\n    log.debug(hashes)\n\n    post.dhash_h = hashes['dhash_h']\n    post.dhash_v = hashes['dhash_v']\n    post.ahash = hashes['ahash']\n\n    return post\n\n","sub_path":"redditrepostsleuth/core/util/imagehashing.py","file_name":"imagehashing.py","file_ext":"py","file_size_in_byte":4081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"121761504","text":"import sys\nsys.path.append('../regression')\n\nimport inference #from ../regression\n\ndef main():\n    ## hyperparameters\n    resize = 224\n    mean_element = 0.5\n    std_element = 0.5\n    num_images = -1\n    rootpath = \"../../../dataset_image_to_gravity/AirSim/5cam/val\"\n    csv_name = \"imu_camera.csv\"\n    batch_size = 10\n    weights_path = \"../../weights/regression1cam.pth\"\n    ## infer\n    inference_model = inference.InferenceModel(\n        resize, mean_element, std_element, num_images,\n        rootpath, csv_name, batch_size,\n        weights_path\n    )\n    inference_model.infer()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"pysrc/trash/regression_single/inference_single_nn.py","file_name":"inference_single_nn.py","file_ext":"py","file_size_in_byte":626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"313813406","text":"from flask import Flask, request, render_template, url_for\nfrom flask import Response, redirect, make_response, jsonify\nfrom scipy.io import loadmat\nimport io\nimport base64\nimport cv2\nimport time\nimport os\nfrom werkzeug.utils import secure_filename\nfrom predict import predict_frame_score\nimport numpy as np\nimport numpy.matlib\nimport scipy.signal\n\nos.system('python -m webbrowser -t \"http://localhost:5000/index_page\" ')\n\nUPLOAD_FOLDER = '/home/vivaainng/Desktop/AnomalyDetectionCVPR2018/Test_Folder'\n\n#Path that points to the Testing Videos (.mp4 files)\ntesting_vid_path = '/home/vivaainng/Desktop/AnomalyDetectionCVPR2018/Eval_Res/Testing_Videos/'\n\n#Path that points to the Temporal Annotations for each testing video (.mat files)\ntemp_anno_path = '/home/vivaainng/Desktop/AnomalyDetectionCVPR2018/Eval_Res/Temporal_Annotations/'\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\n@app.route('/index_page') # Main Page\ndef index_page():\n    return render_template('index.html')\n\n\ndef generate_frame():\n    fps = 30\n    f = open(os.path.join('static/input_c3d/video_name.txt'))\n    video_path = f.read()\n    f.close()\n\n    cap = cv2.VideoCapture(str(video_path))\n\n    while True:\n        _, frame = cap.read()\n\n        font = cv2.FONT_HERSHEY_TRIPLEX\n        cv2.rectangle(\n            frame,\n            (100, 0),\n            (190, 25),\n            (0, 0, 0),\n            cv2.FILLED\n        )\n        cv2.putText(\n            frame, \n            str(cap.get(cv2.CAP_PROP_POS_FRAMES)), \n            (100, 17), \n            font, \n            0.7, \n            (255, 255, 255), \n            1)\n\n        imgencode = cv2.imencode('.jpg', frame)[1]\n        yield (b'--frame\\r\\n'\n               b'Content-Type: text/plain\\r\\n\\r\\n' + imgencode.tostring() + b'\\r\\n')\n        time.sleep(0.2 / fps)\n\n    cap.release()\n\n\n\n@app.route('/load_video')\ndef load_video():\n    return Response(generate_frame(),\n                    mimetype='multipart/x-mixed-replace; 
boundary=frame')\n\n\n@app.route('/upload_data', methods=['GET', 'POST'])\ndef upload_data():\n x = None\n y = None\n video = ''\n filename = ''\n \n # Save C3D averaged features & video name in path below\n saved_input_path = 'static/input_c3d/' \n\n # Upload C3D features from Test_Folder/\n if request.method == 'POST':\n file = request.files['c3d_file']\n filename = file.filename #C3D path\n file.save(os.path.join(saved_input_path, 'c3d_input_txt'))\n video = testing_vid_path + filename[:-6] + '.mp4'\n\n\n #--------Include the name of video file--------\n f = open(os.path.join(saved_input_path + 'video_name.txt'), 'w')\n f.write(testing_vid_path + filename[:-6] + '.mp4')\n f.close()\n\n \n cap = cv2.VideoCapture(video)\n max_frame = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n\n #---- Prediction of scores for each 32 segments ------\n c3d_input_path = saved_input_path + 'c3d_input_txt'\n predicted_scores = predict_frame_score(c3d_input_path)\n\n\n \n total_segments = np.linspace(1, max_frame, num=33)\n total_segments = total_segments.round()\n\n Frames_Score = []\n count = -1\n for iv in range(0, 32):\n F_Score = np.matlib.repmat(predicted_scores[iv],1,(int(total_segments[iv+1])-int(total_segments[iv])))\n count = count + 1\n if count == 0:\n Frames_Score = F_Score\n if count > 0:\n Frames_Score = np.hstack((Frames_Score, F_Score))\n\n x = np.linspace(1, max_frame, max_frame)\n scores = Frames_Score\n scores1 = scores.reshape((scores.shape[1],))\n y = scipy.signal.savgol_filter(scores1, 101, 3)\n x = x.tolist()\n y = y.tolist()\n\n # ----------Load temporal anomaly ----------\n temporal_ann_path = temp_anno_path + filename[:-3] + 'mat'\n temporal_ann = loadmat(temporal_ann_path)\n temporal_ann = temporal_ann['Annotation_file']['Anno']\n annotations = temporal_ann[0, 0]\n\n store_ann = []\n for i in annotations:\n start_frm = i[0]\n store_ann.append(start_frm)\n end_frm = i[1]\n store_ann.append((end_frm))\n\n annotation_list = []\n\n # For single temporal annotations\n if len(store_ann) == 2:\n for x_val in x:\n if x_val >= store_ann[0] and x_val <= store_ann[1]:\n annotation_list.append(1)\n else:\n annotation_list.append(0)\n\n # For two distinct temporal annotations \n if len(store_ann) == 4:\n for x_val in x:\n if x_val >= store_ann[0] and x_val <= store_ann[1]:\n annotation_list.append(1) \n elif x_val >= store_ann[2] and x_val <= store_ann[3]:\n annotation_list.append(1)\n else:\n annotation_list.append(0)\n \n\n \n return render_template(\n 'line_chart.html',\n values=y, \n labels=x, \n legend=filename[:-6], \n annotations=annotation_list)\n\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n\n\n'''\nInput to the server:\n1)Averaged C3D files with 32 segments with 4096-dim (.txt) via: ~/Test_Folder dir\n\n2)Path to video files corresponding to extracted averaged C3D files, via: \n ~/Eval_Res/Testing_Videos/ dir\n\n3)Path to temporal annotations (.mat files) for each test set, via:\n ~/Eval_Res/Temporal_Annotations/ dir\n'''","sub_path":"web_demo/web_app.py","file_name":"web_app.py","file_ext":"py","file_size_in_byte":5532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"515236991","text":"expected_output = {\n 'version': {\n 'bootldr_version': 'Cisco IOS Software, s72033_rp Software (s72033_rp-ADVENTERPRISEK9_DBG-M), Version 15.4(0.10)S, EARLY DEPLOYMENT ENGINEERING WEEKLY BUILD, synced to BLD_DARLING_122S_040709_1301',\n 'chassis': 'CISCO7606',\n 'compiled_by': 'alnguyen',\n 'compiled_date': 'Wed 26-Jun-13 02:21',\n 
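# The remaining keys capture uptime, controller, CPU, memory and image details.\n        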
'control_processor_uptime': '22 weeks, 6 days, 1 hour, 57 minutes',\n        'controller': {\n            'counts': 1,\n            'serial': 4,\n            'type': 'Enhanced FlexWAN',\n        },\n        'cpu': {\n            'implementation': '1284',\n            'l2_cache': '512KB',\n            'name': 'SR71000',\n            'rev': '1.2',\n            'speed': '600MHz',\n        },\n        'curr_config_register': '0x2',\n        'hostname': 'ipcore-ssr-uut2',\n        'image_id': 's72033_rp-ADVENTERPRISEK9_DBG-M',\n        'interfaces': {\n            'gigabit_ethernet': 52,\n            'serial': 4,\n            'virtual_ethernet': 1,\n        },\n        'last_reload': {\n            'reason': 'abort at PC 0x433A11BC',\n            'type': 'Normal Reload',\n        },\n        'last_reset': 's/w',\n        'main_mem': '983008',\n        'memory': {\n            'flash_internal_SIMM': 65536,\n            'non_volatile_conf': 1917,\n            'packet_buffer': 8192,\n        },\n        'os': 'IOS',\n        'platform': 's72033_rp',\n        'processor_board_id': 'FOX11140RN8',\n        'processor_type': 'R7000',\n        'returned_to_rom_by': 'power cycle at 03:04:03 PDT Thu May 18 2017 (SP by power on)',\n        'rom': 'System Bootstrap, Version 12.2(17r)SX7, RELEASE SOFTWARE',\n        'rom_version': '(fc1)',\n        'system_image': 'disk0:s72033-adventerprisek9_dbg-mz.154-0.10.S-ipcore-ssr-uut2',\n        'uptime': '22 weeks, 6 days, 2 hours, 1 minute',\n        'version': '15.4(0.10)S',\n    },\n    }","sub_path":"src/genie/libs/parser/ios/c7600/tests/ShowVersion/cli/equal/golden_output_expected.py","file_name":"golden_output_expected.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"403929624","text":"# **************************************************\r\n# ----- Import Library\r\n# **************************************************\r\nimport os\r\nimport sys\r\nimport glob\r\nimport datetime\r\nimport requests\r\nimport workdays\r\nimport pandas as pd\r\nimport japandas as jpd\r\nfrom lxml import etree\r\n\r\n\r\n# **************************************************\r\n# ----- Function Main\r\n# **************************************************\r\ndef main ():\r\n\tfiles = glob.glob (\"event-*.tsv\")\r\n\t\r\n\tfor f in files:\r\n\t\tAdjDate (f)\r\n\r\n\t\r\n# **************************************************\r\n# ----- Function AdjDate\r\n# **************************************************\r\ndef AdjDate (File):\r\n\tevent_data = File\r\n\tevent_name = os.path.splitext (File)[0]\r\n\tprint (event_name)\r\n\r\n\tread_event_data = pd.read_csv (event_data, sep=\"\\t\", header=0, index_col=None, encoding=\"cp932\")\r\n\tread_event_data[\"date_announcement_adj\"] = \"\"\r\n\tread_event_data[event_name] = \"\"\r\n\tread_event_data[\"title\"] = \"\"\r\n\t\r\n\tSearchWords = GetSearchWords (event_name)[0]\r\n\tNgWords = GetSearchWords (event_name)[1]\r\n\t\r\n\tfor r in read_event_data.iterrows ():\r\n\t\tsearch_ticker = r[1][\"ticker\"]\r\n\t\tsearch_date = r[1][\"date_announcement\"]\r\n\t\t\r\n\t\tUrl = f\"http://resource.ufocatch.com/atom/tdnet/query/{search_ticker}\"\r\n\t\tResponse = requests.get (Url)\r\n\t\t\r\n\t\ttree = etree.fromstring (Response.content)\r\n\t\tMatch_title = tree.findall (\".//{http://www.w3.org/2005/Atom}title\")[1:]\r\n\t\tMatch_updated = tree.findall (\".//{http://www.w3.org/2005/Atom}updated\")[1:]\r\n\t\t\r\n\t\tif len (Match_title) != len (Match_updated):\r\n\t\t\tprint (\"Inconsistent search results\")\r\n\t\t\tprint (f\"title:{len (Match_title)}\")\r\n\t\t\tprint (f\"update:{len (Match_updated)}\")\r\n\t\t\tsys.exit (1)\r\n\t\t\r\n\t\tMatch_merge = [[t.text, u.text] for t, u in zip (Match_title, Match_updated)]\r\n\t\tsearch_result = [m for m in Match_merge if m[1].startswith (search_date.replace (\"/\", 
\"-\"))]\r\n\t\t\r\n\t\tfor s in SearchWords:\r\n\t\t\tsearch_extract = [e for e in search_result if s in e[0]]\r\n\t\t\t\r\n\t\t\tif len (search_extract) > 0 and len (NgWords) > 0:\r\n\t\t\t\tTmpTitle = search_extract[-1][0]\r\n\t\t\t\tfor n in NgWords:\r\n\t\t\t\t\tsearch_extract = [e for e in search_extract if n not in e[0]]\r\n\t\t\t\telse:\r\n\t\t\t\t\tif len (search_extract) == 0:\r\n\t\t\t\t\t\tread_event_data.loc[r[0], \"title\"] = TmpTitle\r\n\t\t\t\r\n\t\t\tif len (search_extract) > 0:\r\n\t\t\t\tbreak\r\n\t\t\r\n\t\tprint (search_ticker)\r\n\t\tprint (read_event_data.loc[r[0]])\r\n\t\t\r\n\t\tif len (search_extract) >= 1:\r\n\t\t\tprint (search_extract[-1][0])\r\n\t\t\t\r\n\t\t\tdtDate = datetime.datetime.strptime (search_extract[-1][1], \"%Y-%m-%dT%H:%M:%S+09:00\")\r\n\t\t\tdtBase = datetime.datetime (year=dtDate.year, month=dtDate.month, day=dtDate.day, hour=15, minute=00, second=00)\r\n\t\t\t\r\n\t\t\tif dtDate > dtBase:\r\n\t\t\t\tdate_announcement_adj = datetime.datetime.strftime (workdays.workday (dtDate, 1, Holidays), \"%Y/%m/%d\")\r\n\t\t\telse:\r\n\t\t\t\tdate_announcement_adj = search_date\r\n\t\t\t\r\n\t\t\tread_event_data.loc[r[0], \"date_announcement_adj\"] = date_announcement_adj\r\n\t\t\tread_event_data.loc[r[0], event_name] = 1\r\n\t\t\tread_event_data.loc[r[0], \"title\"] = search_extract[-1][0]\r\n\t\t\t\r\n\t\telse:\r\n\t\t\tread_event_data.loc[r[0], \"date_announcement_adj\"] = search_date\r\n\t\t\tread_event_data.loc[r[0], event_name] = 0\r\n\t\t\r\n\t\tprint (read_event_data.loc[r[0]])\r\n\t\t\r\n\t\tprint (\"********************\")\r\n\t\r\n\tread_event_data.to_csv (f\"{File}\".replace (\".tsv\", \"_edt.tsv\"), index=False, encoding=\"cp932\", sep=\"\\t\")\r\n\t\r\n\treturn\r\n\r\n\r\n# **************************************************\r\n# ----- Function GetSearchWords\r\n# **************************************************\r\ndef GetSearchWords (EventName):\r\n\tif EventName == \"event-public_offering\":\r\n\t\tSearchWords = [\"新株式発行\", \"公募\"]\r\n\t\tNgWords = []\r\n\telif EventName == \"event-secondary_offering\":\r\n\t\tSearchWords = [\"株式売出\", \"売出\", \"株式売り出\", \"売り出\", \"株式売り出し\", \"売り出し\"]\r\n\t\tNgWords = []\r\n\telif EventName == \"event-stock_split\":\r\n\t\tSearchWords = [\"株式分割\", \"分割\"]\r\n\t\tNgWords = []\r\n\telif EventName == \"event-reverse_share_split\":\r\n\t\tSearchWords = [\"株式併合\", \"併合\", \"株式合併\", \"合併\"]\r\n\t\tNgWords = []\r\n\telif EventName == \"event-cancellation_of_treasury_share\":\r\n\t\tSearchWords = [\r\n\t\t\t\"自己株式の消却\", \"自己株式消却\", \"自己株の消却\", \"自己株消却\", \"消却\",\r\n\t\t\t\"自己株式の消去\", \"自己株式消去\"\r\n\t\t]\r\n\t\tNgWords = []\r\n\telif EventName == \"event-share_buyback\":\r\n\t\tSearchWords = [\r\n\t\t\t\"自己株式取得\", \"自己株式の取得\", \"自己株取得\", \"自己株の取得\", \"自己の株式取得\", \"自己の株式の取得\",\r\n\t\t\t\"自己株式買付け\", \"自己株式の買付け\", \"自己株式買付\", \"自己株式の買付\", \"自己株買付け\", \"自己株の買付け\",\r\n\t\t\t\"株式買取り\", \"株式買取\", \"株式の買取り\", \"株式の買取\", \"株式取得\", \"株式の取得\"\r\n\t\t]\r\n\t\tNgWords = [\"端数\", \"1株に満たない\", \"所在不明株主\", \"取得状況\"]\r\n\telse:\r\n\t\tprint (\"イベント判定失敗\")\r\n\t\tsys.exit (1)\r\n\t\r\n\treturn SearchWords, NgWords\r\n\r\n\r\n# **************************************************\r\n# ----- Process Main\r\n# **************************************************\r\nif __name__ == '__main__':\r\n\tCalender = jpd.JapaneseHolidayCalendar ()\r\n\tHolidays = Calender.holidays ().tolist ()\r\n\t\r\n\tmain ()\r\n\r\n\r\n# **************************************************\r\n# ----- End\r\n# 
**************************************************\r\n","sub_path":"ir_get.py","file_name":"ir_get.py","file_ext":"py","file_size_in_byte":5357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"99072375","text":"#!/usr/bin/env python\n\"\"\"Reducer for finding the color for cars that are mostly ticketed\"\"\"\n\nfrom itertools import groupby\nfrom operator import itemgetter\nimport sys\n\ndef read_mapper_output(file, separator='\\t'):\n for line in file:\n yield line.rstrip().split(separator, 1)\n\ndef main(separator='\\t'):\n # input comes from STDIN (standard input)\n \n data = read_mapper_output(sys.stdin, separator=separator)\n result = {}\n \n # groupby groups multiple pairs by car color,\n # and creates an iterator that returns consecutive keys and their group:\n # current_word - string containing a car type (the key)\n # group - iterator yielding all [\"<current_word>\", \"<count>\"] items\n for current_word, group in groupby(data, itemgetter(0)):\n try:\n total_count = sum(int(count) for current_word, count in group)\n result[current_word] = total_count\n except ValueError:\n # count was not a number, so silently discard this item\n pass\n\n sortedresult = sorted(result.items(), key = lambda kv: (kv[1], kv[0]), reverse = True) \n (carcolor, ticketcount) = next(iter(sortedresult))\n print(\"The color of cars most commonly ticketed is : \" +carcolor)\n \nif __name__ == \"__main__\":\n main()\n","sub_path":"PythonMapReduce/reducercolor.py","file_name":"reducercolor.py","file_ext":"py","file_size_in_byte":1287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"564596215","text":"from iocage_lib.zfs import all_properties\n\nimport threading\n\n\nclass Cache:\n\n cache_lock = threading.Lock()\n\n def __init__(self):\n self.dataset_data = self.pool_data = None\n\n @property\n def datasets(self):\n with self.cache_lock:\n if not self.dataset_data:\n self.dataset_data = all_properties()\n return self.dataset_data\n\n @property\n def pools(self):\n with self.cache_lock:\n if not self.pool_data:\n self.pool_data = all_properties(resource_type='zpool')\n return self.pool_data\n\n def reset(self):\n with self.cache_lock:\n self.dataset_data = self.pool_data = None\n\n\ncache = Cache()\n","sub_path":"iocage_lib/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"42918751","text":"# My 8 Ball\n\nimport random\n\n# write answers\nans1=\"Go for it!\"\nans2=\"No way, Jose!\"\nans3=\"I'm not sure. 
Ask me again.\"\nans4=\"Fear of the unknown is what imprisons us.\"\nans5=\"It would be madness to do that!\"\nans6=\"Only you can save mankind!\"\nans7=\"Makes no difference to me, do or don't - whatever.\"\nans8=\"Yes, I think on balance that is the right choice.\"\n\nprint(\"Welcome to My8Ball.\")\n\n# get the user's question\nquestion = input(\"Ask me for advice then press ENTER to shake me.\\n\")\n\nprint(\"shaking ...\\n\" * 4)\n\n# use the randint() function to select an answer\nchoice=random.randint(1, 8)\nif choice==1:\n    answer=ans1\nelif choice==2:\n    answer=ans2\nelif choice==3:\n    answer=ans3\nelif choice==4:\n    answer=ans4\nelif choice==5:\n    answer=ans5\nelif choice==6:\n    answer=ans6\nelif choice==7:\n    answer=ans7\nelse:\n    answer=ans8\n\n# print the answer to the screen\nprint(answer)\n\ninput(\"\\n\\nPress the ENTER key to finish.\")\n","sub_path":"content/static/python/my8ball.py","file_name":"my8ball.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"231416630","text":"import os\nimport signal\nimport traceback\nimport yaml\nimport copy\nimport functools\nimport collections\n\nimport lib\nfrom lib.utils import safe_makedirs\nfrom lib.test_suite import TestSuite\n\nfrom lib.colorer import color_stdout, color_log\nfrom lib.bee_server import BeeServer\n\n# Utils\n#######\n\n\ndef find_suites():\n    suite_names = lib.Options().args.suites\n    if suite_names == []:\n        for root, dirs, names in os.walk(os.getcwd(), followlinks=True):\n            if \"suite.ini\" in names:\n                suite_names.append(os.path.basename(root))\n\n    suites = [TestSuite(suite_name, lib.Options().args)\n              for suite_name in sorted(suite_names)]\n    return suites\n\n\ndef parse_reproduce_file(filepath):\n    reproduce = []\n    if not filepath:\n        return reproduce\n    try:\n        with open(filepath, 'r') as f:\n            for task_id in yaml.load(f):\n                task_name, task_conf = task_id\n                reproduce.append((task_name, task_conf))\n    except IOError:\n        color_stdout('Cannot read \"%s\" passed as --reproduce argument\\n' %\n                     filepath, schema='error')\n        exit(1)\n    return reproduce\n\n\ndef get_reproduce_file(worker_name):\n    main_vardir = os.path.realpath(lib.Options().args.vardir)\n    reproduce_dir = os.path.join(main_vardir, 'reproduce')\n    return os.path.join(reproduce_dir, '%s.list.yaml' % worker_name)\n\n\ndef print_greetings():\n    # print information about bee\n    color_stdout('\\n')\n    BeeServer.print_exe()\n\n\n# Get tasks and worker generators\n#################################\n\n\ndef get_task_groups():\n    \"\"\"Scan the directories where test files are expected to reside, create\n    the list of tests and group it by suite. Create a workers generator for\n    each of these groups.\n    \"\"\"\n    suites = find_suites()\n    res = collections.OrderedDict()\n    for suite in suites:\n        key = os.path.basename(suite.suite_path)\n        gen_worker = functools.partial(Worker, suite)  # get _id as an arg\n        task_ids = [task.id for task in suite.find_tests()]\n        if task_ids:\n            res[key] = {\n                'gen_worker': gen_worker,\n                'task_ids': task_ids,\n                'is_parallel': suite.is_parallel(),\n            }\n    return res\n\n\ndef reproduce_task_groups(task_groups):\n    \"\"\"Filter the provided task_groups down to one certain group. 
Sort the tests in\n    this group in the same order as in the reproduce file.\n    \"\"\"\n    found_keys = []\n    reproduce = parse_reproduce_file(lib.Options().args.reproduce)\n    if not reproduce:\n        raise ValueError('[reproduce] Tests list cannot be empty')\n    for i, task_id in enumerate(reproduce):\n        for key, task_group in task_groups.items():\n            if task_id in task_group['task_ids']:\n                found_keys.append(key)\n                break\n        if len(found_keys) != i + 1:\n            raise ValueError('[reproduce] Cannot find test \"%s\"' %\n                             str(task_id))\n    found_keys = list(set(found_keys))\n    if len(found_keys) < 1:\n        raise ValueError('[reproduce] Cannot find any suite for given tests')\n    elif len(found_keys) > 1:\n        raise ValueError(\n            '[reproduce] Given tests are contained in different suites')\n\n    res_key = found_keys[0]\n    res_task_group = copy.deepcopy(task_groups[res_key])\n    res_task_group['task_ids'] = reproduce\n    return {res_key: res_task_group}\n\n\n# Worker results\n################\n\n\nclass BaseWorkerMessage(object):\n    \"\"\"Base class for all objects passed via result queues. It holds worker_id\n    (int) and worker_name (string). Used as a structure, i.e. w/o data fields\n    encapsulation.\n    \"\"\"\n    def __init__(self, worker_id, worker_name):\n        super(BaseWorkerMessage, self).__init__()\n        self.worker_id = worker_id\n        self.worker_name = worker_name\n\n\nclass WorkerTaskResult(BaseWorkerMessage):\n    \"\"\" Passed into the result queue when a task is processed (done) by the\n    worker. The short_status (string) field is intended to give a short note on\n    whether the task was processed successfully or not, but with a little more\n    flexibility than a binary True/False. The task_id (any hashable object)\n    field holds the ID of the processed task.\n    \"\"\"\n    def __init__(self, worker_id, worker_name, task_id, short_status):\n        super(WorkerTaskResult, self).__init__(worker_id, worker_name)\n        self.short_status = short_status\n        self.task_id = task_id\n\n\nclass WorkerOutput(BaseWorkerMessage):\n    \"\"\"The output passed by worker processes via the color_stdout/color_log\n    functions. The output is wrapped into objects of this class by setting the\n    queue and wrapper in the Colorer class (see lib/colorer.py). 
Check the\n    LogOutputWatcher and OutputWatcher classes in the listeners.py file to see\n    how the output is multiplexed by the main process.\n    \"\"\"\n    def __init__(self, worker_id, worker_name, output, log_only):\n        super(WorkerOutput, self).__init__(worker_id, worker_name)\n        self.output = output\n        self.log_only = log_only\n\n\nclass WorkerDone(BaseWorkerMessage):\n    \"\"\"Report that the worker has finished its work.\"\"\"\n    def __init__(self, worker_id, worker_name):\n        super(WorkerDone, self).__init__(worker_id, worker_name)\n\n\n# Worker\n########\n\n\nclass VoluntaryStopException(Exception):\n    pass\n\n\nclass Worker:\n    def report_keyboard_interrupt(self):\n        color_stdout('\\n[Worker \"%s\"] Caught keyboard interrupt; stopping...\\n'\n                     % self.name, schema='test_var')\n\n    def wrap_output(self, output, log_only):\n        return WorkerOutput(self.id, self.name, output, log_only)\n\n    def done_marker(self):\n        return WorkerDone(self.id, self.name)\n\n    def wrap_result(self, task_id, short_status):\n        return WorkerTaskResult(self.id, self.name, task_id, short_status)\n\n    def sigterm_handler(self, signum, frame):\n        self.sigterm_received = True\n\n    def __init__(self, suite, _id):\n        self.sigterm_received = False\n        signal.signal(signal.SIGTERM, lambda x, y, z=self:\n                      z.sigterm_handler(x, y))\n\n        self.initialized = False\n        self.server = None\n        self.inspector = None\n\n        self.id = _id\n        self.suite = suite\n        self.name = '%03d_%s' % (self.id, self.suite.suite_path)\n\n        main_vardir = self.suite.ini['vardir']\n        self.suite.ini['vardir'] = os.path.join(main_vardir, self.name)\n\n        self.reproduce_file = get_reproduce_file(self.name)\n        safe_makedirs(os.path.dirname(self.reproduce_file))\n\n        color_stdout.queue_msg_wrapper = self.wrap_output\n\n        self.last_task_done = True\n        self.last_task_id = -1\n\n        try:\n            self.server = suite.gen_server()\n            self.inspector = suite.start_server(self.server)\n            self.initialized = True\n        except KeyboardInterrupt:\n            self.report_keyboard_interrupt()\n            self.stop_server(cleanup=False)\n        except Exception as e:\n            color_stdout('Worker \"%s\" cannot start bee server; '\n                         'the tasks will be ignored...\\n' % self.name,\n                         schema='error')\n            color_stdout(\"The raised exception is '%s' of type '%s'.\\n\"\n                         % (str(e), str(type(e))), schema='error')\n            color_stdout('Worker \"%s\" received the following error:\\n'\n                         % self.name + traceback.format_exc() + '\\n',\n                         schema='error')\n            self.stop_server(cleanup=False)\n\n    def stop_server(self, rais=True, cleanup=True, silent=True):\n        try:\n            self.suite.stop_server(self.server, self.inspector, silent=silent,\n                                   cleanup=cleanup)\n        except (KeyboardInterrupt, Exception):\n            if rais:\n                raise\n\n    # XXX: What if KeyboardInterrupt is raised inside task_queue.get() and the\n    # 'stop worker' marker is read from the queue, but not returned to us?\n    def task_get(self, task_queue):\n        self.last_task_done = False\n        self.last_task_id = task_queue.get()\n        return self.last_task_id\n\n    @staticmethod\n    def is_joinable(task_queue):\n        return 'task_done' in task_queue.__dict__.keys()\n\n    def task_done(self, task_queue):\n        if Worker.is_joinable(task_queue):\n            task_queue.task_done()\n        self.last_task_done = True\n\n    def find_task(self, task_id):\n        for cur_task in self.suite.tests:\n            if cur_task.id == task_id:\n                return cur_task\n        raise ValueError('Cannot find test: %s' % str(task_id))\n\n    # Note: it's not exception safe\n    def run_task(self, task_id):\n        if not self.initialized:\n            return self.done_marker()\n        try:\n            task = self.find_task(task_id)\n            with open(self.reproduce_file, 'a') as f:\n                f.write('- ' + 
yaml.safe_dump(task.id))\n            short_status = self.suite.run_test(\n                task, self.server, self.inspector)\n        except KeyboardInterrupt:\n            self.report_keyboard_interrupt()\n            raise\n        except Exception as e:\n            color_stdout(\n                'Worker \"%s\" received the following error; stopping...\\n'\n                % self.name + traceback.format_exc() + '\\n', schema='error')\n            raise\n        return short_status\n\n    def run_loop(self, task_queue, result_queue):\n        \"\"\" called from 'run_all' \"\"\"\n        while True:\n            task_id = self.task_get(task_queue)\n            # None is 'stop worker' marker\n            if task_id is None:\n                color_log('Worker \"%s\" exhausted task queue; '\n                          'stopping the server...\\n' % self.name,\n                          schema='test_var')\n                self.stop_worker(task_queue, result_queue)\n                break\n            short_status = self.run_task(task_id)\n            result_queue.put(self.wrap_result(task_id, short_status))\n            if not lib.Options().args.is_force and short_status == 'fail':\n                color_stdout(\n                    'Worker \"%s\" got a failed test; stopping the server...\\n'\n                    % self.name, schema='test_var')\n                raise VoluntaryStopException()\n            if self.sigterm_received:\n                color_stdout('Worker \"%s\" got a signal to terminate; '\n                             'stopping the server...\\n' % self.name,\n                             schema='test_var')\n                raise VoluntaryStopException()\n            self.task_done(task_queue)\n\n    def run_all(self, task_queue, result_queue):\n        if not self.initialized:\n            self.flush_all_tasks(task_queue, result_queue)\n            result_queue.put(self.done_marker())\n            return\n\n        try:\n            self.run_loop(task_queue, result_queue)\n        except (KeyboardInterrupt, Exception):\n            self.stop_worker(task_queue, result_queue, cleanup=False)\n\n        result_queue.put(self.done_marker())\n\n    def stop_worker(self, task_queue, result_queue, cleanup=True):\n        try:\n            if not self.last_task_done:\n                self.task_done(task_queue)\n            self.flush_all_tasks(task_queue, result_queue)\n            self.stop_server(cleanup=cleanup)\n        except (KeyboardInterrupt, Exception):\n            pass\n\n    def flush_all_tasks(self, task_queue, result_queue):\n        \"\"\" Queue flushing is necessary only for a joinable queue (when the\n        runner controls workers using join() on task queues), so it isn't\n        used in the current test-run implementation.\n        \"\"\"\n        if not Worker.is_joinable(task_queue):\n            return\n\n        # None is 'stop worker' marker\n        while self.last_task_id is not None:\n            task_id = self.task_get(task_queue)\n            result_queue.put(self.wrap_result(task_id, 'not_run'))\n            self.task_done(task_queue)\n","sub_path":"test-run/lib/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":11836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"347766003","text":"################################################################################\n# This file can only be run if the corresponding deltazero_energy.py script #\n# has been run first! Otherwise, 'observables_Emin' type file below will not #\n# exist and the script will fail. 
#\n################################################################################\n\nimport numpy as np\nimport subprocess\nimport sys\nimport time\nsys.path.append('../../scripts/')\nfrom singlerun import SingleRun\nfrom readparams import ReadParams\n\n\n\n\nif __name__==\"__main__\":\n\n\n\n start_time = time.time()\n \n FAILED_E = 1e300\n\n scan = {}\n \n loadsuf=savesuf=[\"K_{33}\",\"k_{24}\",\"\\\\Lambda\",\"\\\\omega\",\"\\\\gamma_s\"]\n\n scan_dir = \"scanforward\"\n\n # first, load the minimum for delta = 0 case, so you know the upper bound for\n # the energy minimum.\n\n rp = ReadParams(scan=scan,loadsuf=loadsuf,savesuf=savesuf)\n\n run = SingleRun(rp)\n\n\n strains = np.linspace(0,0.02,num=201,endpoint=True)\n\n\n for i,u in enumerate(strains):\n\n if i == 0:\n \n # for the zero strain case, I need to determine what eta_eq is,\n # so I run the full 3 variable (R,eta,delta) minimization.\n \n executable = \"../../../bin/full3var_onerun\"\n \n else:\n \n executable = \"../../../bin/delta1var_onerun\"\n scan['etaguess'] = str(eta_eq/(1+u))\n scan['Rguess'] = str(R_eq/np.sqrt(1+u))\n\n \n\n # read in file name info\n rp = ReadParams(scan=scan,loadsuf=loadsuf,savesuf=savesuf)\n \n # create a class to do calculations with current parameters in scan.\n run = SingleRun(rp,scan_dir=scan_dir,executable=executable)\n\n # run C executable.\n run.run_exe()\n\n # move file written by C executable from temporary data path to true data path\n run.mv_file(f'observables')\n\n\n # load the final values of E, R, eta, delta, and surface twist.\n Ei,Ri,etai,deltai,surftwisti = run.get_all_observables('observables',str2float=True)\n\n if i == 0:\n\n # again, if the strain is zero, then I have just determined the equilibrium\n # inverse d band spacing, which I now need to set (and do so below).\n\n eta_eq = etai\n R_eq = Ri\n\n run.concatenate_observables(None,externalparam=u)\n\n # now just adjust my guess for delta\n \n deltaguess = str(deltai)\n\n\n\n if not (np.isnan(float(deltaguess))\n or abs(float(deltaguess))<1e-5):\n scan['deltaguess'] = deltaguess\n scan['deltaupper'] = '0.818'\n \n if float(deltaguess) < 0.81:\n scan['deltalower'] = str(0.95*float(deltaguess))\n else:\n scan['deltalower'] = '0.81'\n\n print(f\"Took {(time.time()-start_time)/3600} hours to complete.\")\n","sub_path":"gradient_descent_new/experiments/2019-03-01/pull-on-fibril-1d-relaxation/pulling.py","file_name":"pulling.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"605357106","text":"# coding=UTF-8\n\nimport time\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver import ActionChains\nfrom selenium.webdriver.support import expected_conditions\nfrom selenium.webdriver.common.by import By\n# from selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import WebDriverWait\nimport re\n\nfrom tests.base import Component\n\n\n# custom expected condition\nclass text_to_change(object):\n def __init__(self, xpath, text):\n self.locator = (By.XPATH, xpath)\n self.before_text = text\n\n def __call__(self, driver):\n text = expected_conditions._find_element(driver, self.locator).text\n return text != self.before_text\n\n\nclass ContentEdit(Component):\n\n IFRAME = \"//div[@class='composeEditorFrame']//iframe\"\n BASE = \"//body[@id='tinymce']\"\n\n BASE_BTN = \"//span[contains(@class, 'mce_{}')]\"\n BOLD_BTN = BASE_BTN.format('bold')\n ITALIC_BTN = 
BASE_BTN.format('italic')\n UNDERLINE_BTN = BASE_BTN.format('underline')\n TEXT_COLOR_BTN = BASE_BTN.format('forecolor')\n BACK_COLOR_BTN = BASE_BTN.format('backcolor')\n FONT_BTN = BASE_BTN.format('fontactions')\n ALIGN_BTN = BASE_BTN.format('justifyselect')\n INDENT_BTN = BASE_BTN.format('textindentactions')\n LIST_BTN = BASE_BTN.format('bullistactions')\n EMOTIONS_BTN = BASE_BTN.format('emotions')\n UNDO_BTN = BASE_BTN.format('undo')\n REDO_BTN = BASE_BTN.format('redo')\n SPELLING_BTN = BASE_BTN.format('appspelling')\n TRANSLATE_BTN = BASE_BTN.format('apptransfer')\n MORE_ACTIONS_BTN = \"//a[contains(@class, 'mce_moreactions')]\"\n MINIMIZE_TOOLBAR_BTN = \"//a[contains(@class, 'mce_enableTextEditor')]\"\n MAXIMIZE_TOOLBAR_BTN = \"//a[contains(@class, 'mce_enableHTMLEditor')]\"\n\n TEXT_COLOR_PICK_BTN = \"//div[contains(@class, 'mce_forecolor')]//a[@_mce_color='{}']\"\n BACK_COLOR_PICK_BTN = \"//div[contains(@class, 'mce_backcolor')]//a[@_mce_color='{}']\"\n FONT_PICK_BTN = \"//div[contains(@class, 'mce_fontactions_menu')]//a[@id='mce_{}_aria']\"\n ALIGN_PICK_BTN = \"//div[contains(@class, 'Justify{}')]/a\"\n INDENT_CHANGE_BTN = \"//div[contains(@class, '{}')]/a\"\n LIST_INSERT_BTN = \"//div[contains(@class, 'Insert{}List')]/a\"\n\n EMOTIONS_TAB_BTN = \"//div[contains(@class, 'mceEmotionsTab0')]\"\n EMOTION_PICK_BTN = \"//img[@class='{}']\"\n EMOTION_TEXT_BTN = \"//img[contains(@src, '{}')]\"\n\n LINE_INSERT_BTN = \"//div[contains(@class, 'InsertHorizontalRule')]/a\"\n LINE_THROUGH_BTN = \"//div[contains(@class, 'Strikethrough')]/a\"\n TRANSLIT_BTN = \"//div[contains(@class, 'mceAppTranslit')]/a\"\n VIRTUAL_KEYBOARD_BTN = \"//div[contains(@class, 'mceAppKeyboard')]/a\"\n REMOVE_FORMAT_BTN = \"//div[contains(@class, 'RemoveFormat')]/a\"\n\n ADD_LINK_BTN = \"//div[contains(@class, 'mceLink')]/a\"\n LINK_HREF_FIELD = \"//div[contains(@class, 'mceLinkMenu')]//input[@name='href']\"\n LINK_TITLE_FIELD = \"//div[contains(@class, 'mceLinkMenu')]//input[@name='title']\"\n LINK_SUBMIT_BTN = \"//div[contains(@class, 'mceLinkMenu')]//input[@type='submit']\"\n\n BIG_TOOLBAR = \"//table[contains(@class, 'mceToolbarRow1')]\"\n SMALL_TOOLBAR = \"//table[contains(@class, 'mceToolbarRow3')]\"\n\n COMPOSE_FRAME_TABLE = \"//div[contains(@class, 'composeEditorFrame')]/table[@class='mlruTmpId{}']\"\n DESIGN_BTN = \"//a[contains(@class, 'mce_design')]\"\n DESIGN_PICK_BTN = \"//div[contains(@class, 'js-decoration_appearance')]//div[contains(@class, 'compose__decoration__slider__item__inner_big')]\"\n DESIGN_CLEAR_BTN = \"//div[contains(@class, 'js-decoration_appearance')]//li[contains(@class, 'js-clear')]/div\"\n CARDS_BTN = \"//a[contains(@class, 'mce_cards')]\"\n CARD_PICK_BTN = \"//div[contains(@class, 'js-decoration_cards')]//div[contains(@class, 'compose__decoration__slider__item__inner_big')]\"\n CARD_CLEAR_BTN = \"//div[contains(@class, 'js-decoration_cards')]//li[contains(@class, 'js-clear')]/div\"\n CARD_ELEM = BASE + \"//img[contains(@src, '/{}.')]\"\n\n SPELLING_CLOSE_BTN = \"//div[contains(@class, 'mdl-btn') and div[text()='Закрыть']]\"\n SPELLING_NO_ERRORS = \"//div[text()='В тексте письма орфографических ошибок не обнаружено.']\"\n SAVE_BTN = \"//div[contains(@class, 'mdl-btn') and div[text()='Сохранить изменения']]\"\n KEYBOARD_KEY_BTN = u\"//form[@name='keyb']//input[@value='{}']\"\n\n def switch_to_edit(self):\n WebDriverWait(self.driver, 5).until(expected_conditions.presence_of_element_located((By.XPATH, self.IFRAME)))\n 
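# The compose editor is rendered inside an iframe: the wait above ensures it\n        # exists before the driver context is pointed at it (switch_to_frame is the\n        # pre-Selenium-4 spelling of driver.switch_to.frame).\n        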
self.driver.switch_to_frame(self.driver.find_element_by_xpath(self.IFRAME))\n\n def switch_back(self):\n self.driver.switch_to_default_content()\n\n def _clear_edit(self):\n self.driver.find_element_by_xpath(self.BASE).clear()\n\n def clear_edit(self):\n self.switch_to_edit()\n self._clear_edit()\n self.switch_back()\n\n def change_text(self, text):\n self.switch_to_edit()\n self._clear_edit()\n field = self.driver.find_element_by_xpath(self.BASE)\n field.send_keys(text)\n field.click()\n self.switch_back()\n\n def get_text(self):\n self.switch_to_edit()\n text = self.driver.find_element_by_xpath(self.BASE).text\n self.switch_back()\n return text\n\n def send_backspaces(self, num):\n self.switch_to_edit()\n self.driver.find_element_by_xpath(self.BASE).send_keys('\\b' * num)\n self.switch_back()\n\n def select_text(self):\n area = self.driver.find_element_by_xpath(self.BASE)\n ActionChains(self.driver).move_to_element_with_offset(area, 10, 10).double_click().perform()\n\n def add_simple_style(self, style):\n self.switch_to_edit()\n self.select_text()\n self.switch_back()\n self.driver.find_element_by_xpath(getattr(self, style.upper() + '_BTN')).click()\n\n def check_tag(self, tag):\n self.switch_to_edit()\n elems = len(self.driver.find_elements_by_xpath(self.BASE + '//' + tag))\n self.switch_back()\n return elems\n\n def check_bold(self):\n return self.check_tag('strong') == 1\n\n def check_italic(self):\n return self.check_tag('em') == 1\n\n def check_elem_style(self, style, value, elem='span', child='//'):\n self.switch_to_edit()\n element = self.driver.find_element_by_xpath(self.BASE + child + elem)\n css = element.value_of_css_property(style)\n self.switch_back()\n return css == value\n\n def check_underline(self):\n return self.check_elem_style('text-decoration', 'underline')\n\n def add_text_color(self, color):\n self.switch_to_edit()\n self.select_text()\n self.switch_back()\n self.driver.find_element_by_xpath(self.TEXT_COLOR_BTN).click()\n self.driver.find_element_by_xpath(self.TEXT_COLOR_PICK_BTN.format(color)).click()\n\n def check_text_color(self, color):\n r, g, b = [int(color[i:i+2], 16) for i in xrange(1, 6, 2)]\n return self.check_elem_style('color', 'rgba({}, {}, {}, 1)'.format(r, g, b))\n\n def add_background_color(self, color):\n self.switch_to_edit()\n self.select_text()\n self.switch_back()\n self.driver.find_element_by_xpath(self.BACK_COLOR_BTN).click()\n self.driver.find_element_by_xpath(self.BACK_COLOR_PICK_BTN.format(color)).click()\n\n def check_background_color(self, color):\n r, g, b = [int(color[i:i+2], 16) for i in xrange(1, 6, 2)]\n return self.check_elem_style('background-color', 'rgba({}, {}, {}, 1)'.format(r, g, b))\n\n def pick_font_size(self, size):\n self.switch_to_edit()\n self.select_text()\n self.switch_back()\n self.driver.find_element_by_xpath(self.FONT_BTN).click()\n self.driver.find_element_by_xpath(self.FONT_PICK_BTN.format(size - 1)).click()\n\n def check_font_size(self, size):\n sizes = {1: 10, 2: 12, 3: 15, 4: 18, 5: 24, 6: 36, 7: 42}\n return self.check_elem_style('font-size', '{}px'.format(sizes[size]))\n\n def pick_font_family(self, fam):\n fonts = {\n 'arial': 8,\n 'arial black': 9,\n 'georgia': 13,\n 'comic sans': 11\n }\n self.switch_to_edit()\n self.select_text()\n self.switch_back()\n self.driver.find_element_by_xpath(self.FONT_BTN).click()\n self.driver.find_element_by_xpath(self.FONT_PICK_BTN.format(fonts[fam])).click()\n\n def check_font_family(self, fam):\n self.switch_to_edit()\n element = 
self.driver.find_element_by_xpath(self.BASE + '//span')\n css = element.value_of_css_property('font-family')\n self.switch_back()\n return fam in css\n\n def add_align(self, align):\n self.switch_to_edit()\n self.select_text()\n self.switch_back()\n self.driver.find_element_by_xpath(self.ALIGN_BTN).click()\n self.driver.find_element_by_xpath(self.ALIGN_PICK_BTN.format(align.title())).click()\n\n def check_align(self, align):\n return self.check_elem_style('text-align', align, 'div')\n\n def add_indent(self):\n self.driver.find_element_by_xpath(self.INDENT_BTN).click()\n self.driver.find_element_by_xpath(self.INDENT_CHANGE_BTN.format('Indent')).click()\n\n def remove_indent(self):\n self.driver.find_element_by_xpath(self.INDENT_BTN).click()\n self.driver.find_element_by_xpath(self.INDENT_CHANGE_BTN.format('Outdent')).click()\n\n def check_indent(self, i):\n # time.sleep(10)\n return self.check_elem_style('margin-left', '{}px'.format(i * 30), '', '')\n\n def add_text(self, text):\n self.switch_to_edit()\n self.driver.find_element_by_xpath(self.BASE).send_keys(text)\n self.switch_back()\n\n def add_list(self, order):\n self.driver.find_element_by_xpath(self.LIST_BTN).click()\n self.driver.find_element_by_xpath(self.LIST_INSERT_BTN.format(order.title())).click()\n\n def check_list(self, order, num=1):\n order_tag = {'ordered': 'ol', 'unordered': 'ul'}\n list_tag = order_tag[order]\n if self.check_tag(list_tag) == 1 and self.check_tag('li') == num:\n return True\n return False\n\n def add_emotion(self, emotion):\n self.driver.find_element_by_xpath(self.EMOTIONS_BTN).click()\n WebDriverWait(self.driver, 2).until(expected_conditions.visibility_of_element_located((By.XPATH, self.EMOTIONS_TAB_BTN)))\n self.driver.find_element_by_xpath(self.EMOTIONS_TAB_BTN).click()\n WebDriverWait(self.driver, 2).until(expected_conditions.visibility_of_element_located((By.XPATH, self.EMOTION_PICK_BTN.format(emotion))))\n self.driver.find_element_by_xpath(self.EMOTION_PICK_BTN.format(emotion)).click()\n\n def check_emotion(self, emotion):\n self.switch_to_edit()\n found = len(self.driver.find_elements_by_xpath(self.EMOTION_TEXT_BTN.format(emotion)))\n self.switch_back()\n return found >= 1\n\n def undo(self):\n self.switch_to_edit()\n self.driver.find_element_by_xpath(self.BASE).click()\n self.switch_back()\n self.driver.find_element_by_xpath(self.UNDO_BTN).click()\n\n def redo(self):\n self.switch_to_edit()\n self.driver.find_element_by_xpath(self.BASE).click()\n self.switch_back()\n self.driver.find_element_by_xpath(self.REDO_BTN).click()\n\n def add_line(self):\n self.driver.find_element_by_xpath(self.MORE_ACTIONS_BTN).click()\n self.driver.find_element_by_xpath(self.LINE_INSERT_BTN).click()\n\n def add_link(self, href, title):\n self.driver.find_element_by_xpath(self.MORE_ACTIONS_BTN).click()\n self.driver.find_element_by_xpath(self.ADD_LINK_BTN).click()\n self.driver.find_element_by_xpath(self.LINK_HREF_FIELD).clear()\n self.driver.find_element_by_xpath(self.LINK_HREF_FIELD).send_keys(href)\n self.driver.find_element_by_xpath(self.LINK_TITLE_FIELD).send_keys(title)\n self.driver.find_element_by_xpath(self.LINK_SUBMIT_BTN).click()\n\n def translit_text(self):\n text = self.get_text()\n self.switch_to_edit()\n self.select_text()\n self.switch_back()\n self.driver.find_element_by_xpath(self.MORE_ACTIONS_BTN).click()\n self.driver.find_element_by_xpath(self.TRANSLIT_BTN).click()\n self.switch_to_edit()\n WebDriverWait(self.driver, 5).until(text_to_change(self.BASE, text))\n self.switch_back()\n\n def 
remove_format(self):\n self.switch_to_edit()\n self.select_text()\n self.switch_back()\n self.driver.find_element_by_xpath(self.MORE_ACTIONS_BTN).click()\n self.driver.find_element_by_xpath(self.REMOVE_FORMAT_BTN).click()\n\n def check_line(self):\n return self.check_tag('hr')\n\n def check_link(self, href, title):\n self.switch_to_edit()\n links = self.driver.find_elements_by_xpath(self.BASE + \"/a[@href='{}']\".format(href))\n if len(links) != 1:\n return False\n return links[0].text == title\n\n def check_tags(self):\n return self.check_tag('*')\n\n def minimize_toolbar(self):\n self.driver.find_element_by_xpath(self.MINIMIZE_TOOLBAR_BTN).click()\n\n def maximize_toolbar(self):\n self.driver.find_element_by_xpath(self.MAXIMIZE_TOOLBAR_BTN).click()\n\n def check_toolbar(self):\n return 'big' if self.driver.find_element_by_xpath(self.BIG_TOOLBAR).is_displayed() else 'small'\n\n def pick_theme(self, num):\n self.driver.find_element_by_xpath(self.DESIGN_BTN).click()\n themes = self.driver.find_elements_by_xpath(self.DESIGN_PICK_BTN)\n themes[num].click()\n self.driver.find_element_by_xpath(self.DESIGN_BTN).click()\n\n def delete_theme(self):\n self.driver.find_element_by_xpath(self.DESIGN_BTN).click()\n self.driver.find_element_by_xpath(self.DESIGN_CLEAR_BTN).click()\n self.driver.find_element_by_xpath(self.DESIGN_BTN).click()\n\n def check_theme(self, num):\n themes = self.driver.find_elements_by_xpath(self.DESIGN_PICK_BTN)\n url = themes[num].value_of_css_property('background-image')\n internal_num = re.search('([0-9]+)\\.[a-z]+\\\"?\\)$', url).group(1)\n elems = self.driver.find_elements_by_xpath(self.COMPOSE_FRAME_TABLE.format(internal_num))\n return len(elems) > 0\n\n def pick_card(self, num):\n self.driver.find_element_by_xpath(self.CARDS_BTN).click()\n cards = self.driver.find_elements_by_xpath(self.CARD_PICK_BTN)\n cards[num].click()\n self.driver.find_element_by_xpath(self.CARDS_BTN).click()\n\n def delete_card(self):\n self.driver.find_element_by_xpath(self.CARDS_BTN).click()\n self.driver.find_element_by_xpath(self.CARD_CLEAR_BTN).click()\n self.driver.find_element_by_xpath(self.CARDS_BTN).click()\n\n def check_card(self, num):\n cards = self.driver.find_elements_by_xpath(self.CARD_PICK_BTN)\n url = cards[num].value_of_css_property('background-image')\n internal_num = re.search('([0-9]+)i\\.[a-z]+\\\"?\\)$', url)\n internal_num = internal_num.group(1)\n self.switch_to_edit()\n elems = self.driver.find_elements_by_xpath(self.CARD_ELEM.format(internal_num))\n self.switch_back()\n return len(elems) > 0\n\n def check_spelling(self):\n main_window_handle = None\n while not main_window_handle:\n main_window_handle = self.driver.current_window_handle\n self.driver.find_element_by_xpath(self.SPELLING_BTN).click()\n spelling_window_handle = None\n while not spelling_window_handle:\n for handle in self.driver.window_handles:\n if handle != main_window_handle:\n spelling_window_handle = handle\n break\n self.driver.switch_to.window(spelling_window_handle)\n text = self.driver.find_elements_by_xpath(self.SPELLING_NO_ERRORS)\n self.driver.find_element_by_xpath(self.SPELLING_CLOSE_BTN).click()\n self.driver.switch_to.window(main_window_handle)\n return len(text) == 1\n\n def translate(self):\n main_window_handle = None\n while not main_window_handle:\n main_window_handle = self.driver.current_window_handle\n self.driver.find_element_by_xpath(self.TRANSLATE_BTN).click()\n spelling_window_handle = None\n while not spelling_window_handle:\n for handle in self.driver.window_handles:\n if handle != 
main_window_handle:\n spelling_window_handle = handle\n break\n self.driver.switch_to.window(spelling_window_handle)\n self.driver.find_element_by_xpath(self.SAVE_BTN).click()\n self.driver.switch_to.window(main_window_handle)\n\n def virtual_keyboard_type(self, text):\n main_window_handle = None\n while not main_window_handle:\n main_window_handle = self.driver.current_window_handle\n self.driver.find_element_by_xpath(self.MORE_ACTIONS_BTN).click()\n self.driver.find_element_by_xpath(self.VIRTUAL_KEYBOARD_BTN).click()\n spelling_window_handle = None\n while not spelling_window_handle:\n for handle in self.driver.window_handles:\n if handle != main_window_handle:\n spelling_window_handle = handle\n break\n self.driver.switch_to.window(spelling_window_handle)\n for ch in text:\n self.driver.find_element_by_xpath(self.KEYBOARD_KEY_BTN.format(ch)).click()\n self.driver.find_element_by_xpath(self.SAVE_BTN).click()\n self.driver.switch_to.window(main_window_handle)","sub_path":"tests/components/content_edit.py","file_name":"content_edit.py","file_ext":"py","file_size_in_byte":17583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"178650010","text":"import os\nimport urllib\n\nfrom google.appengine.api import users\nfrom google.appengine.ext import ndb\n\nimport jinja2\nimport webapp2\n\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'])\n\nclass HomePage(webapp2.RequestHandler):\n\n def get(self):\n\n template = JINJA_ENVIRONMENT.get_template('index.html')\n self.response.write(template.render())\n\nclass About(webapp2.RequestHandler):\n\n def get(self):\n\n template = JINJA_ENVIRONMENT.get_template('about.html')\n self.response.write(template.render())\n\nclass Services(webapp2.RequestHandler):\n\n def get(self):\n\n template = JINJA_ENVIRONMENT.get_template('services.html')\n self.response.write(template.render())\n\nclass Faq(webapp2.RequestHandler):\n\n def get(self):\n\n template = JINJA_ENVIRONMENT.get_template('faq.html')\n self.response.write(template.render())\n\nclass Contact(webapp2.RequestHandler):\n\n def get(self):\n\n template = JINJA_ENVIRONMENT.get_template('contact.html')\n self.response.write(template.render())\n\napplication = webapp2.WSGIApplication([\n ('/', HomePage),\n ('/about', About),\n ('/services', Services),\n ('/faqs', Faq),\n ('/contact', Contact),\n], debug=True)","sub_path":"gicareers.py","file_name":"gicareers.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"437408778","text":"import re\nfrom abc import ABC, abstractmethod\nfrom sys import argv as arguments\n\n\nclass ArgumentHandler:\n class InvalidNumberOfArgumentsException(Exception):\n def __init__(self, message: str) -> None:\n self.message = message\n\n def __init__(self, arguments: list) -> None:\n self.__arguments = arguments\n if not self.__arguments_are_valid():\n raise ArgumentHandler.InvalidNumberOfArgumentsException(\n \"There must be one argument indicating filename for the program. 
Entered: {}.\".format(len(self.__arguments) - 1))\n\n def __arguments_are_valid(self) -> bool:\n return len(self.__arguments) == 2\n\n @property\n def input_filename(self) -> str:\n return self.__arguments[1]\n\n\nclass InputFileHandler:\n\n def __init__(self, filename: str) -> None:\n with open(filename, \"r\", encoding=\"utf-8\") as input_file:\n self.__raw_content = input_file.read()\n\n @property\n def commands(self):\n return self.__raw_content.split(\"\\n\")\n\n\nclass CommandHandler:\n\n def __init__(self, commands: list) -> None:\n self.commands = commands\n\n @staticmethod\n def operation_dict(args: str) -> dict:\n \"\"\"Returns a dictionary containing command objects for specified arguments.\"\"\"\n return {\n Command.CREATE_HALL: CreateHall(args),\n Command.SELL_TICKET: SellTicket(args),\n Command.CANCEL_TICKET: CancelTicket(args),\n Command.BALANCE: Balance(args),\n Command.SHOW_HALL: ShowHall(args)\n }\n\n def execute_all(self) -> None:\n for command in self.commands:\n try:\n args = Command.get_command_arg_as_str(command)\n CommandHandler.operation_dict(args)[Command.get_command_base(command).upper()].execute()\n except (KeyError, IndexError):\n if len(command) != 0:\n Output.print(\"The command \\'{}\\' is invalid.\".format(command), prefix=Output.Prefix.ERROR)\n except (Command.InvalidCommandException, Command.BadArgumentException) as e:\n Output.print(e.message, prefix=Output.Prefix.ERROR)\n except Command.IllegalOperationException as iae2:\n Output.print(iae2.message, prefix=Output.Prefix.WARNING)\n\n\nclass Command:\n CREATE_HALL = \"CREATEHALL\"\n SELL_TICKET = \"SELLTICKET\"\n CANCEL_TICKET = \"CANCELTICKET\"\n BALANCE = \"BALANCE\"\n SHOW_HALL = \"SHOWHALL\"\n\n class InvalidCommandException(Exception):\n def __init__(self, message: str) -> None:\n self.message = message\n\n class BadArgumentException(Exception):\n def __init__(self, message: str) -> None:\n self.message = message\n\n class IllegalOperationException(Exception):\n def __init__(self, message: str) -> None:\n self.message = message\n\n @staticmethod\n def get_command_base(command: str) -> str:\n return command.split()[0]\n\n @staticmethod\n def get_command_arg_as_str(command: str) -> str:\n return \" \".join(command.split()[1:])\n\n\nclass HallException:\n class HallDoesNotExistException(Command.IllegalOperationException):\n pass\n\n class HallColumnOutOfRangeException(Command.BadArgumentException):\n pass\n\n class HallRowOutOfRangeException(Command.BadArgumentException):\n pass\n\n class SeatAlreadyOccupiedException(Command.IllegalOperationException):\n pass\n\n class HallAlreadyCreatedException(Command.IllegalOperationException):\n pass\n\n\nclass BaseCommand(ABC):\n \"\"\"An abstract class for all command objects.\"\"\"\n\n @abstractmethod\n def execute(self):\n pass\n\n\nclass CreateHall(BaseCommand):\n REGEX = r\"[^ ]+ \\d+x\\d+\"\n\n def __init__(self, args: str) -> None:\n self.args = args\n\n def check_validity(self) -> None:\n arg_list = self.args.split()\n if re.fullmatch(CreateHall.REGEX, self.args) is not None:\n hall_name_arg = arg_list[0]\n hall_size_arg = arg_list[1]\n rows = int(hall_size_arg.split(\"x\")[0])\n columns = int(hall_size_arg.split(\"x\")[1])\n\n if rows > 26:\n raise Command.BadArgumentException(\"Number of rows cannot be greater than 26. 
Entered: {}.\".format(int(rows)))\n\n            if rows == 0:\n                raise Command.BadArgumentException(\"Number of rows cannot be 0.\")\n\n            if columns == 0:\n                raise Command.BadArgumentException(\"Number of columns cannot be 0.\")\n\n            for hall in Cinema.halls.values():\n                if hall.name == hall_name_arg:\n                    raise HallException.HallAlreadyCreatedException(\"Cannot create the hall \\'{0}\\' for a second time. Cinema already has {0}.\".format(hall.name))\n        else:\n            if len(arg_list) == 2:\n                hall_size_arg = arg_list[1]\n                if \"x\" in hall_size_arg:\n                    hall_size_arg_list = hall_size_arg.split(\"x\")\n                    if len(hall_size_arg_list) > 2:\n                        raise Command.BadArgumentException(\"Cannot create a hall that has three or more dimensions.\")\n                    rows = hall_size_arg_list[0]\n                    columns = hall_size_arg_list[1]\n                    if not rows.isdigit():\n                        raise Command.BadArgumentException(\"Row size \\'{}\\' must be a positive integer.\".format(rows))\n                    if not columns.isdigit():\n                        raise Command.BadArgumentException(\"Column size \\'{}\\' must be a positive integer.\".format(columns))\n                else:\n                    raise Command.BadArgumentException(\"Second argument must contain the \\'x\\' character in order to construct the hall plan.\")\n            else:\n                raise Command.BadArgumentException(\"Number of arguments for CREATEHALL must be 2, which is {} {} than the current one.\".format(\n                    abs(2 - len(arg_list)), \"less\" if len(arg_list) > 2 else \"more\"))\n\n    def get_args(self) -> tuple:\n        self.check_validity()\n        return self.args.split()[0], self.args.split()[1]\n\n    def execute(self) -> None:\n        hall_name_arg, hall_size_arg = self.get_args()\n        hall = Hall(hall_name_arg, int(hall_size_arg.split(\"x\")[0]), int(hall_size_arg.split(\"x\")[1]))\n        Cinema.halls.update({hall.name: hall})  # Adds a new hall object into the dictionary.\n        Output.print(\"The hall \\'{}\\' having {} seat{} has been successfully created.\".format(hall.name, hall.hall_size, \"s\" if hall.hall_size > 1 else \"\"),\n                     prefix=Output.Prefix.SUCCESS)\n\n\nclass SellTicket(BaseCommand):\n    REGEX = r\"[^ ]+ (full|student) [^ ]+ .+\"\n\n    SEAT_NAME_REGEX = r\"(([a-z]|[A-Z])(\\d+))|((([a-z]|[A-Z])(\\d+)-(\\d+)))\"\n\n    def __init__(self, args: str) -> None:\n        self.args = args\n\n    @property\n    def customer_name(self):\n        return self.args.split()[0]\n\n    @property\n    def customer_fare(self):\n        return self.args.split()[1]\n\n    @property\n    def hall_name(self):\n        return self.args.split()[2]\n\n    @property\n    def seat_arguments(self):\n        return self.args.split()[3:]\n\n    def sell(self, seat_arg: str) -> None:\n        customer = Customer(self.customer_name, self.customer_fare)\n        if Seat.is_seat_range(seat_arg):\n            for absolute_seat in SellTicket.get_absolute_seats(seat_arg):\n                position = (Util.get_letter_index(absolute_seat[0]), int(absolute_seat[1:]))\n                Cinema.halls[self.hall_name].add_customer(position, customer)\n        else:\n            position = (Util.get_letter_index(seat_arg[0]), int(seat_arg[1:]))\n            Cinema.halls[self.hall_name].add_customer(position, customer)\n        Output.print(\"{} has bought {} at {}.\".format(self.customer_name, seat_arg, self.hall_name), prefix=Output.Prefix.SUCCESS)\n\n    def execute(self) -> None:\n        self.check_command_validity()\n        self.start_selling()\n\n    def start_selling(self) -> None:\n        \"\"\"Treats every seat individually via try-except.\"\"\"\n        for seat_arg in self.seat_arguments:\n            try:\n                self.check_seat_arg_validity(seat_arg)\n                self.sell(seat_arg)\n            except (HallException.HallColumnOutOfRangeException, HallException.HallRowOutOfRangeException, Command.BadArgumentException) as e:\n                Output.print(e.message, prefix=Output.Prefix.ERROR)\n            except 
HallException.SeatAlreadyOccupiedException as sao:\n Output.print(sao.message, prefix=Output.Prefix.WARNING)\n\n def check_seat_arg_validity(self, seat_arg: str) -> None:\n if re.fullmatch(SellTicket.SEAT_NAME_REGEX, seat_arg) is None:\n raise Command.BadArgumentException(\"The seat argument \\'{}\\' is invalid.\".format(seat_arg))\n\n seat_matrix = Cinema.halls[self.hall_name].seat_matrix\n\n row_index = Util.get_letter_index(seat_arg[0])\n\n if row_index >= Cinema.halls[self.hall_name].rows:\n raise HallException.HallRowOutOfRangeException(\n \"The hall \\'{}\\' has fewer rows than the specified row {}.\".format(self.hall_name, seat_arg[0]))\n\n if Seat.is_seat_range(seat_arg):\n seat_column_slice = seat_arg[1:].split(\"-\")\n seat_column_start = int(seat_column_slice[0])\n seat_column_end = int(seat_column_slice[1])\n\n if seat_column_start >= seat_column_end:\n raise Command.BadArgumentException(\n \"Cannot sell seats \\'{}\\', because the first index of a seat range cannot be greater than or equal to the last index.\".format(seat_arg))\n\n if seat_column_end >= len(seat_matrix[row_index]) + 1:\n raise HallException.HallColumnOutOfRangeException(\n \"The hall \\'{}\\' has fewer columns than the specified index in {}.\".format(self.hall_name, seat_arg))\n\n if False in [(seat.customer is None) or seat.customer.name == self.customer_name for seat in\n [seat_matrix[row_index][c] for c in range(seat_column_start, seat_column_end)]]:\n raise HallException.SeatAlreadyOccupiedException(\n \"The seats {} at {} cannot be sold to {}, because some of them have already been sold.\".format(seat_arg, self.hall_name, self.customer_name))\n else:\n column_index = int(seat_arg[1:])\n\n if column_index >= len(seat_matrix[row_index]):\n raise HallException.HallColumnOutOfRangeException(\n \"The hall \\'{}\\' has fewer columns than the specified index in {}.\".format(self.hall_name, seat_arg))\n\n seat = seat_matrix[row_index][column_index]\n if seat.customer is not None:\n raise HallException.SeatAlreadyOccupiedException(\n \"The seat {} at {} cannot be sold to {}, because they already bought it.\".format(seat_arg, self.hall_name, self.customer_name)\n if seat.customer.name == self.customer_name\n else \"The seat {} at {} cannot be sold to {} because it has already been sold to {}.\".format(seat_arg, self.hall_name, self.customer_name, seat.customer.name))\n\n def check_command_validity(self) -> None:\n arg_list = self.args.split()\n if re.fullmatch(SellTicket.REGEX, self.args) is not None:\n if self.hall_name not in Cinema.halls.keys():\n raise HallException.HallDoesNotExistException(\n \"Cannot sell ticket to {} because the hall {} does not exist.\".format(self.customer_name, self.hall_name))\n else:\n if len(arg_list) < 4:\n raise Command.InvalidCommandException(\"The command SELLTICKET must have at least 4 arguments. 
Only {} entered.\".format(len(arg_list)))\n if arg_list[1].lower() != Customer.FARE_STUDENT and arg_list[1].lower() != Customer.FARE_FULL:\n raise Command.BadArgumentException(\"Second argument of SELLTICKET must indicate fare: full or student.\")\n raise Command.InvalidCommandException(\"The command \\\"SELLTICKET {}\\\" is invalid.\".format(self.args))\n\n @staticmethod\n def get_absolute_seats(seat_arg: str) -> list:\n \"\"\"\n Returns absolute seat list for provided seat range.\n For example, for seat range A3-6, returns ['A3', 'A4', 'A5'].\n \"\"\"\n abs_seats = []\n if \"-\" in seat_arg:\n row_letter = seat_arg[0].upper()\n seat_column_slice = seat_arg[1:].split(\"-\")\n seat_column_start = int(seat_column_slice[0])\n seat_column_end = int(seat_column_slice[1])\n abs_seats.extend([row_letter + str(column) for column in range(seat_column_start, seat_column_end)])\n else:\n abs_seats.append(seat_arg)\n return abs_seats\n\n\nclass CancelTicket(BaseCommand):\n REGEX = r\"[^ ]+ .+\"\n\n SEAT_NAME_REGEX = r\"(([a-z]|[A-Z])(\\d+))|((([a-z]|[A-Z])(\\d+)-(\\d+)))\"\n\n def __init__(self, args: str) -> None:\n self.args = args\n\n def execute(self):\n self.check_command_validity()\n self.start_cancelling()\n\n def cancel(self, seat_arg: str) -> None:\n already_free = False\n seat_range_all_occupied = False\n seats_already_free = []\n is_seat_range = Seat.is_seat_range(seat_arg)\n absolute_seats = SellTicket.get_absolute_seats(seat_arg)\n\n if is_seat_range:\n for absolute_seat in absolute_seats:\n position = (Util.get_letter_index(absolute_seat[0]), int(absolute_seat[1:]))\n if self.hall.get_seat(position).customer is None:\n seats_already_free.append(absolute_seat)\n if len(seats_already_free) == len(absolute_seats):\n already_free = True\n if len(seats_already_free) == 0:\n seat_range_all_occupied = True\n else:\n position = (Util.get_letter_index(seat_arg[0]), int(seat_arg[1:]))\n if self.hall.get_seat(position).customer is None:\n already_free = True\n\n if not already_free:\n if is_seat_range:\n if seat_range_all_occupied:\n for absolute_seat in absolute_seats:\n position = (Util.get_letter_index(absolute_seat[0]), int(absolute_seat[1:]))\n self.hall.remove_customer(position)\n Output.print(\"The ticket for seat range {} at {} has been cancelled.\".format(seat_arg, self.hall_name), prefix=Output.Prefix.SUCCESS)\n else:\n for absolute_seat in absolute_seats:\n position = (Util.get_letter_index(absolute_seat[0]), int(absolute_seat[1:]))\n if self.hall.get_seat(position).customer is None:\n Output.print(\"The seat {} at {} is already free. Nothing to cancel.\".format(absolute_seat, self.hall_name), prefix=Output.Prefix.ERROR)\n else:\n self.hall.remove_customer(position)\n Output.print(\"The ticket for seat {} at {} has been cancelled.\".format(absolute_seat, self.hall_name), prefix=Output.Prefix.SUCCESS)\n else:\n position = (Util.get_letter_index(seat_arg[0]), int(seat_arg[1:]))\n self.hall.remove_customer(position)\n Output.print(\"The ticket for seat {} at {} has been cancelled.\".format(seat_arg, self.hall_name), prefix=Output.Prefix.SUCCESS)\n else:\n message_already_free = \"The seat{} {} at {} is already free. 
Nothing to cancel.\".format(\"s\" if is_seat_range else \"\", seat_arg, self.hall_name)\n Output.print(message_already_free, prefix=Output.Prefix.SUCCESS)\n\n def start_cancelling(self) -> None:\n for seat_arg in self.seat_arguments:\n try:\n self.check_seat_arg_validity(seat_arg)\n self.cancel(seat_arg)\n except (HallException.HallColumnOutOfRangeException, HallException.HallRowOutOfRangeException, Command.BadArgumentException) as e:\n Output.print(e.message, prefix=Output.Prefix.ERROR)\n\n def check_seat_arg_validity(self, seat_arg: str) -> None:\n if re.fullmatch(CancelTicket.SEAT_NAME_REGEX, seat_arg) is None:\n raise Command.BadArgumentException(\"The seat argument \\'{}\\' is invalid.\".format(seat_arg))\n\n seat_matrix = Cinema.halls[self.hall_name].seat_matrix\n\n row_index = Util.get_letter_index(seat_arg[0])\n\n if row_index >= Cinema.halls[self.hall_name].rows:\n raise HallException.HallRowOutOfRangeException(\n \"The hall \\'{}\\' has less row than the specified row {}.\".format(self.hall_name, seat_arg[0]))\n\n if Seat.is_seat_range(seat_arg):\n seat_column_slice = seat_arg[1:].split(\"-\")\n seat_column_start = int(seat_column_slice[0])\n seat_column_end = int(seat_column_slice[1])\n if seat_column_start >= seat_column_end:\n raise Command.BadArgumentException(\n \"Cannot cancel tickets for seats \\'{}\\', because first index of seat range cannot be greater than or equal to last index.\".format(seat_arg))\n\n if seat_column_end >= len(seat_matrix[row_index]) + 1:\n raise HallException.HallColumnOutOfRangeException(\n \"The hall \\'{}\\' has less column than the specified index in {}.\".format(self.hall_name, seat_arg))\n else:\n column_index = int(seat_arg[1:])\n if column_index >= len(seat_matrix[row_index]):\n raise HallException.HallColumnOutOfRangeException(\n \"The hall \\'{}\\' has less column than the specified index in {}.\".format(self.hall_name, seat_arg))\n\n @property\n def hall_name(self):\n return self.args.split()[0]\n\n @property\n def hall(self):\n return Cinema.halls[self.hall_name]\n\n @property\n def seat_arguments(self):\n return self.args.split()[1:]\n\n def check_command_validity(self):\n arg_list = self.args.split()\n if re.fullmatch(CancelTicket.REGEX, self.args) is not None:\n hall_name = arg_list[0]\n seat_args = arg_list[1:]\n\n if hall_name not in Cinema.halls.keys():\n raise HallException.HallDoesNotExistException(\n \"Cannot cancel ticket{} because the hall {} does not exist\".format(\"s\" if len(seat_args) > 1 else \"\", hall_name))\n else:\n if len(arg_list) < 2:\n raise Command.InvalidCommandException(\"The command CANCELTICKET must have at least 2 arguments. 
Only {} entered.\".format(len(arg_list)))\n raise Command.InvalidCommandException(\"The command \\\"CANCELTICKET {}\\\" is invalid.\".format(self.args))\n\n\nclass Balance(BaseCommand):\n PRICE_STUDENT = 5\n PRICE_FULL = 10\n\n def __init__(self, args: str) -> None:\n self.args = args\n\n def execute(self):\n if len(self.hall_names) < 1:\n raise Command.InvalidCommandException(\"The command BALANCE must include at least one argument.\")\n for name in self.hall_names:\n if name in Cinema.halls.keys():\n title = \"Hall report of \\'{}\\'\".format(name)\n Output.print(title, prefix=Output.Prefix.SUCCESS)\n Output.print(\"-\" * len(title), prefix=\" \" * Output.Prefix.PADDING)\n Output.print(Cinema.halls[name].balance)\n else:\n Output.print(\"Cannot show hall report of \\'{}\\' because the hall does not exist.\".format(name), prefix=Output.Prefix.ERROR)\n\n @property\n def hall_names(self):\n return self.args.split()\n\n\nclass ShowHall(BaseCommand):\n\n def __init__(self, args: str) -> None:\n self.args = args\n\n def execute(self):\n if len(self.hall_names) < 1:\n raise Command.InvalidCommandException(\"The command SHOWHALL must include at least one argument.\")\n for name in self.hall_names:\n if name in Cinema.halls.keys():\n Output.print(\"Showing hall \\'{}\\' below:\".format(name), prefix=Output.Prefix.SUCCESS)\n Output.print(str(Cinema.halls[name]))\n else:\n Output.print(\"Cannot show hall \\'{}\\' because it does not exist.\".format(name), prefix=Output.Prefix.ERROR)\n\n @property\n def hall_names(self):\n return self.args.split()\n\n\nclass Customer:\n FARE_FULL = \"full\"\n FARE_STUDENT = \"student\"\n\n def __init__(self, name: str, fare: str) -> None:\n self.name = name\n self.fare = fare\n\n\nclass Seat:\n SEAT_EMPTY = \"X\" # Personal note: This would have been much kinder to the eyes if this was simply \".\"\n SEAT_STUDENT = \"S\"\n SEAT_FULL = \"F\"\n\n def __init__(self, customer) -> None:\n self.customer = customer\n\n def __str__(self):\n if self.customer is None:\n return Seat.SEAT_EMPTY\n if self.customer.fare == \"student\":\n return Seat.SEAT_STUDENT\n if self.customer.fare == \"full\":\n return Seat.SEAT_FULL\n\n @staticmethod\n def is_seat_range(seat_arg: str) -> bool:\n return \"-\" in seat_arg\n\n\nclass Hall:\n\n def __init__(self, name: str, rows: int, columns: int) -> None:\n self.name = name\n self.rows = rows\n self.columns = columns\n self.seat_matrix = [[Seat(None) for _ in range(self.columns)] for _ in range(self.rows)]\n\n def __str__(self):\n string = \"\"\n padding = len(str(self.columns - 1)) + 1 # Dynamically adds padding according to number of columns.\n padding_from_start = 3 # Output.Prefix.PADDING + 1\n for row in range(self.rows, -1, -1): # Starts from the last, putting row A to the bottom.\n string += \" \" * (padding_from_start - 1)\n if row != self.rows:\n string += Util.LETTERS[row]\n for column in range(self.columns + 1):\n if row != self.rows and column != self.columns:\n string += str(self.seat_matrix[row][column]).rjust(padding if column != 0 else 2)\n string += \"\\n\"\n string += \" \" * padding_from_start + \" 0\" + \"\".join([str(i).rjust(padding) for i in range(1, self.columns)])\n string += \"\\n\"\n return string\n\n @property\n def balance(self) -> str:\n balance = \" \" * Output.Prefix.PADDING\n revenue_student = self.number_of_students * Balance.PRICE_STUDENT\n revenue_full = self.number_of_full * Balance.PRICE_FULL\n balance += \"Revenue from students: {} liras, \".format(revenue_student)\n balance += \"Revenue from full fares: 
{} liras, \".format(revenue_full)\n balance += \"Overall revenue: {} liras\".format(revenue_full + revenue_student)\n return balance\n\n @property\n def hall_size(self) -> int:\n return self.rows * self.columns\n\n @property\n def number_of_students(self) -> int:\n n = 0\n for row_index in range(len(self.seat_matrix)):\n for column_index in range(len(self.seat_matrix[row_index])):\n seat = self.seat_matrix[row_index][column_index]\n if seat.customer is not None and seat.customer.fare == Customer.FARE_STUDENT:\n n += 1\n return n\n\n @property\n def number_of_full(self) -> int:\n n = 0\n for row_index in range(len(self.seat_matrix)):\n for column_index in range(len(self.seat_matrix[row_index])):\n seat = self.seat_matrix[row_index][column_index]\n if seat.customer is not None and seat.customer.fare == Customer.FARE_FULL:\n n += 1\n return n\n\n def add_customer(self, position: tuple, customer: Customer) -> None:\n self.seat_matrix[position[0]][position[1]] = Seat(customer)\n\n def remove_customer(self, position: tuple) -> None:\n self.seat_matrix[position[0]][position[1]] = Seat(None)\n\n def get_seat(self, position: tuple) -> Seat:\n return self.seat_matrix[position[0]][position[1]]\n\n\nclass Cinema:\n halls = dict() # A dictionary that holds all hall objects.\n\n\nclass Util:\n LETTERS = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n\n @staticmethod\n def get_letter_index(letter) -> int:\n return Util.LETTERS.index(letter.upper())\n\n\nclass Output:\n output_file = open(\"out.txt\", \"w\", encoding=\"utf-8\")\n\n class Prefix: # Respecting example output format.\n PADDING = 9 # 10\n ERROR = \"Error:\".ljust(PADDING) # \"[ERROR]\"\n WARNING = \"Warning:\".ljust(PADDING) # \"[WARNING]\"\n SUCCESS = \"Success:\".ljust(PADDING) # \"[SUCCESS]\"\n\n @staticmethod\n def print(value: object, prefix: str = None) -> None:\n print(str(value) if prefix is None else prefix + str(value))\n print(str(value) if prefix is None else prefix + str(value), file=Output.output_file)\n\n\ndef main() -> None:\n argument_handler = ArgumentHandler(arguments)\n input_file_handler = InputFileHandler(argument_handler.input_filename)\n command_handler = CommandHandler(input_file_handler.commands)\n command_handler.execute_all()\n\n\nif __name__ != \"__main__\":\n Output.print(\"You are trying to run this program by importing as a module.\", prefix=Output.Prefix.WARNING)\n\ntry:\n main()\nexcept ArgumentHandler.InvalidNumberOfArgumentsException as iae:\n Output.print(iae.message, prefix=Output.Prefix.ERROR)\nexcept FileNotFoundError as fnf:\n Output.print(fnf.strerror + \": {}\".format(fnf.filename), prefix=Output.Prefix.ERROR)\nfinally:\n Output.output_file.close()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":25476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"488805751","text":"# -*- coding:utf-8 -*-\nimport os\nfrom flask_babel import gettext\n\nNAME = 'DEFAULT'\n\nPROJECT_ROOT = '/'.join(os.path.dirname(os.path.abspath(__file__)).split('/')[:-1])\n\nDEBUG = True\nTESTING = False\n\nSECRET_KEY = 'a862e99349d5d437aac910598fc85102c8ae83747d37a07a'\n\nTEST_DATABASE_NAME = 'sentry_test'\n\nSQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://%s:%s@localhost/%s' % (\n 'sentry',\n '',\n 'sentry'\n)\nSQLALCHEMY_ECHO = True\n\nWTF_CSRF_METHODS = ['POST', 'PUT', 'PATCH', 'DELETE']\n\nLOCALE_KO = 'ko'\nLOCALE_EN = 'en'\nLOCALES = [LOCALE_KO, LOCALE_EN]\nLOCALE_NAMES = {\n LOCALE_KO: gettext(u'한국어'),\n LOCALE_EN: gettext(u'영어'),\n}\n\nDEFAULT_LOCALE 
= LOCALE_KO\n\nCOUNTRY_KR = 'KR'\nCOUNTRY_US = 'US'\nCOUNTRIES = [COUNTRY_KR, COUNTRY_US]\nCOUNTRY_NAMES = {\n COUNTRY_KR: gettext(u\"한국\"),\n COUNTRY_US: gettext(u\"미국\"),\n}\nDEFAULT_COUNTRY = COUNTRY_KR\n\nLOCALE_TO_COUNTRY = {\n LOCALE_KO: COUNTRY_KR,\n LOCALE_EN: COUNTRY_US,\n}\n\nCOUNTRY_TO_TIMEZONE = {\n COUNTRY_KR: u'Asia/Seoul',\n COUNTRY_US: u'America/New_York',\n}\n\nDEFAULT_MAX_FILE_SIZE = 1024 * 1024 * 100","sub_path":"config/default.py","file_name":"default.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"70664051","text":"import threading\nfrom sklearn.externals import joblib\nfrom kafka import KafkaConsumer, KafkaProducer\nimport json\nimport time\nimport sys\nsys.path.append(\"..\")\nfrom db.mysql import mysql\nfrom config import mysql_args as db_args\n\nclass job(threading.Thread):\n\n def __init__(self, app_id, model_path, app_input, kafka_servers, timeout=60, mysql_args=db_args):\n super(job, self).__init__()\n self.__flag = threading.Event() # flag used to pause the thread\n self.__flag.set() # set to True\n self.__running = threading.Event() # flag used to stop the thread\n self.__running.set() # set running to True\n self.app_id = app_id\n self.app_input = {}\n self.timeout = timeout\n self.checkout = int(time.time()) % self.timeout\n #self.kafka_servers = kafka_servers\n self.model = joblib.load(model_path)\n self.consumer = KafkaConsumer('deviceData',\n bootstrap_servers=kafka_servers,\n group_id='app_'+str(self.app_id))\n # enable_auto_commit=False,\n # auto_offset_reset='latest')\n self.producer = KafkaProducer(bootstrap_servers = kafka_servers)\n self.topic = str(app_id)\n for i, item in enumerate(app_input):\n v = self.app_input.setdefault(item['device_id'], {})\n v.setdefault(item['type'], i)\n #print(self.app_input)\n self.mysql_args = mysql_args\n\n def run(self):\n data = [0.] 
* len(self.app_input)\n db = mysql(**self.mysql_args)\n while self.__running.isSet():\n checkout_tmp = int(time.time()) % self.timeout\n if checkout_tmp != self.checkout:\n self.checkout = checkout_tmp\n sql_select = \"select * from app where app_id = %d and stop > 0\" % (self.app_id)\n if len(list(db.select(sql_select))) > 0:\n print(self.app_id, \" stopped!\")\n db.close()\n break\n # db.close()\n self.__flag.wait() # returns immediately when True; when False, blocks until the internal flag becomes True\n try:\n msg_tmp = list(self.consumer.poll(timeout_ms=5000, max_records=1).values())[0][0]\\\n .value.decode('utf-8').replace('\\'', '\\\"')\n except:\n continue\n #print(list(self.consumer.poll(timeout_ms=5000, max_records=1).values())[0])\n #print(msg_tmp)\n msg_value = json.loads(msg_tmp)\n device_id = msg_value['deviceId']\n if msg_value['deviceId'] in self.app_input:\n for item in msg_value['data']:\n device_type = item['key']\n device_type2id = self.app_input[device_id]\n if device_type in device_type2id:\n data[device_type2id[device_type]] = item['value']\n ret = self.model.predict([data])[0]\n #print(str(ret).encode('utf-8'))\n sql_update = \"update app set timestamp = %d, predict = '%s'\" \\\n \"where app_id = %d\" % (int(time.time()), str(ret), self.app_id)\n db.update(sql_update)\n #self.producer.send(self.topic, str(ret).encode('utf-8'))\n print(self.app_id, \" is running, predict result is \", ret)\n\n def pause(self):\n self.__flag.clear() # set to False to block the thread\n\n def resume(self):\n self.__flag.set() # set to True to unblock the thread\n\n def stop(self):\n self.__flag.set() # resume the thread from the paused state, if it was paused\n self.__running.clear() # set to False\n\nif __name__ == '__main__':\n #t = job(1,2,3,4)\n import pickle\n print(mysql)\n #pickle.dumps(t)","sub_path":"util/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"601964970","text":"# -*- coding: UTF-8 -*-\nimport numpy\n\n\n# Get the spectrum matrix\ndef getM(dirPosition):\n return numpy.load(dirPosition + \"/numpyDataDir/gcov1.npy\")\n\n\n# Get the result matrix of whether each test case passed\ndef getR(dirPosition):\n return numpy.load(dirPosition + \"/numpyDataDir/answerNumpy.npy\")\n\n\n# Get the matrices containing only the failed (MF) and passed (MP) test cases\ndef getMFAndMP(gcovList, answerList, MF, MP):\n for i in range(0, answerList.__len__()):\n if answerList[i] == 0:\n MF.append(gcovList[i])\n else:\n MP.append(gcovList[i])\n\n\n# Get the number of failed test cases\ndef getTF(answerList):\n count = 0\n for item in answerList:\n if item == 0:\n count = count + 1\n return count\n\n\n# Get the fai value\ndef getSum(mf, line):\n temp = 0\n tempList = numpy.zeros(mf.__len__(), dtype=numpy.int)\n for i in range(line.__len__()):\n if line[i] == 1:\n tempList = tempList + mf[:, i]\n for item in tempList:\n if item > 0:\n temp = temp + 1\n return temp\n\n\n# Main function\n# dirPosition: path of the folder containing the faulty program (e.g. for /home/temp.c, pass /home)\n# Finally produces an Inialize.npy file storing the generated initial population, placed in the numpyDataDir folder\ndef Inialize(dirPosition):\n M = getM(dirPosition)\n R = getR(dirPosition)\n # M = numpy.array(\n # [[1, 1, 1, 1, 1, 1],\n # [1, 1, 1, 1, 1, 1],\n # [1, 1, 1, 1, 1, 1],\n # [1, 1, 1, 1, 1, 1],\n # [1, 1, 0, 1, 0, 1],\n # [1, 1, 0, 1, 0, 1],\n # [1, 1, 1, 1, 1, 1],\n # [1, 1, 0, 1, 1, 1],\n # [1, 1, 1, 1, 1, 1]])\n # M = M.T\n # R = numpy.array([0, 0, 0, 0, 1, 1])\n MF = []\n MP = []\n getMFAndMP(M, R, MF, MP)\n MF = numpy.array(MF)\n MP = numpy.array(MP)\n TF = getTF(R)\n n = M[0].__len__()\n nList = numpy.eye(n, dtype=numpy.int)\n i = 1\n flag = 0\n lineList = []\n individual = []\n while i <= n:\n if flag == 0:\n lineList = nList[i - 1]\n cSum = getSum(MF, 
lineList)\n if cSum != TF:\n flag = 1\n lineNum = 0\n max = 0\n for l in range(n):\n tempList = list(lineList)\n if tempList[l] != 1:\n tempList[l] = 1\n cSum = getSum(MF, tempList)\n if cSum == TF:\n individual.append(tempList)\n flag = 0\n i = i + 1\n break\n elif cSum > max:\n max = cSum\n lineNum = l\n lineList[lineNum] = 1\n else:\n individual.append(lineList)\n flag = 0\n i = i + 1\n numpy.save(dirPosition + \"/numpyDataDir/Inialize.npy\", individual)\n numpy.save(dirPosition + \"/numpyDataDir/MF.npy\", MF)\n numpy.save(dirPosition + \"/numpyDataDir/MP.npy\", MP)\n\n\n# if __name__ == '__main__':\n# Inialize(\"/home/kalasu/PycharmProjects/tot_info\")\n","sub_path":"calculator/inherited/InializeMain.py","file_name":"InializeMain.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"183445226","text":"import librosa\r\nimport scipy.io.wavfile as wavfile\r\nimport numpy as np\r\nfrom keras.optimizers import Adam\r\nfrom keras.models import model_from_json\r\nimport datetime\r\nimport os\r\n\r\n\r\ndef extract_mfcc(s,sr, win_len, hop_len):\r\n mfcc40 = librosa.feature.mfcc(y=s, sr=sr, n_fft =win_len, hop_length=hop_len, n_mfcc=40)\r\n return mfcc40\r\n\r\n\r\ndef extract_spectral(s, win_len, hop_len):\r\n spectral_complex = librosa.stft(y=s, n_fft=win_len, hop_length=hop_len)\r\n spectral = np.abs(spectral_complex)\r\n spectral = spectral / np.sum(spectral, axis=0) # normalize using \"sum-to-one\"\r\n return spectral\r\n\r\n\r\ndef contextWin(fea_orig, n_contextWin = 2):\r\n # Feature processing\r\n fea_orig = fea_orig[:, :-1]\r\n n_dims, n_feas = fea_orig.shape\r\n\r\n # construct feaMat\r\n new_fea = np.empty((n_feas, n_dims * (1 + 2 * n_contextWin)))\r\n for i in range(n_feas):\r\n if (i - n_contextWin) >= 0 and (i + n_contextWin) < n_feas:\r\n row = fea_orig[:, i - n_contextWin:i + n_contextWin + 1].flatten()\r\n else:\r\n row = np.zeros(new_fea[0].shape)\r\n for w in range(i - n_contextWin, i + n_contextWin + 1):\r\n if n_feas > w >= 0:\r\n k = w - (i - n_contextWin)\r\n row[k * n_dims:(k + 1) * n_dims] = fea_orig[:, w]\r\n new_fea[i] = row\r\n\r\n # save new feature matrix\r\n return new_fea\r\n\r\n\r\ndef confidence_ensemble(probs_1,probs_2):\r\n l1 = abs(probs_1 - 0.5) > abs(probs_2-0.5)\r\n l2 = ~l1\r\n ensemble_probs = l1 * probs_1 + l2 * probs_2\r\n return ensemble_probs\r\n\r\n\r\ndef compress_label(labelMat, threshold = 0.5):\r\n \"\"\"\r\n input: labelMat is a (n_wins,n_dims) matrix.\r\n output: ret, a row vector of size (1, n_dims)\r\n Compress \"labelMat\" from a matrix to a row vector \"ret\", where ret[i] = 1 if any(labelMat[:,i] == 1), otherwise 0\r\n \"\"\"\r\n label_dims = labelMat.shape[1]\r\n ret = np.empty(labelMat[0,:].shape)\r\n for i in range(label_dims):\r\n ret[i] = 1 if any(labelMat[:,i]>=threshold) else 0\r\n return ret\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # parameter configurations: define stream chunk size\r\n fs = 44100\r\n chunkSize_in_sec = 1\r\n chunk_size = fs * chunkSize_in_sec\r\n win_len = int(fs * 0.04)\r\n hop_len = int(fs * 0.02)\r\n context_chunk_size = win_len # add context of 1 second before and after current chunk\r\n audio_file = \"../combined_01.wav\"\r\n dir_classifier = '../target(1)_basic_tanh'\r\n label_gt_file = \"../all_label.npy\"\r\n\r\n # load ground truth label\r\n label_gt = np.load(label_gt_file)\r\n label_gt = label_gt[:, [0,1,2]]\r\n class_label = ['smokeAlarm', 'dogbarking','doorbell']\r\n\r\n # load the raw audio 
stream\r\n fs, data_stream = wavfile.read(audio_file)\r\n if data_stream.dtype == np.dtype('int16'):\r\n data_stream = data_stream.astype('float32') / np.iinfo(np.dtype('int16')).min\r\n\r\n # load mfcc model\r\n json_file_mfcc = open(os.path.join(dir_classifier, \"model_target(1).json\"), 'r')\r\n model_json_mfcc = json_file_mfcc.read()\r\n json_file_mfcc.close()\r\n mfcc_model = model_from_json(model_json_mfcc)\r\n # load weights into new model\r\n mfcc_model.load_weights(os.path.join(dir_classifier, \"model_target(1).h5\"))\r\n print(\"Loaded mfcc model from disk\")\r\n # compile model\r\n mfcc_model.compile(optimizer=Adam(lr=0.00001), loss='binary_crossentropy', metrics=['accuracy'])\r\n\r\n # load spectral model\r\n json_file_spectral = open(os.path.join(dir_classifier, \"model_target(2).json\"), 'r')\r\n model_json_spectral = json_file_spectral.read()\r\n json_file_spectral.close()\r\n spectral_model = model_from_json(model_json_spectral)\r\n # load weights into new model\r\n spectral_model.load_weights(os.path.join(dir_classifier, \"model_target(2).h5\"))\r\n print(\"Loaded spectral model from disk\")\r\n # compile model\r\n spectral_model.compile(optimizer=Adam(lr=0.00001), loss='binary_crossentropy', metrics=['accuracy'])\r\n\r\n # wait for audio to play in local machine\r\n raw_input(\"Waiting for audio playing in local machine.\\nPress enter to start running: \")\r\n print(\"Sound event detection starts.....\")\r\n\r\n # read data chunk by chunk from the audio stream; process each chunk, including playing the chunk and classification\r\n t = 0 # time stamp\r\n\r\n for i in range(chunk_size,len(data_stream), chunk_size):\r\n\r\n # read the data chunk\r\n start_time = datetime.datetime.now()\r\n data_chunk = data_stream[i - context_chunk_size : min(i+chunk_size+context_chunk_size, len(data_stream))]\r\n\r\n # extract the spectral features\r\n mfcc40 = extract_mfcc(data_chunk, fs, win_len, hop_len)\r\n mfcc40 = contextWin(mfcc40)\r\n spectral = extract_spectral(data_chunk, win_len, hop_len)\r\n spectral = contextWin(spectral)\r\n\r\n # classification\r\n probs_mfcc = mfcc_model.predict(mfcc40)\r\n preds_mfcc = compress_label(probs_mfcc[context_chunk_size/hop_len : -context_chunk_size/hop_len,:])\r\n probs_spectral = spectral_model.predict(spectral)\r\n preds_spectral = compress_label(probs_spectral[context_chunk_size / hop_len :\r\n -context_chunk_size / hop_len, :])\r\n probs = confidence_ensemble(probs_mfcc, probs_spectral)\r\n predictions = compress_label(probs[context_chunk_size/hop_len : -context_chunk_size/hop_len,:])\r\n\r\n # construct strings to show the detection results\r\n t = t + 1 # current time stamp: (offset from beginning) + 1\r\n print_str = \"\" # classification result string\r\n for j, preds in enumerate(predictions):\r\n if preds > 0:\r\n print_str += class_label[j] + \", \"\r\n print_str = print_str[:-len(\", \")]\r\n if not print_str:\r\n print_str = \"None\"\r\n gt_string = \"\" # ground truth string\r\n gt = compress_label(label_gt[t: t + chunkSize_in_sec,:])\r\n for j, label in enumerate(gt):\r\n if label > 0:\r\n gt_string += class_label[j] + \", \"\r\n if gt_string:\r\n gt_string = gt_string[:-len(\", \")]\r\n\r\n # wait for the time\r\n delta = datetime.datetime.now()-start_time\r\n while delta.microseconds < (chunkSize_in_sec*1000000-200):\r\n delta = datetime.datetime.now() - start_time\r\n\r\n # print the result strings\r\n print('\\r\\n')\r\n print(\"The {}th second:\".format(t-1))\r\n print(print_str)\r\n 
print(\"({})\".format(gt_string))\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"model/A_Demo-Dog-alarm-doorbell.py","file_name":"A_Demo-Dog-alarm-doorbell.py","file_ext":"py","file_size_in_byte":6539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"90819663","text":"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras' base preprocessing layer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\nfrom tensorflow.python.platform import test\n\n\nclass PreprocessingLayerTest(test.TestCase):\n \"\"\"Base test class for preprocessing layer API validation.\"\"\"\n\n def assert_accumulator_equal(self, combiner, acc1, acc2, message=None):\n data_1 = combiner.extract(acc1)\n data_2 = combiner.extract(acc2)\n self.assertAllClose(data_1, data_2, msg=message)\n\n def validate_accumulator_computation(self, combiner, data, expected):\n \"\"\"Validate that various combinations of compute and merge are identical.\"\"\"\n if len(data) < 4:\n raise AssertionError(\"Data must have at least 4 elements.\")\n data_0 = np.array([data[0]])\n data_1 = np.array([data[1]])\n data_2 = np.array(data[2:])\n\n single_compute = combiner.compute(data)\n\n all_merge = combiner.merge([\n combiner.compute(data_0),\n combiner.compute(data_1),\n combiner.compute(data_2)\n ])\n self.assert_accumulator_equal(\n combiner,\n single_compute,\n all_merge,\n message=\"Sharding data should not change the data output.\")\n\n unordered_all_merge = combiner.merge([\n combiner.compute(data_1),\n combiner.compute(data_2),\n combiner.compute(data_0)\n ])\n self.assert_accumulator_equal(\n combiner,\n all_merge,\n unordered_all_merge,\n message=\"The order of merge arguments should not change the data \"\n \"output.\"\n )\n\n hierarchical_merge = combiner.merge([\n combiner.compute(data_1),\n combiner.merge([combiner.compute(data_2),\n combiner.compute(data_0)])\n ])\n self.assert_accumulator_equal(\n combiner,\n all_merge,\n hierarchical_merge,\n message=\"Nesting merge arguments should not change the data output.\")\n\n nested_compute = combiner.compute(\n data_0, combiner.compute(data_1, combiner.compute(data_2)))\n self.assert_accumulator_equal(\n combiner,\n all_merge,\n nested_compute,\n message=\"Nesting compute arguments should not change the data output.\")\n\n mixed_compute = combiner.merge([\n combiner.compute(data_0),\n combiner.compute(data_1, combiner.compute(data_2))\n ])\n self.assert_accumulator_equal(\n combiner,\n all_merge,\n mixed_compute,\n message=\"Mixing merge and compute calls should not change the data \"\n \"output.\")\n\n self.assertAllClose(expected, combiner.extract(all_merge))\n\n def validate_accumulator_extract_and_restore(self, combiner, data, expected):\n \"\"\"Validate that the extract<->restore loop loses no 
data.\"\"\"\n acc = combiner.compute(data)\n extracted_data = combiner.extract(acc)\n restored_acc = combiner.restore(extracted_data)\n self.assert_accumulator_equal(combiner, acc, restored_acc)\n self.assertAllClose(expected, combiner.extract(restored_acc))\n\n def validate_accumulator_serialize_and_deserialize(self, combiner, data,\n expected):\n \"\"\"Validate that the serialize<->deserialize loop loses no data.\"\"\"\n acc = combiner.compute(data)\n extracted_data = combiner.serialize(acc)\n restored_acc = combiner.deserialize(extracted_data)\n self.assert_accumulator_equal(combiner, acc, restored_acc)\n self.assertAllClose(expected, combiner.extract(restored_acc))\n\n def validate_accumulator_uniqueness(self, combiner, data, expected):\n \"\"\"Validate that every call to compute creates a unique accumulator.\"\"\"\n acc = combiner.compute(data)\n acc2 = combiner.compute(data)\n self.assertIsNot(acc, acc2)\n self.assertAllClose(expected, combiner.extract(acc))\n","sub_path":"tensorflow/python/keras/layers/preprocessing/preprocessing_test_utils.py","file_name":"preprocessing_test_utils.py","file_ext":"py","file_size_in_byte":4490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"7209426","text":"import os\nimport json\nimport sys\nimport random\nimport configparser\nfrom flask import Flask\nfrom flask import request\nfrom flask import render_template\nfrom flask import send_from_directory\nfrom core import xproductv1\nfrom preprocessor import preprocess\nfrom postprocessor import postprocess\n\nconf = configparser.ConfigParser()\nconf.read(\"../config/test.cfg\")\n\nsrcPath = os.getenv(\"{}_{}\".format(\"src\", \"src_path\"), conf.get(\"src\", \"src_path\"))\np = os.path.abspath(srcPath)\nsys.path.insert(0, p)\n\nfrom util import ConfigReader, DateParser\n\napp = Flask(__name__, static_url_path='')\n\n@app.route(\"/variations\", methods=['POST'])\ndef variations():\n print(\"variations invoked\")\n request.get_data()\n format = request.args.get(\"format\", \"json\")\n high = int(request.args.get(\"count\", \"100\"))\n payload = request.json\n print(\"fetched payload: {}\".format(payload))\n resp = {}\n http_status_code = 200\n try:\n entity = payload[\"entity\"]\n entity_name = entity[\"entity_name\"]\n entity_type = entity[\"entity_type\"]\n sets = preprocess(entity, high)\n reordering = [i for i in range(len(sets))]\n random.shuffle(reordering)\n print(\"reordering: {}\".format(reordering))\n reordered_sets = [sets[i] for i in reordering]\n print(\"sets: {}\".format(reordered_sets))\n N = 1\n for s in reordered_sets:\n N = N * len(s)\n low = random.randint(0, N - high)\n print(\"selecting items from {} to {} in the xproduct of size {}\".format(low, low + high, N))\n result = xproductv1(low, low + high, *reordered_sets)\n # result = xproductv1(0, high, *sets)\n print(\"xproduct computation is complete\")\n # need to reset the order of the result elements\n orig_order = []\n orig_ordered_results = []\n for i in range(len(sets)):\n orig_order.append(reordering.index(i))\n print(\"orig_order: {}\".format(orig_order))\n for r in result:\n orig_ordered_results.append([r[i] for i in orig_order])\n print(orig_ordered_results)\n final_result = postprocess(entity, orig_ordered_results, format)\n # final_result = postprocess(entity, result, format)\n # print(\"result: {}\".format(final_result))\n resp = {'status':'success', 'result': final_result}\n except Exception as e:\n print(\"Exception occured: {}\".format(e))\n http_status_code = 
500\n resp = {'status':'failure', 'result': \"Data generation failed\"}\n finally:\n return json.dumps(resp), http_status_code\n\nif __name__ == '__main__':\n port = os.getenv(\"PORT\")\n file_encoding = os.getenv(\"PYTHONIOENCODING\")\n print(\"starting server on port: {}, python-io-encoding: {}\".format(port, file_encoding))\n app.run(host= '0.0.0.0', debug=True, port=port)\n # app.run(debug=True, port=port)\n","sub_path":"src/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"227508905","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2015-2016 BigML\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"BigMLer - project creation and update\n\n\"\"\"\nfrom __future__ import absolute_import\n\nimport sys\nimport os\nimport shutil\n\nimport bigmler.utils as u\nimport bigmler.processing.args as a\nimport bigmler.processing.projects as pp\n\nfrom bigmler.defaults import DEFAULTS_FILE\nfrom bigmler.command import get_stored_command\nfrom bigmler.dispatcher import (SESSIONS_LOG, command_handling,\n clear_log_files)\n\nCOMMAND_LOG = u\".bigmler_project\"\nDIRS_LOG = u\".bigmler_project_dir_stack\"\nLOG_FILES = [COMMAND_LOG, DIRS_LOG, u.NEW_DIRS_LOG]\n\n\ndef project_dispatcher(args=sys.argv[1:]):\n \"\"\"Parses command line and calls the different processing functions\n\n \"\"\"\n\n command = command_handling(args, COMMAND_LOG)\n\n # Parses command line arguments.\n command_args = a.parse_and_check(command)\n if command_args.resume:\n command_args, session_file, _ = get_stored_command(\n args, command_args.debug, command_log=COMMAND_LOG,\n dirs_log=DIRS_LOG, sessions_log=SESSIONS_LOG)\n else:\n if command_args.output_dir is None:\n command_args.output_dir = a.NOW\n directory = u.check_dir(\"%s/x.txt\" % command_args.output_dir)\n command_args.output_dir = directory\n session_file = os.path.join(directory, SESSIONS_LOG)\n u.log_message(command.command + \"\\n\", log_file=session_file)\n\n\n directory = u.check_dir(os.path.join(command_args.output_dir, \"tmp\"))\n session_file = os.path.join(directory, SESSIONS_LOG)\n u.log_message(command.command + \"\\n\", log_file=session_file)\n try:\n shutil.copy(DEFAULTS_FILE, os.path.join(directory, DEFAULTS_FILE))\n except IOError:\n pass\n u.sys_log_message(u\"%s\\n\" % os.path.abspath(directory),\n log_file=DIRS_LOG)\n\n\n path = u.check_dir(\"%s/x.txt\" % command_args.output_dir)\n session_file = u\"%s%s%s\" % (path, os.sep, SESSIONS_LOG)\n # If logging is required set the file for logging\n log = None\n if command_args.log_file:\n u.check_dir(command_args.log_file)\n log = command_args.log_file\n # If --clear_logs the log files are cleared\n clear_log_files([log])\n\n\n # Creates the corresponding api instance\n api = a.get_api_instance(command_args, u.check_dir(session_file))\n a.get_output_args(api, command_args, command_args.resume)\n a.attribute_args(command_args)\n\n\n if not command_args.project_id and 
command_args.name:\n command_args.project = command_args.name\n if command_args.project:\n # create project\n pp.project_processing(\n api, command_args, command_args.resume, session_file=session_file,\n path=path, log=log, create=True)\n if command_args.project_id and (\n command_args.project_attributes or\n command_args.name or command_args.tag or command_args.description\n or command_args.category):\n # update project's attributes\n pp.update_project(command_args, api, command_args.resume, \\\n session_file=session_file)\n\n u.log_message(\"_\" * 80 + \"\\n\", log_file=session_file)\n u.print_generated_files(command_args.output_dir, log_file=session_file,\n verbosity=command_args.verbosity)\n","sub_path":"bigmler/project/dispatcher.py","file_name":"dispatcher.py","file_ext":"py","file_size_in_byte":3881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"96990203","text":"# -*- coding: utf-8 -*-\n\"\"\"The Simulation using only the Basic Model.\nFor the simulation using only the Latent Model see file 'simulations_latent.py'.\nFor a file containing simulations using both models, see file 'simulations.py'.\n\"\"\"\n\nimport torch\nimport torch.optim as optim\nimport models\nimport plots_simulations as plots\n\n\nlamda = torch.tensor([0.9,0.1]) #p(no pain)=0.9, p(pain)=0.1\nvs = torch.tensor([[1.],[50.],[10.],[1.]]) #4 different precision settings\nalphas_o = vs * lamda\n\n#Create lists to store updated probs. after each observation.\nlamdas_nopain = [[],[],[],[]] #p(no pain)\nlamdas_pain = [[],[],[],[]] #p(pain)\n\n#Create artificial data.\ndata = []\nfor _ in range(30):\n data.append(torch.tensor([1.,0.])) #30 times no pain\nfor _ in range(30):\n data.append(torch.tensor([0.,1.])) #30 times pain\n \nprint(\"--------------------------------\")\nprint(\"Simulation using the Basic Model.\")\nprint(\"O ∈ {pain, no pain}. Four different settings of the prior precision are\\\n compared.\")\nprint(\"Observations: 30 times no pain followed by 30 times pain.\")\nprint(\"--------------------------------\")\nfor v in range(len(vs)): #for each precision setting\n print(\"Precision setting {} of 4\".format(v+1))\n print(\"Prior parameter:\")\n print(\"No pain: \",\"{0:.3f}\".format(alphas_o[v][0].item()),\n \"; Pain: \",\"{0:.3f}\".format(alphas_o[v][1].item()))\n print(\"Updated parameter:\")\n for d in data: #for each artificial observation\n model = models.Basic_Model_DM(alphas_o[v])\n optimizer = optim.Adam(model.parameters(), lr=0.01)\n for step in range(10000):\n optimizer.zero_grad()\n loss = model(data=d,num_obs=1,num_rsamples=1000)\n loss.backward()\n optimizer.step()\n \n #Get updated parameter.\n alpha_o = model.q_log_alpha_o.exp().detach()\n #Update model parameter for next instantiation.\n alphas_o[v] = alpha_o\n #Add post. 
probs of pain and no pain to storage lists.\n lamdas_pain[v].append(alpha_o[1]/(alpha_o.sum()))\n lamdas_nopain[v].append(alpha_o[0]/(alpha_o.sum()))\n\n #Print current posterior parameter.\n print(\"No pain: \",\"{0:.3f}\".format(alpha_o[0].item()),\n \"; Pain: \",\"{0:.3f}\".format(alpha_o[1].item()))\n\n#Create a plot like in Anna-Lena's figure 3.7\nplots.plot_bm(lamdas_nopain,lamdas_pain)","sub_path":"Simulations/simulations_basic_model.py","file_name":"simulations_basic_model.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"604418348","text":"import time\nimport pytest\n\nfrom swsscommon import swsscommon\n\n\ndef create_fvs(**kwargs):\n return swsscommon.FieldValuePairs(list(kwargs.items()))\n\n\nclass TestTunnelBase(object):\n APP_TUNNEL_DECAP_TABLE_NAME = \"TUNNEL_DECAP_TABLE\"\n ASIC_TUNNEL_TABLE = \"ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL\"\n ASIC_TUNNEL_TERM_ENTRIES = \"ASIC_STATE:SAI_OBJECT_TYPE_TUNNEL_TERM_TABLE_ENTRY\"\n ASIC_RIF_TABLE = \"ASIC_STATE:SAI_OBJECT_TYPE_ROUTER_INTERFACE\"\n ASIC_VRF_TABLE = \"ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER\"\n\n ecn_modes_map = {\n \"standard\" : \"SAI_TUNNEL_DECAP_ECN_MODE_STANDARD\",\n \"copy_from_outer\": \"SAI_TUNNEL_DECAP_ECN_MODE_COPY_FROM_OUTER\"\n }\n\n dscp_modes_map = {\n \"pipe\" : \"SAI_TUNNEL_DSCP_MODE_PIPE_MODEL\",\n \"uniform\" : \"SAI_TUNNEL_DSCP_MODE_UNIFORM_MODEL\"\n }\n\n ttl_modes_map = {\n \"pipe\" : \"SAI_TUNNEL_TTL_MODE_PIPE_MODEL\",\n \"uniform\" : \"SAI_TUNNEL_TTL_MODE_UNIFORM_MODEL\"\n }\n\n\n def check_interface_exists_in_asicdb(self, asicdb, sai_oid):\n if_table = swsscommon.Table(asicdb, self.ASIC_RIF_TABLE)\n status, fvs = if_table.get(sai_oid)\n return status\n\n def check_vr_exists_in_asicdb(self, asicdb, sai_oid):\n vfr_table = swsscommon.Table(asicdb, self.ASIC_VRF_TABLE)\n status, fvs = vfr_table.get(sai_oid)\n return status\n\n def check_tunnel_termination_entry_exists_in_asicdb(self, asicdb, tunnel_sai_oid, dst_ips):\n tunnel_term_table = swsscommon.Table(asicdb, self.ASIC_TUNNEL_TERM_ENTRIES)\n\n tunnel_term_entries = tunnel_term_table.getKeys()\n assert len(tunnel_term_entries) == len(dst_ips)\n\n for term_entry in tunnel_term_entries:\n status, fvs = tunnel_term_table.get(term_entry)\n\n assert status == True\n assert len(fvs) == 5\n\n for field, value in fvs:\n if field == \"SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_VR_ID\":\n assert self.check_vr_exists_in_asicdb(asicdb, value)\n elif field == \"SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TYPE\":\n assert value == \"SAI_TUNNEL_TERM_TABLE_ENTRY_TYPE_P2MP\"\n elif field == \"SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_TUNNEL_TYPE\":\n assert value == \"SAI_TUNNEL_TYPE_IPINIP\"\n elif field == \"SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_ACTION_TUNNEL_ID\":\n assert value == tunnel_sai_oid\n elif field == \"SAI_TUNNEL_TERM_TABLE_ENTRY_ATTR_DST_IP\":\n assert value in dst_ips\n else:\n assert False, \"Field %s is not tested\" % field\n\n def create_and_test_tunnel(self, db, asicdb, tunnel_name, **kwargs):\n \"\"\" Create tunnel and verify all needed entries in ASIC DB exist \"\"\"\n\n is_symmetric_tunnel = \"src_ip\" in kwargs\n\n # create tunnel entry in DB\n ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME)\n\n fvs = create_fvs(**kwargs)\n\n ps.set(tunnel_name, fvs)\n\n # wait till config will be applied\n time.sleep(1)\n\n # check asic db table\n tunnel_table = swsscommon.Table(asicdb, self.ASIC_TUNNEL_TABLE)\n\n tunnels = tunnel_table.getKeys()\n assert len(tunnels) 
== 1\n\n tunnel_sai_obj = tunnels[0]\n\n status, fvs = tunnel_table.get(tunnel_sai_obj)\n\n assert status == True\n # 6 parameters to check in case of decap tunnel\n # + 1 (SAI_TUNNEL_ATTR_ENCAP_SRC_IP) in case of symmetric tunnel\n assert len(fvs) == (7 if is_symmetric_tunnel else 6)\n\n expected_ecn_mode = self.ecn_modes_map[kwargs[\"ecn_mode\"]]\n expected_dscp_mode = self.dscp_modes_map[kwargs[\"dscp_mode\"]]\n expected_ttl_mode = self.ttl_modes_map[kwargs[\"ttl_mode\"]]\n\n for field, value in fvs:\n if field == \"SAI_TUNNEL_ATTR_TYPE\":\n assert value == \"SAI_TUNNEL_TYPE_IPINIP\"\n elif field == \"SAI_TUNNEL_ATTR_ENCAP_SRC_IP\":\n assert value == kwargs[\"src_ip\"]\n elif field == \"SAI_TUNNEL_ATTR_DECAP_ECN_MODE\":\n assert value == expected_ecn_mode\n elif field == \"SAI_TUNNEL_ATTR_DECAP_TTL_MODE\":\n assert value == expected_ttl_mode\n elif field == \"SAI_TUNNEL_ATTR_DECAP_DSCP_MODE\":\n assert value == expected_dscp_mode\n elif field == \"SAI_TUNNEL_ATTR_OVERLAY_INTERFACE\":\n assert self.check_interface_exists_in_asicdb(asicdb, value)\n elif field == \"SAI_TUNNEL_ATTR_UNDERLAY_INTERFACE\":\n assert self.check_interface_exists_in_asicdb(asicdb, value)\n else:\n assert False, \"Field %s is not tested\" % field\n\n self.check_tunnel_termination_entry_exists_in_asicdb(asicdb, tunnel_sai_obj, kwargs[\"dst_ip\"].split(\",\"))\n\n def remove_and_test_tunnel(self, db, asicdb, tunnel_name):\n \"\"\" Removes tunnel and checks that ASIC db is clear\"\"\"\n\n tunnel_table = swsscommon.Table(asicdb, self.ASIC_TUNNEL_TABLE)\n tunnel_term_table = swsscommon.Table(asicdb, self.ASIC_TUNNEL_TERM_ENTRIES)\n tunnel_app_table = swsscommon.Table(asicdb, self.APP_TUNNEL_DECAP_TABLE_NAME)\n\n tunnels = tunnel_table.getKeys()\n tunnel_sai_obj = tunnels[0]\n\n status, fvs = tunnel_table.get(tunnel_sai_obj)\n\n # get overlay loopback interface oid to check if it is deleted with the tunnel\n overlay_infs_id = {f:v for f,v in fvs}[\"SAI_TUNNEL_ATTR_OVERLAY_INTERFACE\"]\n\n ps = swsscommon.ProducerStateTable(db, self.APP_TUNNEL_DECAP_TABLE_NAME)\n ps.set(tunnel_name, create_fvs(), 'DEL')\n\n # wait till config will be applied\n time.sleep(1)\n\n assert len(tunnel_table.getKeys()) == 0\n assert len(tunnel_term_table.getKeys()) == 0\n assert len(tunnel_app_table.getKeys()) == 0\n assert not self.check_interface_exists_in_asicdb(asicdb, overlay_infs_id)\n\n\n def cleanup_left_over(self, db, asicdb):\n \"\"\" Cleanup APP and ASIC tables \"\"\"\n\n tunnel_table = swsscommon.Table(asicdb, self.ASIC_TUNNEL_TABLE)\n for key in tunnel_table.getKeys():\n tunnel_table._del(key)\n\n tunnel_term_table = swsscommon.Table(asicdb, self.ASIC_TUNNEL_TERM_ENTRIES)\n for key in tunnel_term_table.getKeys():\n tunnel_term_table._del(key)\n\n tunnel_app_table = swsscommon.Table(asicdb, self.APP_TUNNEL_DECAP_TABLE_NAME)\n for key in tunnel_app_table.getKeys():\n tunnel_app_table._del(key)\n\n\nclass TestDecapTunnel(TestTunnelBase):\n \"\"\" Tests for decap tunnel creation and removal \"\"\"\n\n def test_TunnelDecap_v4(self, dvs, testlog):\n \"\"\" test IPv4 tunnel creation \"\"\"\n\n db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)\n asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)\n\n self.cleanup_left_over(db, asicdb)\n\n # create tunnel IPv4 tunnel\n self.create_and_test_tunnel(db, asicdb, tunnel_name=\"IPINIPv4Decap\", tunnel_type=\"IPINIP\",\n dst_ip=\"2.2.2.2,3.3.3.3\", dscp_mode=\"uniform\",\n ecn_mode=\"standard\", ttl_mode=\"pipe\")\n self.remove_and_test_tunnel(db, asicdb, 
\"IPINIPv4Decap\")\n\n def test_TunnelDecap_v6(self, dvs, testlog):\n \"\"\" test IPv6 tunnel creation \"\"\"\n\n db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)\n asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)\n\n self.cleanup_left_over(db, asicdb)\n\n # create tunnel IPv6 tunnel\n self.create_and_test_tunnel(db, asicdb, tunnel_name=\"IPINIPv6Decap\", tunnel_type=\"IPINIP\",\n dst_ip=\"2::2,3::3\", dscp_mode=\"pipe\",\n ecn_mode=\"copy_from_outer\", ttl_mode=\"uniform\")\n self.remove_and_test_tunnel(db, asicdb,\"IPINIPv6Decap\")\n\n\nclass TestSymmetricTunnel(TestTunnelBase):\n \"\"\" Tests for symmetric tunnel creation and removal \"\"\"\n\n def test_TunnelSymmetric_v4(self, dvs, testlog):\n \"\"\" test IPv4 tunnel creation \"\"\"\n\n db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)\n asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)\n\n self.cleanup_left_over(db, asicdb)\n\n # create tunnel IPv4 tunnel\n self.create_and_test_tunnel(db, asicdb, tunnel_name=\"IPINIPv4Symmetric\", tunnel_type=\"IPINIP\",\n src_ip=\"1.1.1.1\",\n dst_ip=\"2.2.2.2,3.3.3.3\", dscp_mode=\"pipe\",\n ecn_mode=\"copy_from_outer\", ttl_mode=\"uniform\")\n self.remove_and_test_tunnel(db, asicdb, \"IPINIPv4Symmetric\")\n\n def test_TunnelSymmetric_v6(self, dvs, testlog):\n \"\"\" test IPv6 tunnel creation \"\"\"\n\n db = swsscommon.DBConnector(swsscommon.APPL_DB, dvs.redis_sock, 0)\n asicdb = swsscommon.DBConnector(swsscommon.ASIC_DB, dvs.redis_sock, 0)\n\n self.cleanup_left_over(db, asicdb)\n\n # create tunnel IPv6 tunnel\n self.create_and_test_tunnel(db, asicdb, tunnel_name=\"IPINIPv6Symmetric\", tunnel_type=\"IPINIP\",\n src_ip=\"1::1\",\n dst_ip=\"2::2,3::3\", dscp_mode=\"uniform\",\n ecn_mode=\"standard\", ttl_mode=\"pipe\")\n self.remove_and_test_tunnel(db, asicdb, \"IPINIPv6Symmetric\")\n\n\n\n# Add Dummy always-pass test at end as workaroud\n# for issue when Flaky fail on final test it invokes module tear-down before retrying\ndef test_nonflaky_dummy():\n pass\n","sub_path":"tests/test_tunnel.py","file_name":"test_tunnel.py","file_ext":"py","file_size_in_byte":9660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"40932772","text":"import numpy as np\nimport tensorflow as tf\nimport json, random\n\nclass DataManager(object):\n def __init__(self, dataset):\n '''\n Read the data from dir \"dataset\"\n '''\n self.origin = {}\n for fname in ['train', 'dev', 'test']:\n data = []\n for line in open('%s/%s.res' % (dataset, fname)):\n s = json.loads(line.strip())\n if len(s) > 0:\n data.append(s)\n self.origin[fname] = data\n def getword(self):\n '''\n Get the words that appear in the data.\n Sorted by the times it appears.\n {'ok': 1, 'how': 2, ...}\n Never run this function twice.\n '''\n wordcount = {}\n def dfs(node):\n if 'children' in node:\n dfs(node['children'][0])\n dfs(node['children'][1])\n else:\n word = node['word'].lower()\n wordcount[word] = wordcount.get(word, 0) + 1\n for fname in ['train', 'dev', 'test']:\n for sent in self.origin[fname]:\n dfs(sent)\n words = list(wordcount.items())\n words.sort(key = lambda x : x[1], reverse = True)\n self.words = words\n self.wordlist = {item[0]: index+1 for index, item in enumerate(words)}\n return self.wordlist\n \n def getdata(self, grained, maxlenth):\n '''\n Get all the data, divided into (train,dev,test).\n For every sentence, {'words':[1,3,5,...], 'solution': [0,1,0,0,0]}\n For each data, [sentence1, sentence2, ...]\n 
Never run this function twice.\n '''\n def one_hot_vector(r):\n s = np.zeros(grained, dtype=np.float32)\n s[r] += 1.0\n return s\n def dfs(node, words):\n if 'children' in node:\n dfs(node['children'][0], words)\n dfs(node['children'][1], words)\n else:\n word = self.wordlist[node['word'].lower()]\n words.append(word)\n self.getword()\n self.data = {}\n for fname in ['train', 'dev', 'test']:\n self.data[fname] = []\n for sent in self.origin[fname]:\n words = []\n dfs(sent, words)\n lens = len(words)\n if maxlenth < lens:\n print(lens)\n words += [0] * (maxlenth - lens)\n solution = one_hot_vector(int(sent['rating']))\n now = {'words': np.array(words), \\\n 'solution': solution,\\\n 'lenth': lens}\n self.data[fname].append(now)\n return self.data['train'], self.data['dev'], self.data['test']\n \n def get_wordvector(self, name):\n fr = open(name)\n #debug by Hypo @2018-05-20\n # n, dim = list(map(int, fr.readline().split()))\n n=400000\n dim=300\n self.wv = {}\n for i in range(n - 1):\n vec = fr.readline().split()\n word = vec[0].lower()\n vec = list(map(float, vec[1:]))\n if word in self.wordlist:\n self.wv[self.wordlist[word]] = vec\n self.wordvector = []\n losscnt = 0\n for i in range(len(self.wordlist) + 1):\n if i in self.wv:\n self.wordvector.append(self.wv[i])\n else:\n losscnt += 1\n self.wordvector.append(np.random.uniform(-0.1,0.1,[dim]))\n self.wordvector = np.array(self.wordvector, dtype=np.float32)\n print(losscnt, \"words not found in wordvector\")\n print(len(self.wordvector), \"words in total\")\n return self.wordvector\n\n#datamanager = DataManager(\"../TrainData/MR\")\n#train_data, test_data, dev_data = datamanager.getdata(2, 200)\n#wv = datamanager.get_wordvector(\"../WordVector/vector.25dim\")\n#mxlen = 0\n#for item in train_data:\n# print item['lenth']\n# if item['lenth'] > mxlen:\n# mxlen =item['lenth']\n#print mxlen\n\n# test\ndataManager = DataManager('../TrainData/MR')\ndataManager.getdata(2,3)\n# dataManager.get_wordvector('../WordVector/vector.300dim')","sub_path":"ID_LSTM/datamanager_old.py","file_name":"datamanager_old.py","file_ext":"py","file_size_in_byte":4130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"142223584","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/krflorek/Documents/dryad/dryad_app/dryad_report.py\n# Compiled at: 2020-05-08 19:07:03\n# Size of source mod 2**32: 3763 bytes\nimport sys, os, re, argparse\nfrom shutil import which, copyfile\nfrom datetime import date\nimport pexpect, re, sys\n\ndef main():\n lib_path = os.path.abspath(os.path.dirname(__file__) + '/' + '../lib')\n dryad_path = os.path.abspath(os.path.dirname(__file__))\n nextflow_path = os.path.join(lib_path, 'nextflow')\n\n class MyParser(argparse.ArgumentParser):\n\n def error(self, message):\n sys.stderr.write('error: %s\\n' % message)\n self.print_help()\n sys.exit(2)\n\n parser = MyParser(description='Rebuild a previously generated PDF report.')\n parser.add_argument('rmd', type=str, help='path to Rmarkdown file (.Rmd)', nargs='?', default=False)\n parser.add_argument('snp_matrix', type=str, help='path to snp matrix', nargs='?', default=False)\n parser.add_argument('cg_tree', type=str, help='path to core genome tree', nargs='?', default=False)\n parser.add_argument('--ar', type=str, help='path to ar TSV file')\n parser.add_argument('--profile', type=str, choices=['docker', 'singularity'], 
help='specify nextflow profile, dryad_report will try to use docker first, then singularity')\n parser.add_argument('--get_config', action='store_true', help='get a Nextflow configuration template for dryad')\n parser.add_argument('--config', '-c', type=str, help='Nextflow custom configuration')\n args = parser.parse_args()\n if args.get_config:\n config_path = os.path.join(dryad_path, 'configs/dryad_config_template.config')\n dest_path = os.path.join(os.getcwd(), date.today().strftime('%y-%m-%d') + '_dryad.config')\n copyfile(config_path, dest_path)\n sys.exit()\n elif args.rmd:\n if not (args.snp_matrix and args.cg_tree):\n parser.print_help()\n sys.exit(1)\n if which('docker'):\n profile = '-profile docker'\n elif which('singularity'):\n profile = '-profile singularity'\n else:\n profile = ''\n config = ''\n if args.config:\n config = '-C ' + os.path.abspath(args.config)\n profile = ''\n else:\n if args.profile:\n if which(args.profile):\n profile = '-profile ' + args.profile\n else:\n print(f\"{args.profile} is not installed or found in PATH.\")\n else:\n if not profile:\n print('Singularity or Docker is not installed or not found in PATH.')\n sys.exit(1)\n else:\n work = ''\n output_path = os.path.join(os.getcwd(), 'rebuild_results')\n output_work = os.path.join(output_path, 'report_work')\n if profile:\n work = f\"-w {output_work}\"\n rmd = os.path.abspath(args.rmd)\n logo_path = os.path.abspath(os.path.dirname(__file__) + '/' + 'assets/dryad_logo_250.png')\n snp_mat = '--snp_matrix ' + os.path.abspath(args.snp_matrix)\n cg_tree = '--cg_tree ' + os.path.abspath(args.cg_tree)\n if args.ar:\n ar_tsv = '--ar_tsv ' + os.path.abspath(args.ar)\n else:\n ar_tsv = ''\n command = nextflow_path\n command = command + f\" {config} run {dryad_path}/rebuild_report.nf {profile} --logo {logo_path} --outdir {output_path} --rmd {rmd} {snp_mat} {cg_tree} {ar_tsv} {work}\"\n print('Rebuilding Dryad Report:')\n child = pexpect.spawn(command)\n child.interact()","sub_path":"pycfiles/dryad-2.0.0-py3-none-any/dryad_report.cpython-37.py","file_name":"dryad_report.cpython-37.py","file_ext":"py","file_size_in_byte":3773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"275933376","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"Light curve-related functions.\"\"\"\n\nimport os\nimport warnings\nimport copy\nfrom astropy import log\nimport numpy as np\nfrom astropy.logger import AstropyUserWarning\nfrom stingray.lightcurve import Lightcurve\nfrom stingray.utils import assign_value_if_none\nfrom stingray.gti import create_gti_mask, cross_gtis, contiguous_regions\nfrom .base import (\n _look_for_array_in_array,\n hen_root,\n mkdir_p,\n interpret_bintime,\n)\nfrom .io import load_events, save_lcurve, load_lcurve\nfrom .io import HEN_FILE_EXTENSION, high_precision_keyword_read, get_file_type\nfrom .base import deorbit_events\n\n\ndef join_lightcurve_objs(lclist):\n \"\"\"Join light curves.\n\n Light curves from different instruments are put in different channels.\n Light curves from the same time interval and instrument raise\n a ValueError.\n\n Parameters\n ----------\n lclist : list of :class:`Lightcurve` objects\n The list of light curves to join\n\n Returns\n -------\n lcoutlist : joint light curves, one per instrument\n\n See Also\n --------\n scrunch_lightcurves : Create a single light curve from input light\n curves.\n\n Examples\n --------\n >>> lcA = Lightcurve(np.arange(4), np.zeros(4))\n >>> lcA.instr='BU' # Test also 
case sensitivity\n >>> lcB = Lightcurve(np.arange(4) + 4, [1, 3, 4, 5])\n >>> lcB.instr='bu'\n >>> lcC = join_lightcurve_objs((lcA, lcB))\n >>> np.all(lcC['bu'].time == np.arange(8))\n True\n >>> np.all(lcC['bu'].counts == [0, 0, 0, 0, 1, 3, 4, 5])\n True\n \"\"\"\n # --------------- Check consistency of data --------------\n lcdts = [lcdata.dt for lcdata in lclist]\n # Find unique elements. If multiple bin times are used, throw an exception\n lcdts = list(set(lcdts))\n assert len(lcdts) == 1, \"Light curves must have same dt for joining\"\n\n instrs = [\n lcdata.instr.lower()\n for lcdata in lclist\n if (hasattr(lcdata, \"instr\") and lcdata.instr is not None)\n ]\n\n # Find unique elements. A lightcurve will be produced for each instrument\n instrs = list(set(instrs))\n if instrs == []:\n instrs = [\"unknown\"]\n\n outlcs = {}\n for instr in instrs:\n outlcs[instr.lower()] = None\n # -------------------------------------------------------\n\n for lcdata in lclist:\n instr = assign_value_if_none(lcdata.instr, \"unknown\").lower()\n if outlcs[instr] is None:\n outlcs[instr] = lcdata\n else:\n outlcs[instr] = outlcs[instr].join(lcdata)\n\n return outlcs\n\n\ndef join_lightcurves(lcfilelist, outfile=\"out_lc\" + HEN_FILE_EXTENSION):\n \"\"\"Join light curves from different files.\n\n Light curves from different instruments are put in different channels.\n\n Parameters\n ----------\n lcfilelist : list of str\n List of input file names\n outfile :\n Output light curve\n See Also\n --------\n scrunch_lightcurves : Create a single light curve from input light\n curves.\n\n \"\"\"\n lcdatas = []\n\n for lfc in lcfilelist:\n log.info(\"Loading file %s...\" % lfc)\n lcdata = load_lcurve(lfc)\n log.info(\"Done.\")\n lcdatas.append(lcdata)\n del lcdata\n\n outlcs = join_lightcurve_objs(lcdatas)\n\n if outfile is not None:\n instrs = list(outlcs.keys())\n for instr in instrs:\n if len(instrs) == 1:\n tag = \"\"\n else:\n tag = instr\n log.info(\"Saving joined light curve to %s\" % outfile)\n\n dname, fname = os.path.split(outfile)\n save_lcurve(outlcs[instr], os.path.join(dname, tag + fname))\n\n return outlcs\n\n\ndef scrunch_lightcurve_objs(lclist):\n \"\"\"Create a single light curve from input light curves.\n\n Light curves are appended when they cover different times, and summed when\n they fall in the same time range. 
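# The per-instrument bookkeeping in join_lightcurve_objs above (store a value on
# first sight of a key, combine on collisions) is a generic merge-by-key pattern.
# A self-contained sketch of the same idea; the names below are illustrative and
# not part of HENDRICS:
def merge_by_key(items, key_fn, combine):
    # Group items under a normalized key, combining whenever two items collide.
    out = {}
    for item in items:
        key = key_fn(item)
        if key not in out:
            out[key] = item
        else:
            out[key] = combine(out[key], item)
    return out

# e.g. concatenating per-instrument event lists, case-insensitively
records = [("BU", [1, 2]), ("bu", [3, 4]), ("FPMA", [5])]
merged = merge_by_key(records, key_fn=lambda r: r[0].lower(),
                      combine=lambda a, b: (a[0].lower(), a[1] + b[1]))
assert merged["bu"] == ("bu", [1, 2, 3, 4])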
This is done regardless of the channel\n or the instrument.\n\n Parameters\n ----------\n lcfilelist : list of :class:`stingray.lightcurve.Lightcurve` objects\n The list of light curves to scrunch\n\n Returns\n -------\n lc : scrunched light curve\n\n See Also\n --------\n join_lightcurves : Join light curves from different files\n\n Examples\n --------\n >>> lcA = Lightcurve(np.arange(4), np.ones(4))\n >>> lcA.instr='bu1'\n >>> lcB = Lightcurve(np.arange(4), [1, 3, 4, 5])\n >>> lcB.instr='bu2'\n >>> lcC = scrunch_lightcurve_objs((lcA, lcB))\n >>> np.all(lcC.time == np.arange(4))\n True\n >>> np.all(lcC.counts == [2, 4, 5, 6])\n True\n >>> np.all(lcC.instr == 'bu1,bu2')\n True\n \"\"\"\n\n instrs = [lc.instr for lc in lclist]\n gti_lists = [lc.gti for lc in lclist]\n gti = cross_gtis(gti_lists)\n for lc in lclist:\n lc.gti = gti\n lc.apply_gtis()\n # Determine limits\n\n lc0 = lclist[0]\n\n for lc in lclist[1:]:\n lc0 = lc0 + lc\n\n lc0.instr = \",\".join(instrs)\n\n return lc0\n\n\ndef scrunch_lightcurves(\n lcfilelist, outfile=\"out_scrlc\" + HEN_FILE_EXTENSION, save_joint=False\n):\n \"\"\"Create a single light curve from input light curves.\n\n Light curves are appended when they cover different times, and summed when\n they fall in the same time range. This is done regardless of the channel\n or the instrument.\n\n Parameters\n ----------\n lcfilelist : list of str\n The list of light curve files to scrunch\n\n Returns\n -------\n time : array-like\n The time array\n lc :\n The new light curve\n gti : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]\n Good Time Intervals\n\n Other Parameters\n ----------------\n outfile : str\n The output file name\n save_joint : bool\n If True, save the per-channel joint light curves\n\n See Also\n --------\n join_lightcurves : Join light curves from different files\n \"\"\"\n if save_joint:\n lcdata = join_lightcurves(lcfilelist)\n else:\n lcdata = join_lightcurves(lcfilelist, outfile=None)\n\n lc0 = scrunch_lightcurve_objs(list(lcdata.values()))\n log.info(\"Saving scrunched light curve to %s\" % outfile)\n save_lcurve(lc0, outfile)\n\n return lc0\n\n\ndef filter_lc_gtis(\n lc, safe_interval=None, delete=False, min_length=0, return_borders=False\n):\n \"\"\"Filter a light curve for GTIs.\n\n Parameters\n ----------\n lc : :class:`Lightcurve` object\n The input light curve\n\n Returns\n -------\n newlc : :class:`Lightcurve` object\n The output light curve\n borders : [[i0_0, i0_1], [i1_0, i1_1], ...], optional\n The indexes of the light curve corresponding to the borders of the\n GTIs. Returned if return_borders is set to True\n\n Other Parameters\n ----------------\n safe_interval : float or [float, float]\n Seconds to filter out at the start and end of each GTI. If single\n float, these safe windows are equal, otherwise the two numbers refer\n to the start and end of the GTI respectively\n delete : bool\n If delete is True, the intervals outside of GTIs are filtered out from\n the light curve. Otherwise, they are set to zero.\n min_length : float\n Minimum length of GTI. 
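# scrunch_lightcurve_objs above delegates GTI intersection to stingray's
# cross_gtis. What such an intersection does for two sorted lists of
# [start, stop] intervals can be sketched in a few lines (a simplified
# stand-in, not stingray's actual implementation):
def intersect_intervals(a, b):
    # Intersect two sorted lists of [start, stop] intervals with a two-pointer sweep.
    out, i, j = [], 0, 0
    while i < len(a) and j < len(b):
        start = max(a[i][0], b[j][0])
        stop = min(a[i][1], b[j][1])
        if start < stop:  # the two intervals overlap
            out.append([start, stop])
        # advance whichever interval ends first
        if a[i][1] < b[j][1]:
            i += 1
        else:
            j += 1
    return out

print(intersect_intervals([[0, 5], [10, 20]], [[3, 12]]))  # [[3, 5], [10, 12]]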
GTIs below this length will be removed.\n return_borders : bool\n If True, return also the indexes of the light curve corresponding to\n the borders of the GTIs\n \"\"\"\n mask, newgti = create_gti_mask(\n lc.time,\n lc.gti,\n return_new_gtis=True,\n safe_interval=safe_interval,\n min_length=min_length,\n )\n\n nomask = np.logical_not(mask)\n\n newlc = copy.copy(lc)\n newlc.counts[nomask] = 0\n newlc.gti = newgti\n\n if return_borders:\n mask = create_gti_mask(lc.time, newgti)\n borders = contiguous_regions(mask)\n return newlc, borders\n else:\n return newlc\n\n\ndef lcurve_from_events(\n f,\n safe_interval=0,\n pi_interval=None,\n e_interval=None,\n min_length=0,\n gti_split=False,\n ignore_gtis=False,\n bintime=1.0,\n outdir=None,\n outfile=None,\n noclobber=False,\n deorbit_par=None,\n):\n \"\"\"Bin an event list in a light curve.\n\n Parameters\n ----------\n f : str\n Input event file name\n bintime : float\n The bin time of the output light curve\n\n Returns\n -------\n outfiles : list\n List of output light curves\n\n Other Parameters\n ----------------\n safe_interval : float or [float, float]\n Seconds to filter out at the start and end of each GTI. If single\n float, these safe windows are equal, otherwise the two numbers refer\n to the start and end of the GTI respectively\n pi_interval : [int, int]\n PI channel interval to select. Default None, meaning that all PI\n channels are used\n e_interval : [float, float]\n Energy interval to select (only works if event list is calibrated with\n `calibrate`). Default None\n min_length : float\n GTIs below this length will be filtered out\n gti_split : bool\n If True, create one light curve for each good time interval\n ignore_gtis : bool\n Ignore good time intervals, and get a single light curve that includes\n possible gaps\n outdir : str\n Output directory\n outfile : str\n Output file\n noclobber : bool\n If True, do not overwrite existing files\n\n \"\"\"\n log.info(\"Loading file %s...\" % f)\n evdata = load_events(f)\n log.info(\"Done.\")\n\n deorbit_tag = \"\"\n if deorbit_par is not None:\n evdata = deorbit_events(evdata, deorbit_par)\n deorbit_tag = \"_deorb\"\n\n bintime = np.longdouble(interpret_bintime(bintime))\n\n tag = \"\"\n\n gti = evdata.gti\n tstart = np.min(gti)\n tstop = np.max(gti)\n events = evdata.time\n if hasattr(evdata, \"instr\") and evdata.instr is not None:\n instr = evdata.instr\n else:\n instr = \"unknown\"\n\n if ignore_gtis:\n gti = np.array([[tstart, tstop]])\n evdata.gti = gti\n\n total_lc = evdata.to_lc(100)\n total_lc.instr = instr\n\n # Then, apply filters\n if pi_interval is not None and np.all(np.array(pi_interval) > 0):\n pis = evdata.pi\n good = np.logical_and(pis > pi_interval[0], pis <= pi_interval[1])\n events = events[good]\n tag = \"_PI%g-%g\" % (pi_interval[0], pi_interval[1])\n elif e_interval is not None and np.all(np.array(e_interval) > 0):\n if not hasattr(evdata, \"energy\") or evdata.energy is None:\n raise ValueError(\n \"No energy information is present in the file.\"\n + \" Did you run HENcalibrate?\"\n )\n es = evdata.energy\n good = np.logical_and(es > e_interval[0], es <= e_interval[1])\n events = events[good]\n tag = \"_E%g-%g\" % (e_interval[0], e_interval[1])\n else:\n pass\n\n if tag != \"\":\n save_lcurve(\n total_lc,\n hen_root(f) + \"_std_lc\" + deorbit_tag + HEN_FILE_EXTENSION,\n )\n\n # Assign default value if None\n outfile = assign_value_if_none(outfile, hen_root(f) + tag + deorbit_tag + \"_lc\")\n\n # Take out extension from name, if present, then give extension. 
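# filter_lc_gtis above keeps the time grid and zeroes the counts outside the
# GTIs instead of dropping bins. The core masking step, sketched with plain
# numpy (the real create_gti_mask also handles safe intervals and minimum
# GTI lengths):
import numpy as np

time = np.arange(10, dtype=float)
counts = np.ones(10)
gtis = [[1.5, 4.5], [7.0, 9.5]]

# build a boolean mask that is True inside any good time interval
mask = np.zeros(time.size, dtype=bool)
for start, stop in gtis:
    mask |= (time >= start) & (time < stop)

counts[~mask] = 0  # keep the grid, blank the bad-time bins
print(counts)  # [0. 0. 1. 1. 1. 0. 0. 1. 1. 1.]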
This\n # avoids multiple extensions\n outfile = outfile.replace(HEN_FILE_EXTENSION, \"\") + HEN_FILE_EXTENSION\n outdir = assign_value_if_none(outdir, os.path.dirname(os.path.abspath(f)))\n\n _, outfile = os.path.split(outfile)\n mkdir_p(outdir)\n outfile = os.path.join(outdir, outfile)\n\n if noclobber and os.path.exists(outfile):\n warnings.warn(\n \"File exists, and noclobber option used. Skipping\",\n AstropyUserWarning,\n )\n return [outfile]\n\n lc = Lightcurve.make_lightcurve(\n events,\n bintime,\n tstart=tstart,\n tseg=tstop - tstart,\n mjdref=evdata.mjdref,\n gti=gti,\n )\n\n lc.instr = instr\n lc.e_interval = e_interval\n\n lc = filter_lc_gtis(\n lc, safe_interval=safe_interval, delete=False, min_length=min_length\n )\n\n if len(lc.gti) == 0:\n warnings.warn(\"No GTIs above min_length ({0}s) found.\".format(min_length))\n return\n\n lc.header = None\n if hasattr(evdata, \"header\"):\n lc.header = evdata.header\n\n if gti_split:\n lcs = lc.split_by_gti()\n outfiles = []\n\n for ib, l0 in enumerate(lcs):\n local_tag = tag + \"_gti{:03d}\".format(ib)\n outf = hen_root(outfile) + local_tag + \"_lc\" + HEN_FILE_EXTENSION\n if noclobber and os.path.exists(outf):\n warnings.warn(\"File exists, and noclobber option used. Skipping\")\n outfiles.append(outf)\n l0.instr = lc.instr\n l0.header = lc.header\n\n save_lcurve(l0, outf)\n outfiles.append(outf)\n else:\n log.info(\"Saving light curve to %s\" % outfile)\n save_lcurve(lc, outfile)\n outfiles = [outfile]\n\n # For consistency in return value\n return outfiles\n\n\ndef lcurve_from_fits(\n fits_file,\n gtistring=\"GTI\",\n timecolumn=\"TIME\",\n ratecolumn=None,\n ratehdu=1,\n fracexp_limit=0.9,\n outfile=None,\n noclobber=False,\n outdir=None,\n):\n \"\"\"\n Load a lightcurve from a fits file and save it in HENDRICS format.\n\n .. note ::\n FITS light curve handling is still under testing.\n Absolute times might be incorrect depending on the light curve format.\n\n Parameters\n ----------\n fits_file : str\n File name of the input light curve in FITS format\n\n Returns\n -------\n outfile : [str]\n Returned as a list with a single element for consistency with\n `lcurve_from_events`\n\n Other Parameters\n ----------------\n gtistring : str\n Name of the GTI extension in the FITS file\n timecolumn : str\n Name of the column containing times in the FITS file\n ratecolumn : str\n Name of the column containing rates in the FITS file\n ratehdu : str or int\n Name or index of the FITS extension containing the light curve\n fracexp_limit : float\n Minimum exposure fraction allowed\n outfile : str\n Output file name\n noclobber : bool\n If True, do not overwrite existing files\n \"\"\"\n warnings.warn(\n \"\"\"WARNING! FITS light curve handling is still under testing.\n Absolute times might be incorrect.\"\"\"\n )\n # TODO:\n # treat consistently TDB, UTC, TAI, etc. This requires some documentation\n # reading. For now, we assume TDB\n from astropy.io import fits as pf\n from astropy.time import Time\n import numpy as np\n from stingray.gti import create_gti_from_condition\n\n outfile = assign_value_if_none(outfile, hen_root(fits_file) + \"_lc\")\n outfile = outfile.replace(HEN_FILE_EXTENSION, \"\") + HEN_FILE_EXTENSION\n outdir = assign_value_if_none(outdir, os.path.dirname(os.path.abspath(fits_file)))\n\n _, outfile = os.path.split(outfile)\n mkdir_p(outdir)\n outfile = os.path.join(outdir, outfile)\n\n if noclobber and os.path.exists(outfile):\n warnings.warn(\n \"File exists, and noclobber option used. 
Skipping\",\n AstropyUserWarning,\n )\n return [outfile]\n\n lchdulist = pf.open(fits_file)\n lctable = lchdulist[ratehdu].data\n\n # Units of header keywords\n tunit = lchdulist[ratehdu].header[\"TIMEUNIT\"]\n\n try:\n mjdref = high_precision_keyword_read(lchdulist[ratehdu].header, \"MJDREF\")\n mjdref = Time(mjdref, scale=\"tdb\", format=\"mjd\")\n except Exception:\n mjdref = None\n\n try:\n instr = lchdulist[ratehdu].header[\"INSTRUME\"]\n except Exception:\n instr = \"EXTERN\"\n\n # ----------------------------------------------------------------\n # Trying to comply with all different formats of fits light curves.\n # It's a madness...\n try:\n tstart = high_precision_keyword_read(lchdulist[ratehdu].header, \"TSTART\")\n tstop = high_precision_keyword_read(lchdulist[ratehdu].header, \"TSTOP\")\n except Exception:\n raise (Exception(\"TSTART and TSTOP need to be specified\"))\n\n # For nulccorr lcs this whould work\n\n timezero = high_precision_keyword_read(lchdulist[ratehdu].header, \"TIMEZERO\")\n # Sometimes timezero is \"from tstart\", sometimes it's an absolute time.\n # This tries to detect which case is this, and always consider it\n # referred to tstart\n timezero = assign_value_if_none(timezero, 0)\n\n # for lcurve light curves this should instead work\n if tunit == \"d\":\n # TODO:\n # Check this. For now, I assume TD (JD - 2440000.5).\n # This is likely wrong\n timezero = Time(2440000.5 + timezero, scale=\"tdb\", format=\"jd\")\n tstart = Time(2440000.5 + tstart, scale=\"tdb\", format=\"jd\")\n tstop = Time(2440000.5 + tstop, scale=\"tdb\", format=\"jd\")\n # if None, use NuSTAR defaulf MJDREF\n mjdref = assign_value_if_none(\n mjdref,\n Time(np.longdouble(\"55197.00076601852\"), scale=\"tdb\", format=\"mjd\"),\n )\n\n timezero = (timezero - mjdref).to(\"s\").value\n tstart = (tstart - mjdref).to(\"s\").value\n tstop = (tstop - mjdref).to(\"s\").value\n\n if timezero > tstart:\n timezero -= tstart\n\n time = np.array(lctable.field(timecolumn), dtype=np.longdouble)\n if time[-1] < tstart:\n time += timezero + tstart\n else:\n time += timezero\n\n try:\n dt = high_precision_keyword_read(lchdulist[ratehdu].header, \"TIMEDEL\")\n if tunit == \"d\":\n dt *= 86400\n except Exception:\n warnings.warn(\n \"Assuming that TIMEDEL is the median difference between the\"\n \" light curve times\",\n AstropyUserWarning,\n )\n dt = np.median(np.diff(time))\n\n # ----------------------------------------------------------------\n ratecolumn = assign_value_if_none(\n ratecolumn,\n _look_for_array_in_array([\"RATE\", \"RATE1\", \"COUNTS\"], lctable.names),\n )\n\n rate = np.array(lctable.field(ratecolumn), dtype=float)\n\n try:\n rate_e = np.array(lctable.field(\"ERROR\"), dtype=np.longdouble)\n except Exception:\n rate_e = np.zeros_like(rate)\n\n if \"RATE\" in ratecolumn:\n rate *= dt\n rate_e *= dt\n\n try:\n fracexp = np.array(lctable.field(\"FRACEXP\"), dtype=np.longdouble)\n except Exception:\n fracexp = np.ones_like(rate)\n\n good_intervals = (rate == rate) * (fracexp >= fracexp_limit) * (fracexp <= 1)\n\n rate[good_intervals] /= fracexp[good_intervals]\n rate_e[good_intervals] /= fracexp[good_intervals]\n\n rate[np.logical_not(good_intervals)] = 0\n\n try:\n gtitable = lchdulist[gtistring].data\n gti_list = np.array(\n [[a, b] for a, b in zip(gtitable.field(\"START\"), gtitable.field(\"STOP\"))],\n dtype=np.longdouble,\n )\n except Exception:\n gti_list = create_gti_from_condition(time, good_intervals)\n\n lchdulist.close()\n\n lc = Lightcurve(\n time=time,\n counts=rate,\n 
err=rate_e,\n gti=gti_list,\n mjdref=mjdref.mjd,\n dt=dt,\n )\n\n lc.instr = instr\n lc.header = lchdulist[ratehdu].header.tostring()\n\n log.info(\"Saving light curve to %s\" % outfile)\n save_lcurve(lc, outfile)\n return [outfile]\n\n\ndef lcurve_from_txt(\n txt_file,\n outfile=None,\n noclobber=False,\n outdir=None,\n mjdref=None,\n gti=None,\n):\n \"\"\"\n Load a lightcurve from a text file.\n\n Parameters\n ----------\n txt_file : str\n File name of the input light curve in text format. Assumes two columns:\n time, counts. Times are seconds from MJDREF 55197.00076601852 (NuSTAR)\n if not otherwise specified.\n\n Returns\n -------\n outfile : [str]\n Returned as a list with a single element for consistency with\n `lcurve_from_events`\n\n Other Parameters\n ----------------\n outfile : str\n Output file name\n noclobber : bool\n If True, do not overwrite existing files\n mjdref : float, default 55197.00076601852\n the MJD time reference\n gti : [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]\n Good Time Intervals\n \"\"\"\n import numpy as np\n\n if mjdref is None:\n mjdref = np.longdouble(\"55197.00076601852\")\n\n outfile = assign_value_if_none(outfile, hen_root(txt_file) + \"_lc\")\n outfile = outfile.replace(HEN_FILE_EXTENSION, \"\") + HEN_FILE_EXTENSION\n\n outdir = assign_value_if_none(outdir, os.path.dirname(os.path.abspath(txt_file)))\n\n _, outfile = os.path.split(outfile)\n mkdir_p(outdir)\n outfile = os.path.join(outdir, outfile)\n\n if noclobber and os.path.exists(outfile):\n warnings.warn(\n \"File exists, and noclobber option used. Skipping\",\n AstropyUserWarning,\n )\n return [outfile]\n\n time, counts = np.genfromtxt(txt_file, delimiter=\" \", unpack=True)\n time = np.array(time, dtype=np.longdouble)\n counts = np.array(counts, dtype=float)\n\n lc = Lightcurve(time=time, counts=counts, gti=gti, mjdref=mjdref)\n\n lc.instr = \"EXTERN\"\n\n log.info(\"Saving light curve to %s\" % outfile)\n save_lcurve(lc, outfile)\n return [outfile]\n\n\ndef _baseline_lightcurves(lcurves, outroot, p, lam):\n outroot_save = outroot\n for i, f in enumerate(lcurves):\n if outroot is None:\n outroot = hen_root(f) + \"_lc_baseline\"\n else:\n outroot = outroot_save + \"_{}\".format(i)\n ftype, lc = get_file_type(f)\n baseline = lc.baseline(p, lam)\n lc.base = baseline\n save_lcurve(lc, outroot + HEN_FILE_EXTENSION)\n\n\ndef _wrap_lc(args):\n f, kwargs = args\n try:\n return lcurve_from_events(f, **kwargs)\n except Exception as e:\n warnings.warn(\"HENlcurve exception: {0}\".format(str(e)))\n raise\n\n\ndef _wrap_txt(args):\n f, kwargs = args\n try:\n return lcurve_from_txt(f, **kwargs)\n except Exception as e:\n warnings.warn(\"HENlcurve exception: {0}\".format(str(e)))\n return []\n\n\ndef _wrap_fits(args):\n f, kwargs = args\n try:\n return lcurve_from_fits(f, **kwargs)\n except Exception as e:\n warnings.warn(\"HENlcurve exception: {0}\".format(str(e)))\n return []\n\n\ndef _execute_lcurve(args):\n from multiprocessing import Pool\n\n bintime = args.bintime\n\n safe_interval = args.safe_interval\n e_interval, pi_interval = args.energy_interval, args.pi_interval\n if args.pi_interval is not None:\n pi_interval = np.array(args.pi_interval)\n if e_interval is not None:\n args.e_interval = np.array(args.energy_interval)\n\n # ------ Use functools.partial to wrap lcurve* with relevant keywords---\n if args.fits_input:\n wrap_fun = _wrap_fits\n argdict = {\"noclobber\": args.noclobber}\n elif args.txt_input:\n wrap_fun = _wrap_txt\n argdict = {\"noclobber\": args.noclobber}\n else:\n wrap_fun = 
_wrap_lc\n argdict = {\n \"noclobber\": args.noclobber,\n \"safe_interval\": safe_interval,\n \"pi_interval\": pi_interval,\n \"e_interval\": e_interval,\n \"min_length\": args.minlen,\n \"gti_split\": args.gti_split,\n \"ignore_gtis\": args.ignore_gtis,\n \"bintime\": bintime,\n \"outdir\": args.outdir,\n \"deorbit_par\": args.deorbit_par,\n }\n\n arglist = [[f, argdict.copy()] for f in args.files]\n na = len(arglist)\n outfile = args.outfile\n if outfile is not None:\n outname = os.path.splitext(outfile)[0]\n for i in range(na):\n if na > 1:\n outname = outfile + \"_{0}\".format(i)\n arglist[i][1][\"outfile\"] = outname\n\n # -------------------------------------------------------------------------\n outfiles = []\n\n if os.name == \"nt\" or args.nproc == 1:\n for a in arglist:\n outfiles.append(wrap_fun(a))\n else:\n pool = Pool(processes=args.nproc)\n for i in pool.imap_unordered(wrap_fun, arglist):\n outfiles.append(i)\n pool.close()\n\n log.debug(f\"{outfiles}\")\n\n if args.scrunch:\n scrunch_lightcurves(outfiles)\n\n if args.join:\n join_lightcurves(outfiles)\n\n\ndef main(args=None):\n \"\"\"Main function called by the `HENlcurve` command line script.\"\"\"\n import argparse\n from .base import _add_default_args, check_negative_numbers_in_args\n\n description = (\n \"Create lightcurves starting from event files. It is \"\n \"possible to specify energy or channel filtering options\"\n )\n parser = argparse.ArgumentParser(description=description)\n\n parser.add_argument(\"files\", help=\"List of files\", nargs=\"+\")\n\n parser.add_argument(\n \"-b\",\n \"--bintime\",\n type=float,\n default=1,\n help=\"Bin time; if negative, negative power of 2\",\n )\n parser.add_argument(\n \"--safe-interval\",\n nargs=2,\n type=float,\n default=[0, 0],\n help=\"Interval at start and stop of GTIs used\" + \" for filtering\",\n )\n parser = _add_default_args(parser, [\"energies\", \"pi\"])\n parser.add_argument(\n \"-s\",\n \"--scrunch\",\n help=\"Create scrunched light curve (single channel)\",\n default=False,\n action=\"store_true\",\n )\n parser.add_argument(\n \"-j\",\n \"--join\",\n help=\"Create joint light curve (multiple channels)\",\n default=False,\n action=\"store_true\",\n )\n parser.add_argument(\n \"-g\",\n \"--gti-split\",\n help=\"Split light curve by GTI\",\n default=False,\n action=\"store_true\",\n )\n parser.add_argument(\n \"--minlen\",\n help=\"Minimum length of acceptable GTIs (default:4)\",\n default=4,\n type=float,\n )\n parser.add_argument(\n \"--ignore-gtis\",\n help=\"Ignore GTIs\",\n default=False,\n action=\"store_true\",\n )\n parser.add_argument(\n \"-d\", \"--outdir\", type=str, default=None, help=\"Output directory\"\n )\n parser.add_argument(\n \"--noclobber\",\n help=\"Do not overwrite existing files\",\n default=False,\n action=\"store_true\",\n )\n parser.add_argument(\n \"--fits-input\",\n help=\"Input files are light curves in FITS format\",\n default=False,\n action=\"store_true\",\n )\n parser.add_argument(\n \"--txt-input\",\n help=\"Input files are light curves in txt format\",\n default=False,\n action=\"store_true\",\n )\n parser = _add_default_args(\n parser, [\"deorbit\", \"output\", \"loglevel\", \"debug\", \"nproc\"]\n )\n\n args = check_negative_numbers_in_args(args)\n args = parser.parse_args(args)\n if args.debug:\n args.loglevel = \"DEBUG\"\n log.setLevel(args.loglevel)\n\n with log.log_to_file(\"HENlcurve.log\"):\n _execute_lcurve(args)\n\n\ndef scrunch_main(args=None):\n \"\"\"Main function called by the `HENscrunchlc` command line 
script.\"\"\"\n import argparse\n\n description = \"Sum lightcurves from different instruments or energy ranges\"\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument(\"files\", help=\"List of files\", nargs=\"+\")\n parser.add_argument(\n \"-o\",\n \"--out\",\n type=str,\n default=\"out_scrlc\" + HEN_FILE_EXTENSION,\n help=\"Output file\",\n )\n parser.add_argument(\n \"--loglevel\",\n help=(\n \"use given logging level (one between INFO, \"\n \"WARNING, ERROR, CRITICAL, DEBUG; \"\n \"default:WARNING)\"\n ),\n default=\"WARNING\",\n type=str,\n )\n parser.add_argument(\n \"--debug\",\n help=\"use DEBUG logging level\",\n default=False,\n action=\"store_true\",\n )\n\n args = parser.parse_args(args)\n files = args.files\n\n if args.debug:\n args.loglevel = \"DEBUG\"\n\n log.setLevel(args.loglevel)\n with log.log_to_file(\"HENscrunchlc.log\"):\n scrunch_lightcurves(files, args.out)\n\n\ndef baseline_main(args=None):\n \"\"\"Main function called by the `HENbaselinesub` command line script.\"\"\"\n import argparse\n\n description = (\n \"Subtract a baseline from the lightcurve using the Asymmetric Least \"\n \"Squares algorithm. The two parameters p and lambda control the \"\n \"asymmetry and smoothness of the baseline. See below for details.\"\n )\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument(\"files\", help=\"List of files\", nargs=\"+\")\n parser.add_argument(\"-o\", \"--out\", type=str, default=None, help=\"Output file\")\n parser.add_argument(\n \"--loglevel\",\n help=(\n \"use given logging level (one between INFO, \"\n \"WARNING, ERROR, CRITICAL, DEBUG; \"\n \"default:WARNING)\"\n ),\n default=\"WARNING\",\n type=str,\n )\n parser.add_argument(\n \"--debug\",\n help=\"use DEBUG logging level\",\n default=False,\n action=\"store_true\",\n )\n parser.add_argument(\n \"-p\",\n \"--asymmetry\",\n type=float,\n help='\"asymmetry\" parameter. Smaller values make the '\n 'baseline more \"horizontal\". Typically '\n \"0.001 < p < 0.1, but not necessarily.\",\n default=0.01,\n )\n parser.add_argument(\n \"-l\",\n \"--lam\",\n type=float,\n help='lambda, or \"smoothness\", parameter. Larger'\n \" values make the baseline stiffer. 
Typically \"\n \"1e2 < lam < 1e9\",\n default=1e5,\n )\n\n args = parser.parse_args(args)\n files = args.files\n\n if args.debug:\n args.loglevel = \"DEBUG\"\n\n log.setLevel(args.loglevel)\n with log.log_to_file(\"HENbaseline.log\"):\n _baseline_lightcurves(files, args.out, args.asymmetry, args.lam)\n","sub_path":"hendrics/lcurve.py","file_name":"lcurve.py","file_ext":"py","file_size_in_byte":29746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"167167997","text":"import pyodbc\nimport json\n\ncnxn = pyodbc.connect(\"Driver={SQL Server Native Client 11.0};\"\n \"Server=ME360-SQL;\"\n \"Database=ME360;\"\n \"uid=Tableau;pwd=sqlr3p0rts;\")\n\ncursor = cnxn.cursor()\nquery = 'SELECT * FROM Task'\nresult = cursor.execute(query)\n\nitems = [dict(zip([key[0] for key in cursor.description],row)) for row in result]\n\n\ncnxn.close()\n\nprint(json.dumps({'items': items}, default=str))","sub_path":"BigQueryDataLoads/connection_pyodbc.py","file_name":"connection_pyodbc.py","file_ext":"py","file_size_in_byte":475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"119815768","text":"import numpy as np\nimport pandas as pd\nimport toyplot as tp\nimport math\nimport matplotlib.pyplot as plt\nimport matplotlib.ticker as ticker\nfrom matplotlib import gridspec\nfrom matplotlib.lines import Line2D\nfrom .util import *\nimport seaborn as sns\nsns.set_style('whitegrid')\n\ndef init_plotting():\n sns.set_style('whitegrid')\n plt.rcParams['figure.figsize'] = (12, 8)\n plt.rcParams['font.size'] = 13\n plt.rcParams['font.family'] = 'OfficinaSanITCBoo'\n plt.rcParams['axes.labelsize'] = 1.1*plt.rcParams['font.size']\n plt.rcParams['axes.titlesize'] = 1.1*plt.rcParams['font.size']\n plt.rcParams['legend.fontsize'] = plt.rcParams['font.size']\n plt.rcParams['xtick.labelsize'] = plt.rcParams['font.size']\n plt.rcParams['ytick.labelsize'] = plt.rcParams['font.size']\n\ndef compare(res,obs,freq='D'):\n # input two series and a frequency\n init_plotting()\n res = res.resample(freq).mean()\n obs = obs.resample(freq).mean()\n\n fig = plt.figure(figsize=(12, 3)) \n gs = gridspec.GridSpec(1, 2, width_ratios=[3, 1]) \n\n ax0 = plt.subplot(gs[0])\n res.plot(ax=ax0, color='indianred')\n obs.plot(ax=ax0, color='k')\n ax0.set_title('%s, %s' % (res.name, obs.name), family='OfficinaSanITCMedium', loc='left')\n ax0.legend(['Simulated', 'Observed'], ncol=3)\n\n ax1 = plt.subplot(gs[1])\n r = np.corrcoef(obs.values,res.values)[0,1]\n ax1.scatter(obs.values, res.values, s=3, c='steelblue', edgecolor='none', alpha=0.7)\n ax1.set_ylabel('Simulated')\n ax1.set_xlabel('Observed')\n ax1.annotate('$R^2 = %f$' % r**2, xy=(0,0), color='0.3')\n ax1.set_xlim([0.0, ax1.get_xlim()[1]])\n ax1.set_ylim([0.0, ax1.get_ylim()[1]])\n\n plt.tight_layout()\n plt.show()\n\ndef stack(data, district_name):\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n x = np.arange(len(data[0]))\n #ax1.fill_between(x,data[0],data[1], color = 'xkcd:cornflower')\n #ax1.fill_between(x,data[1],data[2], color = 'xkcd:denim blue')\n ax1.fill_between(x,data[2],data[3], color = 'xkcd:light rose')\n ax1.fill_between(x,data[3],data[4], color = 'xkcd:neon red')\n ax1.fill_between(x,data[4],data[5], color = 'xkcd:olive')\n ax1.fill_between(x,data[5],data[6], color = 'xkcd:turquoise')\n ax1.fill_between(x,data[6],data[7], color = 'xkcd:lime')\n ax1.fill_between(x,data[7],data[8], color = 'xkcd:cream')\n ax1.fill_between(x,data[8],data[9], color = 'xkcd:bright yellow')\n 
ax1.fill_between(x,data[9],data[10], color = 'xkcd:golden yellow')\n ax1.set_ylabel('District Deliveries (tAF) District Supplies (tAF) ')\n ax1.set_xlabel('Days from 10/1/1996')\n ax1.set_xlim([0.0, ax1.get_xlim()[1]])\n sq1 = Line2D([0], [0], linestyle = \"none\", marker = \"s\", alpha = 0.4, markersize = 10, markerfacecolor = 'xkcd:cornflower')\n sq2 = Line2D([0], [0], linestyle = \"none\", marker = \"s\", alpha = 0.4, markersize = 10, markerfacecolor = 'xkcd:denim blue')\n sq3 = Line2D([0], [0], linestyle = \"none\", marker = \"s\", alpha = 0.4, markersize = 10, markerfacecolor = 'xkcd:light rose')\n sq4 = Line2D([0], [0], linestyle = \"none\", marker = \"s\", alpha = 0.4, markersize = 10, markerfacecolor = 'xkcd:neon red')\n sq5 = Line2D([0], [0], linestyle = \"none\", marker = \"s\", alpha = 0.4, markersize = 10, markerfacecolor = 'xkcd:olive')\n sq6 = Line2D([0], [0], linestyle = \"none\", marker = \"s\", alpha = 0.4, markersize = 10, markerfacecolor = 'xkcd:turquoise')\n sq7 = Line2D([0], [0], linestyle = \"none\", marker = \"s\", alpha = 0.4, markersize = 10, markerfacecolor = 'xkcd:lime')\n sq8 = Line2D([0], [0], linestyle = \"none\", marker = \"s\", alpha = 0.4, markersize = 10, markerfacecolor = 'xkcd:cream')\n sq9 = Line2D([0], [0], linestyle = \"none\", marker = \"s\", alpha = 0.4, markersize = 10, markerfacecolor = 'xkcd:bright yellow')\n sq10 = Line2D([0], [0], linestyle = \"none\", marker = \"s\", alpha = 0.4, markersize = 10, markerfacecolor = 'xkcd:golden yellow')\n\n plt.legend((sq10, sq9, sq8, sq7, sq6, sq5, sq4, sq3), (\"Paper Trade Balance\", \"Carryover Surface Storage\", \"Annual Contract Allocation\", \"District SW Deliveries\", \"Deliveries from In-Leiu Partner\", \"Deliveries from District Banked Storage\", \"Private Pumping\", \"Pumping to In-Leiu Partner\"), ncol = 3, mode = 'expand', bbox_to_anchor = (0.0, 1.025, 1.0, .4), loc = 3, borderaxespad = 0.0)\n plt.title(district_name + \" Water Accounting\")\n plt.show()\n\ndef waterbank_storage(data,member_list,bank_name):\n colorlist = ['xkcd:purple', 'xkcd:green', 'xkcd:blue', 'xkcd:pink', 'xkcd:brown', 'xkcd:red', 'xkcd:light blue', 'xkcd:teal', 'xkcd:orange', 'xkcd:light green', 'xkcd:magenta', 'xkcd:yellow', 'xkcd:sky blue', 'xkcd:grey', 'xkcd:lime green', 'xkcd:light purple', 'xkcd:barbie pink', 'xkcd:turquoise', 'xkcd:lavender', 'xkcd:tan', 'xkcd:cyan', 'xkcd:aqua', 'xkcd:forest green', 'xkcd:mauve', 'xkcd:dark purple', 'xkcd:bright green', 'xkcd:maroon', 'xkcd:olive']\n\n fig = plt.figure()\n ax1 = fig.add_subplot(111)\n i = 0\n legend_list = []\n for member in member_list:\n thismember = member\n x = np.arange(len(data[thismember]))\n\n if i > 0:\n ax1.fill_between(x,data[lastmember],data[thismember], color = colorlist[i])\n sq1 = Line2D([0], [0], linestyle = \"none\", marker = \"s\", alpha = 0.4, markersize = 10, markerfacecolor = colorlist[i])\n legend_list.append(sq1)\n else:\n lastmember_data = np.zeros(len(data[thismember]))\n ax1.fill_between(x,lastmember_data,data[thismember], color = colorlist[i])\n sq1 = Line2D([0], [0], linestyle = \"none\", marker = \"s\", alpha = 0.4, markersize = 10, markerfacecolor = colorlist[i])\n legend_list.append(sq1)\n\n\n lastmember = thismember\n i += 1\n\n plt.legend(legend_list, member_list,ncol = math.ceil(len(member_list)/3), bbox_to_anchor = (0.0, 1.025, 1.0, .4), loc = 3, borderaxespad = 0.0)\n plt.title(bank_name + \" bank\")\n plt.show()\n \ndef exposure(data,var_ind,num_ind):\n colors = (\"yellow\", \"green\", \"blue\", \"red\")\n groups = (\"Total 
Deliveries, In-District + Banking Partners\", \"In-District Deliveries, Banked + Contract + In-leiu\", \"In-District Deliveries, Banked + Contract\", \"In-District Deliveries, Contract\")\n fig = plt.figure()\n ax1 = fig.add_subplot(1,1,1, axisbg = \"1.0\")\n i = num_ind\n for color, group in zip(colors, groups):\n x = data[var_ind]\n y = data[i]\n ax1.scatter(x,y, alpha = 0.8, c = color, edgecolors = 'none', s = 30, label = group)\n i = i + 1\n ax1.set_ylabel('Annual District Sales (tAF)')\n ax1.set_xlabel('Annual SWP Pumping (tAF) minus Pumping at Edmonston')\n ax1.set_xlim([0.0, ax1.get_xlim()[1]])\n ax1.set_ylim([0.0, ax1.get_ylim()[1]])\n plt.legend(loc = 2, fancybox = True, shadow = True)\n plt.show()\n\ndef financial(data,var_ind,num_ind):\n colors = (\"yellow\", \"green\", \"blue\", \"red\")\n groups = (\"Banking Recovery\", \"Banking Recharge\", \"In-District Deliveries, Variable\", \"In-District Deliveries, Contract\")\n fig = plt.figure()\n ax1 = fig.add_subplot(1,1,1, axisbg = \"1.0\")\n i = num_ind\n y = np.zeros((5,len(data[var_ind])))\n costs = np.zeros(4)\n costs[0] = .08633 - .01\n costs[1] = .01233 + .044\n costs[2] = .118\n costs[3] = .056\n i = num_ind + len(costs) - 1\n x = data[var_ind]\n index_values = np.argsort(x)\n for color, group in zip(colors, groups):\n for spot in range(0,len(data[var_ind])):\n ordered_index = index_values[spot]\n if i == num_ind + len(costs) - 1:\n data_prev = 0.0\n value_prev = 0.0\n else:\n data_prev = data[i+1][ordered_index]\n value_prev = y[i+1-num_ind][spot]\n y[i-num_ind][spot] += (data[i][ordered_index] - data_prev)*costs[i-num_ind] + value_prev\n print(i, end = \" \")\n print(ordered_index, end = \" \")\n print(value_prev, end = \" \")\n print(data_prev, end = \" \")\n print(data[i][ordered_index], end = \" \")\n print(x[ordered_index])\n y[num_ind + 1][spot] = x[ordered_index]\n \t \n i = i - 1\n print(y)\n print(data[num_ind])\n print(data[num_ind+1])\n print(data[num_ind+2])\n print(data[num_ind+3])\n\n ax1.fill_between(y[4],np.zeros(len(y[0])), y[3], color = \"yellow\")\n ax1.fill_between(y[4],y[3],y[2], color = 'green')\n ax1.fill_between(y[4],y[2],y[1], color = 'blue')\n ax1.fill_between(y[4],y[1],y[0], color = 'red')\n sq1 = Line2D([0], [0], linestyle = \"none\", marker = \"s\", alpha = 0.4, markersize = 10, markerfacecolor = 'yellow')\n sq2 = Line2D([0], [0], linestyle = \"none\", marker = \"s\", alpha = 0.4, markersize = 10, markerfacecolor = 'green')\n sq3 = Line2D([0], [0], linestyle = \"none\", marker = \"s\", alpha = 0.4, markersize = 10, markerfacecolor = 'blue')\n sq4 = Line2D([0], [0], linestyle = \"none\", marker = \"s\", alpha = 0.4, markersize = 10, markerfacecolor = 'red')\n plt.legend((sq4, sq3, sq2, sq1), groups, ncol = 2, mode = 'expand', bbox_to_anchor = (0.0, 1.025, 1.0, .4), loc = 3, borderaxespad = 0.0)\n ax1.set_ylabel('Annual Revenue ($MM)')\n ax1.set_xlabel('Pumping difference between Banks and Edmonston, minus Article 21 Flows (tAF)')\n plt.show()\n\n fig2, ax2 = plt.subplots()\n ax2.xaxis.set_major_locator(ticker.MaxNLocator(integer = True))\n plt.plot(range(1997,2017),y[0])\n plt.ylabel('Total Revenue ($MM)')\n ax2.axis('tight')\n fig2.tight_layout()\n plt.xlabel('WY (OCT-SEPT)')\n\n #ax1.scatter(x,y, alpha = 0.8, c = 'red', edgecolors = 'black', s = 30)\n #ax1.set_ylabel('Annual District Sales ($)')\n #ax1.set_xlabel('Annual SWP Pumping minus Edmonston Pumping (tAF)')\n #ax1.set_xlim([0.0, ax1.get_xlim()[1]])\n #ax1.set_ylim([0.0, ax1.get_ylim()[1]])\n #plt.legend(loc = 2, fancybox = True, shadow = 
True)\n #plt.show()\n","sub_path":"cord/plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":9461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"330736956","text":"from flask import Blueprint, abort, request, jsonify\nfrom flask_restful import Resource, Api, reqparse\n\nimport requests\n\nAPI_PATH_G3 = 'https://charette9.ing.puc.cl/api'\n\n\nclass Message(Resource):\n API_PATH_M_G3 = API_PATH_G3 + '{}'.format('/posts/{}')\n\n def get(self, id_):\n token = request.args.get('access_token','')\n resp = requests.get(self.API_PATH_M_G3.format(id_), headers={'Authorization': 'Bearer ' + token})\n if resp.status_code == 200:\n resp = resp.json()\n # La API de ellos no devuelve ni el postId ni el personId\n resp['description'] = resp['content']\n resp['id'] = resp['post_id']\n resp['postId'] = id_\n resp['personId'] = resp['user_id']\n return jsonify(resp)\n else:\n abort(resp.status_code)\n\n def delete(self, id_):\n token = request.args.get('access_token','')\n resp = requests.delete(self.API_PATH_M_G3.format(id_), headers={'Authorization': 'Bearer ' + token})\n if resp.status_code == 200:\n return jsonify(resp.json())\n else:\n abort(resp.status_code)\n\nclass MessagesResponsesCollection(Resource):\n API_PATH_MRC_G3 = API_PATH_G3 + '{}'.format('/posts/{}/answers')\n\n def __init__(self):\n self.reqparse= reqparse.RequestParser()\n self.reqparse.add_argument(\n 'description',\n required=True,\n help= 'No description provided',\n location=['form', 'json',]\n )\n super().__init__()\n\n def get(self, id_):\n token = request.args.get('access_token','')\n resp = requests.get(self.API_PATH_MRC_G3.format(id_), headers={'Authorization': 'Bearer ' + token})\n if resp.status_code == 200:\n messages = []\n for message in resp.json():\n message['description'] = message['content']\n message['id'] = message['answer_id']\n message['messageId'] = id_\n message['personId'] = message['user_id']\n messages.append(message)\n '''\n item = requests.get(self.API_PATH_MRC_G3+'/{}'.format(id_, message['answer_id']),\n headers={'Authorization': 'Bearer ' + token})\n if item.status_code == 200:\n item = item.json()\n item = item[0]\n item['']\n item['content'] = item['content']\n item['reply_id'] = item['answer_id']\n item['author_id'] = item['user_id']\n item['author_name'] = \"\"\n item['published_at'] = item['pub_date']\n messages.append(item)\n else:\n abort(item.status_code)\n '''\n return jsonify(messages)\n else:\n abort(resp.status_code)\n\n\n\n def post(self, id_):\n args = self.reqparse.parse_args()\n # Nosotros solo enviamos la description en los args\n # La API G3 tambien pide el user_id y el post_identifier\n argsG3 = args.copy()\n argsG3['content'] = argsG3['description']\n token = request.args.get('access_token','')\n user = requests.get(API_PATH_G3 + '/user', headers={'Authorization': 'Bearer ' + token})\n argsG3['user_id'] = user.json()['id']\n argsG3['post_identifier'] = id_\n resp = requests.post(self.API_PATH_MRC_G3.format(id_), data=argsG3, headers={'Authorization': 'Bearer ' + token})\n if resp.status_code == 200:\n message = resp.json()\n message['description'] = argsG3['content']\n message['id'] = message['answer_id']\n message['messageId'] = id_\n message['personId'] = user.json()['id']\n return jsonify(message)\n else:\n abort(resp.status_code)\n\ng3_messages_api = Blueprint('resources_g3.messages', __name__)\n\napi = Api(g3_messages_api)\napi.add_resource(Message, 
'/messages/')\napi.add_resource(MessagesResponsesCollection, '/messages//responses')\n","sub_path":"grupo5_backend/resources_g3/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"626506970","text":"\"\"\"\nglobals.py\n\nHolds the globals.\n\"\"\"\n\n# colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nBLUE = (0, 0, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\nPURPLE = (255, 0, 255)\n\n# images\nTILES = ['imgs/grassTile.png', 'imgs/waterTile.png', 'imgs/dirtTile.png', 'imgs/stoneTile.png',\n 'imgs/sandTile.png', 'imgs/bridgeVerTile.png', 'imgs/bridgeHorTile.png', 'imgs/stepsTile.png',\n 'imgs/treeTile.png', 'imgs/rockTile.png']\nTILE_SIZE = 20\n\n# screen size\n# width first, height second\nWIDTH = 800\nHEIGHT = 600\nSIZE = [WIDTH, HEIGHT]\nORIGIN = [0, 0]\nMAX_TILES_WIDTH = WIDTH // TILE_SIZE\nMAX_TILES_HEIGHT = HEIGHT // TILE_SIZE\n\n# player\nW = 20\nH = 20\nPLAYER_SIZE = [W, H]\nPLAYER_START = [10, 10]\nPLAYER_SCREEN_START = 1\nVEL = 5\nN_VEL = -VEL\n\n# tile names/enums\nGRASS = 0\nWATER = 1\nDIRT = 2\nSTONE = 3\nSAND = 4\nBRIDGE_VER = 5\nBRIDGE_HOR = 6\nSTEPS = 7\nTREE = 8\nROCK = 9","sub_path":"globals.py","file_name":"globals.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"449362362","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 27 16:32:13 2020\r\n\r\n@author: m58527\r\n\"\"\"\r\n\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jul 27 10:10:56 2020\r\n\r\n@author: m58527\r\n\"\"\"\r\n\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nfrom functools import partial\r\nimport numpy as np\r\nfrom addMenuBar import CreateMenuBar\r\n\r\n\r\nclass DiceRoller:\r\n\r\n def __init__(self,root):\r\n \r\n self.root=root\r\n \r\n # Title of the window\r\n self.root.title(\"DICE ROLLER\")\r\n # Set icon\r\n self.root.iconbitmap('button icons/icon.ico')\r\n self.root.geometry('800x400')\r\n \r\n #%% MENU BAR \r\n CreateMenuBar(self,root)\r\n \r\n #%% CREATE FRAMES\r\n borderColor = 'tan'\r\n \r\n self.headerFrame = Frame(root)\r\n self.headerFrame.pack(side=TOP,fill=X)\r\n self.headerFrame.config(relief=RIDGE,bg=borderColor,height=30) \r\n \r\n self.leftFrame = Frame(root)\r\n self.leftFrame.pack(side=LEFT,fill=Y)\r\n self.leftFrame.config(relief=RIDGE,bg=borderColor,width=30) \r\n \r\n self.rightFrame = Frame(root)\r\n self.rightFrame.pack(side=RIGHT,fill=Y)\r\n self.rightFrame.config(relief=RIDGE,bg=borderColor,width=30) \r\n \r\n self.topFrame = Frame(root)\r\n self.topFrame.config(relief=RIDGE,\r\n height=300,\r\n width=300)\r\n self.topFrame.pack(fill=X)\r\n \r\n self.bottomFrame = Frame(root)\r\n self.bottomFrame.pack()\r\n self.bottomFrame.config(relief=FLAT,\r\n height=500,\r\n width=350) \r\n\r\n self.settingsFrame = Frame(root)\r\n self.settingsFrame.pack(fill=BOTH)\r\n self.settingsFrame.config(relief=RIDGE,bd=4,bg='gray')\r\n \r\n self.copyrightFrame = Frame(root)\r\n self.copyrightFrame.pack(fill=BOTH,side=BOTTOM)\r\n self.copyrightFrame.config(relief=RIDGE,bg=borderColor,height=30)\r\n\r\n \r\n \r\n #%% CREATE THE WINDOW LABEL\r\n #FRAME FOR LABELS \r\n self.label = ttk.Label(self.topFrame, text = 'Welcome to the dice generator!')\r\n self.label.pack()\r\n \r\n #%% CREATE DICE BUTTONS\r\n #DEFINE VARIABLES FOR DICE BUTTONS\r\n #COL\r\n self.descCol = 0\r\n self.setZeroCol = self.descCol+1\r\n self.minusCol = 
self.setZeroCol+1\r\n self.plusCol = self.minusCol+1\r\n self.entryCol = self.plusCol+1\r\n self.rollCol = self.entryCol+1\r\n self.answerCol = self.rollCol+1\r\n self.listResultCol = self.answerCol+1\r\n \r\n #ROW\r\n self.rollRowStart = 2\r\n\r\n \r\n #FRAMES\r\n self.rollButtonFrame = self.bottomFrame\r\n\r\n #BUTTON AND LABEL VARIABLES\r\n self.entryWidth = 6\r\n\r\n#CREATE BUTTONS, LABELS, AND DESCRIPTIONS\r\n self.diceEntries = []\r\n self.diceButtons = []\r\n self.diceLabels = []\r\n self.diceDescs = []\r\n self.listResults = []\r\n \r\n #DICE\r\n self.diceNames = ['D4','D6','D8','D10','D12','D20','D100']\r\n #pictures of dice\r\n self.dicePics = ['button icons/d4pic.png',\r\n 'button icons/d6pic.png',\r\n 'button icons/d8pic.png',\r\n 'button icons/d10pic.png',\r\n 'button icons/d12pic.png',\r\n 'button icons/d20pic.png',\r\n 'button icons/d100pic.png']\r\n \r\n for i in range(len(self.diceNames)):\r\n \r\n #CREATE TEXT FOR DESC\r\n self.buttonText = 'Roll '+self.diceNames[i]\r\n self.descText = 'Number of '+self.diceNames[i]+' dice: '\r\n \r\n #GET ROW\r\n self.currentRow = self.rollRowStart+i\r\n \r\n #CREATE ENTRY BOX\r\n self.diceEntry = ttk.Entry(self.rollButtonFrame, \r\n width=self.entryWidth)\r\n self.diceEntry.grid(row=self.currentRow,\r\n column=self.entryCol)\r\n \r\n \r\n #CREATE LABELS FOR ANSWERS\r\n self.diceLabel = ttk.Label(self.rollButtonFrame, \r\n text = '-')\r\n self.diceLabel.grid(row=self.currentRow,\r\n column=self.answerCol)\r\n \r\n self.listResultLabel = ttk.Label(self.rollButtonFrame,\r\n text='[]:')\r\n self.listResultLabel.grid(row=self.currentRow,\r\n column=self.listResultCol)\r\n \r\n #CREATE LABELS FOR DESCRIPTIONS\r\n self.diceDesc = ttk.Label(self.rollButtonFrame, \r\n text = self.descText,\r\n width = 22)\r\n self.diceDesc.grid(row=self.currentRow,\r\n column=self.descCol)\r\n \r\n #CREATE BUTTONS\r\n #dice buttons\r\n self.diceButton = ttk.Button(self.rollButtonFrame, \r\n text = self.buttonText,\r\n command = partial(self.rollDX,i))\r\n self.diceButton.grid(row=self.currentRow,\r\n column=self.rollCol)\r\n \r\n #zero, plus, minus buttons\r\n self.zeroButton = ttk.Button(self.rollButtonFrame, \r\n text = 'zero',\r\n width=5,\r\n command=partial(self.setZero,i))\r\n self.zeroButton.grid(row=self.currentRow,\r\n column=self.setZeroCol)\r\n self.minusButton = ttk.Button(self.rollButtonFrame, \r\n text = '-',\r\n width=3,\r\n command=partial(self.minusOne,i))\r\n self.minusButton.grid(row=self.currentRow,\r\n column=self.minusCol)\r\n self.plusButton = ttk.Button(self.rollButtonFrame, \r\n text = '+',\r\n width=3,\r\n command=partial(self.plusOne,i))\r\n self.plusButton.grid(row=self.currentRow,\r\n column=self.plusCol)\r\n \r\n #ADD THINGS TO LISTS\r\n self.diceEntries.append(self.diceEntry)\r\n self.diceButtons.append(self.diceButton)\r\n self.diceLabels.append(self.diceLabel)\r\n self.diceDescs.append(self.diceDesc)\r\n self.listResults.append(self.listResultLabel)\r\n \r\n #OTHER BUTTONS AND STUFF\r\n #TOTAL\r\n self.totalRow = len(self.diceNames)+2\r\n \r\n self.TotalButton = ttk.Button(self.rollButtonFrame, text = ' Roll All ',\r\n command = self.rollAll) \r\n self.TotalButton.grid(row=self.totalRow, column = self.rollCol-1, columnspan=2,)\r\n \r\n self.TotalLabel = ttk.Label(self.rollButtonFrame, text = '-')\r\n self.TotalLabel.grid(row=self.totalRow, column = self.answerCol)\r\n \r\n #RESET\r\n self.resetCol = self.descCol\r\n self.resetRow = self.totalRow\r\n \r\n self.ResetButton = ttk.Button(self.rollButtonFrame, text = 'RESET 
FIELDS',\r\n command = self.resetFields) \r\n self.ResetButton.grid(row=self.resetRow, column = self.resetCol)\r\n \r\n #RANDOM\r\n self.randCol = self.resetCol\r\n self.randRow = self.resetRow+1\r\n \r\n self.RandButton = ttk.Button(self.rollButtonFrame, text = 'Random Numbers',\r\n command = self.fillRandom) \r\n self.RandButton.grid(row=self.randRow, column = self.randCol) \r\n\r\n self.randRow2 = self.randRow+1\r\n \r\n self.RandButton2 = ttk.Button(self.rollButtonFrame, text = 'Random Numbers Between:',\r\n command = self.fillRandomBetween) \r\n self.RandButton2.grid(row=self.randRow2, column = self.randCol) \r\n \r\n #entries for random limits\r\n self.lowerRanLimEntry = ttk.Entry(self.rollButtonFrame, \r\n width=self.entryWidth)\r\n self.lowerRanLimEntry.grid(row=self.randRow2,\r\n column=self.randCol+1)\r\n \r\n self.upperRanLimEntry = ttk.Entry(self.rollButtonFrame, \r\n width=self.entryWidth)\r\n self.upperRanLimEntry.grid(row=self.randRow2,\r\n column=self.randCol+2)\r\n \r\n #checkboxes\r\n self.ageVar = IntVar()\r\n self.ageCheckbox = Checkbutton(self.settingsFrame,text='I am 18 years or older',variable=self.ageVar)\r\n self.ageCheckbox.pack()\r\n \r\n #%% DICE ROLLER METHODS \r\n \r\n def rollDX(self,i,output=None):\r\n \r\n self.currentDiceName = self.diceNames[i]\r\n self.diceLabel = self.diceLabels[i]\r\n self.diceEntry = self.diceEntries[i]\r\n self.listResultLabel = self.listResults[i]\r\n \r\n self.ageResult = self.ageVar.get()\r\n \r\n #max roll of dice\r\n max_sides=int(self.currentDiceName.strip('D'))\r\n #get number of dice from input\r\n numDice = self.diceEntry.get()\r\n #check if dice number is blank\r\n if numDice=='':\r\n numDice=0\r\n \r\n if self.ageResult == 1:\r\n \r\n #update label saying you have rolled dice \r\n rollText = 'You left it blank you idiot'\r\n self.label.config(text=rollText)\r\n self.label.config(wraplength = 350,\r\n justify = CENTER,\r\n foreground = 'red',\r\n background = 'black',\r\n font = ('Kristen ITC',24))\r\n \r\n dicePhoto = PhotoImage(file = 'button icons/dwight.png')\r\n dicePhoto = dicePhoto.subsample(5,5)\r\n self.label.img = dicePhoto\r\n self.label.config(image=dicePhoto,\r\n compound = 'left')\r\n \r\n else: \r\n #update label saying you have rolled dice \r\n rollText = 'Please fill the blank.'\r\n self.label.config(text=rollText)\r\n self.label.config(wraplength = 350,\r\n justify = CENTER,\r\n foreground = 'black',\r\n background = 'pink',\r\n font = ('Kristen ITC',24))\r\n \r\n dicePhoto = PhotoImage(file = 'button icons/dwight2.png')\r\n dicePhoto = dicePhoto.subsample(5,5)\r\n self.label.img = dicePhoto\r\n self.label.config(image=dicePhoto,\r\n compound = 'left')\r\n \r\n return\r\n else:\r\n numDice = int(numDice)\r\n #initiate dice total \r\n self.diceTotal = []\r\n \r\n #roll each dice, add up total\r\n for eachDice in range(numDice):\r\n diceRoll = np.random.randint(1,max_sides+1)\r\n self.diceTotal.append(diceRoll)\r\n \r\n self.diceTotalList = self.diceTotal\r\n self.diceTotal = sum(self.diceTotal)\r\n \r\n if output==None:\r\n #update label saying you have rolled dice \r\n rollText = 'You have rolled '+str(numDice)+' '+ self.currentDiceName +'s.'\r\n self.label.config(text=rollText)\r\n self.label.config(wraplength = 350,\r\n justify = CENTER,\r\n foreground = 'black',\r\n background = 'white',\r\n font = ('Kristen ITC',18))\r\n \r\n dicePhoto = PhotoImage(file = self.dicePics[i])\r\n dicePhoto = dicePhoto.subsample(5,5)\r\n self.label.img = dicePhoto\r\n self.label.config(image=dicePhoto,\r\n compound = 
'left')\r\n \r\n \r\n #update label with result\r\n answerText = 'Total: '+str(self.diceTotal)\r\n self.diceLabel.config(text = answerText)\r\n self.listResultLabel.config(text=str(sorted(self.diceTotalList)))\r\n \r\n if output==1:\r\n return self.diceTotal\r\n \r\n def rollAll(self):\r\n \r\n self.totals = []\r\n \r\n for die in range(len(self.diceNames)):\r\n self.totals.append(self.rollDX(die,output=1))\r\n \r\n if None in self.totals:\r\n \r\n self.ageResult = self.ageVar.get()\r\n if self.ageResult == 1:\r\n \r\n #update label saying you have rolled dice \r\n rollText = 'Fill all the fields \\nya lil bitch'\r\n self.label.config(text=rollText)\r\n self.label.config(wraplength = 350,\r\n justify = CENTER,\r\n foreground = 'red',\r\n background = 'black',\r\n font = ('Kristen ITC',24,'bold'))\r\n \r\n dicePhoto = PhotoImage(file = 'button icons/angryface.gif')\r\n dicePhoto = dicePhoto.subsample(5,5)\r\n self.label.img = dicePhoto\r\n self.label.config(image=dicePhoto,\r\n compound = 'left')\r\n else:\r\n \r\n #update label saying you have rolled dice \r\n rollText = 'Please fill out ALL fields.'\r\n self.label.config(text=rollText)\r\n self.label.config(wraplength = 350,\r\n justify = CENTER,\r\n foreground = 'black',\r\n background = 'pink',\r\n font = ('Kristen ITC',24,'bold'))\r\n \r\n dicePhoto = PhotoImage(file = 'button icons/angryface.gif')\r\n dicePhoto = dicePhoto.subsample(5,5)\r\n self.label.img = dicePhoto\r\n self.label.config(image=dicePhoto,\r\n compound = 'left')\r\n \r\n return\r\n \r\n diceTotal = sum(self.totals)\r\n \r\n #update label saying you have rolled dice \r\n rollText = 'You have rolled all the dice.'\r\n self.label.config(text=rollText)\r\n self.label.config(wraplength = 350,\r\n justify = CENTER,\r\n foreground = 'black',\r\n background = 'white',\r\n font = ('Kristen ITC',18))\r\n dicePhoto = PhotoImage(file = 'button icons/dice.gif')\r\n dicePhoto = dicePhoto.subsample(5,5)\r\n self.label.img = dicePhoto\r\n self.label.config(image=dicePhoto,\r\n compound = 'left')\r\n \r\n #update label with result\r\n answerText = 'Total of all dice: '+str(diceTotal)\r\n self.TotalLabel.config(text = answerText) \r\n \r\n def resetFields(self):\r\n for i in range(len(self.diceNames)):\r\n self.diceEntry = self.diceEntries[i]\r\n self.diceEntry.delete(0,END)\r\n #update label saying you have rolled dice \r\n rollText = '-'\r\n self.diceLabels[i].config(text=rollText)\r\n self.TotalLabel.config(text='')\r\n self.listResults[i].config(text='[]')\r\n \r\n #update label saying you have rolled dice \r\n rollText = 'Fields have been reset'\r\n self.label.config(text=rollText)\r\n self.label.config(wraplength = 350,\r\n justify = CENTER,\r\n foreground = 'black',\r\n background = 'white',\r\n font = ('Kristen ITC',18))\r\n \r\n \r\n \r\n def fillRandom(self):\r\n \r\n for i in range(len(self.diceNames)):\r\n self.diceEntry = self.diceEntries[i]\r\n self.diceEntry.delete(0,END)\r\n self.diceEntry.insert(0,np.random.randint(0,20))\r\n \r\n #update label saying you have rolled dice \r\n rollText = 'Fields have been filled with \\nrandom values.'\r\n self.label.config(text=rollText)\r\n self.label.config(wraplength = 350,\r\n justify = CENTER,\r\n foreground = 'black',\r\n background = 'white',\r\n font = ('Kristen ITC',18))\r\n \r\n def fillRandomBetween(self):\r\n self.upperRanLim = self.upperRanLimEntry.get()\r\n self.lowerRanLim = self.lowerRanLimEntry.get()\r\n \r\n if self.upperRanLim == '':\r\n self.upperRanLim = 10\r\n if self.lowerRanLim == '':\r\n self.lowerRanLim = 1 
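# Stripped of the Tkinter plumbing, the core of rollDX above is a loop of
# np.random.randint draws plus a running total. A standalone sketch (the
# function name below is illustrative, not part of the app):
import numpy as np

def roll(num_dice, sides):
    # np.random.randint's upper bound is exclusive, hence sides + 1
    results = [int(np.random.randint(1, sides + 1)) for _ in range(num_dice)]
    return sorted(results), sum(results)

rolls, total = roll(4, 6)  # four D6, as if "4" were typed into the D6 entry
print(rolls, total)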
\r\n        \r\n        for i in range(len(self.diceNames)):\r\n            self.diceEntry = self.diceEntries[i]\r\n            self.diceEntry.delete(0,END)\r\n            self.diceEntry.insert(0,np.random.randint(self.lowerRanLim,self.upperRanLim))\r\n        \r\n        #update label saying you have rolled dice \r\n        rollText = ('Fields have been filled with \\nrandom values between '+str(self.lowerRanLim)+' and '+str(self.upperRanLim))\r\n        self.label.config(text=rollText)\r\n        self.label.config(wraplength = 350,\r\n                          justify = CENTER,\r\n                          foreground = 'black',\r\n                          background = 'white',\r\n                          font = ('Kristen ITC',18))\r\n        \r\n    def plusOne(self,i): \r\n        self.currentDiceName = self.diceNames[i]\r\n        self.diceLabel = self.diceLabels[i]\r\n        self.diceEntry = self.diceEntries[i]\r\n        \r\n        #get current value\r\n        currentEntryValue = self.diceEntry.get()\r\n        if currentEntryValue=='':\r\n            currentEntryValue=0\r\n        #delete value\r\n        self.diceEntry.delete(0,END)\r\n        #update value\r\n        updatedEntryValue = int(currentEntryValue)+1\r\n        self.diceEntry.insert(0,updatedEntryValue)\r\n        \r\n    def minusOne(self,i):\r\n        self.currentDiceName = self.diceNames[i]\r\n        self.diceLabel = self.diceLabels[i]\r\n        self.diceEntry = self.diceEntries[i]\r\n        \r\n        #get current value\r\n        currentEntryValue = self.diceEntry.get()\r\n        if currentEntryValue=='':\r\n            currentEntryValue=0\r\n        #delete value\r\n        self.diceEntry.delete(0,END)\r\n        #update value\r\n        updatedEntryValue = int(currentEntryValue)-1\r\n        self.diceEntry.insert(0,updatedEntryValue)\r\n        \r\n    def setZero(self,i):\r\n        self.currentDiceName = self.diceNames[i]\r\n        self.diceLabel = self.diceLabels[i]\r\n        self.diceEntry = self.diceEntries[i]\r\n        \r\n        #get current value\r\n        currentEntryValue = self.diceEntry.get()\r\n        if currentEntryValue=='':\r\n            currentEntryValue=0\r\n        #delete value\r\n        self.diceEntry.delete(0,END)\r\n        #update value\r\n        updatedEntryValue = ''\r\n        self.diceEntry.insert(0,updatedEntryValue)\r\n        \r\n    #%% MENU BAR FUNCTIONS, ETC\r\n    # Defining Exit Function\r\n    def exit(self):\r\n        self.root.destroy()\r\n\r\n        \r\n#%% MAIN FUNCTION\r\n    \r\ndef main():\r\n    #create parent window\r\n    root = Tk()\r\n    app = DiceRoller(root)\r\n    root.mainloop()\r\n    \r\n#%% CALL MAIN\r\nif __name__ == \"__main__\": main()","sub_path":"Dice Roller/Dice Roller.py","file_name":"Dice Roller.py","file_ext":"py","file_size_in_byte":20270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"479438502","text":"\"\"\"Image processing module for OCR (image to text) parsing.\n\"\"\"\n# !/usr/bin/python3\n# coding: utf-8\n\n# Copyright 2019-2020\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport cv2\nimport numpy as np\nfrom PIL import Image\n\n\ndef sharpen_image(np_image):\n    \"\"\"\n    Sharpening an image\n\n    Args:\n        np_image (np.array): Image of numpy matrix\n    \n    Returns:\n        sharpened_image (np.array): Sharpened image of numpy matrix\n    \"\"\"\n    # unsharp masking: https://en.wikipedia.org/wiki/Unsharp_masking\n    kernel = np.array([[0,-1,0], [-1,5,-1], [0,-1,0]])\n    # 2D convolution ops for image sharpening\n
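# This kernel is the identity minus the 4-neighbour Laplacian
# [[0,1,0],[1,-4,1],[0,1,0]]; its weights sum to 1, so mean brightness is
# preserved while local contrast at edges is amplified.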
sharpened_image = cv2.filter2D(np_image, -1, kernel)\n return sharpened_image\n\n\ndef rotate_image(np_image, angle=-90):\n \"\"\"\n Rotates an image (angle in degrees) and expands image to avoid cropping\n\n Args:\n np_image (np.array): Image of numpy matrix\n angle (float): angle in degrees\n \n Returns:\n rotated_mat (np.array): Rotated image of numpy matrix\n \"\"\"\n\n height, width = np_image.shape[:2] # image shape has 3 dimensions\n image_center = (width/2, height/2) # getRotationMatrix2D needs coordinates in reverse order (width, height) compared to shape\n\n rotation_mat = cv2.getRotationMatrix2D(image_center, angle, 1.)\n\n # rotation calculates the cos and sin, taking absolutes of those.\n abs_cos = abs(rotation_mat[0,0]) \n abs_sin = abs(rotation_mat[0,1])\n\n # find the new width and height bounds\n bound_w = int(height * abs_sin + width * abs_cos)\n bound_h = int(height * abs_cos + width * abs_sin)\n\n # subtract old image center (bringing image back to origo) and adding the new image center coordinates\n rotation_mat[0, 2] += bound_w/2 - image_center[0]\n rotation_mat[1, 2] += bound_h/2 - image_center[1]\n\n # rotate image with the new bounds and translated rotation matrix\n rotated_mat = cv2.warpAffine(np_image, rotation_mat, (bound_w, bound_h))\n return rotated_mat\n\n\ndef deskew_image(np_image):\n \"\"\"\n Deskew Image by calculating minAreaRect that contains most threshBinary\n\n Args:\n np_image (np.array): Image of numpy matrix for opencv operation\n \n Returns:\n deskewed_image (np.array): Deskewed image of numpy matrix \n \"\"\"\n gray = cv2.cvtColor(np_image, cv2.COLOR_BGR2GRAY)\n gray = cv2.bitwise_not(gray)\n thresh = cv2.threshold(gray, 0, 255,\n cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]\n coords = np.column_stack(np.where(thresh > 0))\n angle = cv2.minAreaRect(coords)[-1]\n \n # the `cv2.minAreaRect` function returns values in the\n # range [-90, 0); as the rectangle rotates clockwise the\n # returned angle trends to 0 -- in this special case we\n # need to add 90 degrees to the angle\n if angle < -45:\n angle = -(90 + angle)\n # otherwise, just take the inverse of the angle to make\n # it positive\n else:\n angle = -angle\n\n # rotate the image to deskew it\n deskewed_image = rotate_image(np_image, angle)\n return deskewed_image\n","sub_path":"app/parser/libs/img_processing.py","file_name":"img_processing.py","file_ext":"py","file_size_in_byte":3518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"598248751","text":"'Chirp, host your own chat server. Written by Kale Champagnie '\r\nfrom sys import argv\r\nfrom app import create\r\nfrom app import start\r\nfrom app import change\r\nfrom app import list\r\nfrom app import delete\r\n\r\n\r\n\r\ndef main(args): \r\n if (len(args) < 2):\r\n print('''\r\nusage:\r\n chirp \r\n\r\npurpose:\r\n Chirp lets you host chat servers on your network. Once a user connects, they can choose to create or join a room.\r\n Users connect to your server via chirp clients such as Eime. 
Clients are available to download from github.com/node5/Chirp/clients.\r\n\r\ncommands:\r\n create create a new server\r\n start start a server, allow clients to connect \r\n change change a server's settings\r\n list list existing servers\r\n delete delete a server\r\n \r\nauthor:\r\n Kale Champagnie \r\n''')\r\n \r\n elif (len(args) > 2):\r\n print('chirp: to many arguments were given')\r\n \r\n else:\r\n command = args[1]\r\n\r\n if (command == 'create'):\r\n create.main()\r\n\r\n elif (command == 'start'):\r\n start.main()\r\n\r\n elif (command == 'change'):\r\n change.main()\r\n\r\n elif (command == 'list'):\r\n list.main()\r\n\r\n elif (command == 'delete'):\r\n delete.main()\r\n \r\n else:\r\n print('chirp: unknown command')\r\n\r\nmain(argv)\r\n","sub_path":"versions/Chirp-0.0/chirp.py","file_name":"chirp.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"243940118","text":"from PIL import Image, ImageDraw\nfrom tkinter import Tk, Canvas, Button, Checkbutton, messagebox\nfrom path import Point, PathFinder\n\nWIDTH = 600\nHEIGHT = 600\nPIXEL = 30\n\n\nclass grid:\n def __init__(self, master, *argv, **kwargs):\n self.grid = Canvas(master, *argv, **kwargs)\n self.grid.pack()\n self.image = Image.new('RGB', (WIDTH, HEIGHT), 'white')\n self.draw = ImageDraw.Draw(self.image)\n\n self.startPointLoc = []\n self.endPointLoc = []\n\n self.gridArray = []\n\n self.btn = Button(master, text='find', command=self.drawPath)\n self.btn.pack()\n\n self.btn0 = Button(master, text='restart', command=self.clearAll)\n self.btn0.pack()\n\n self.diagonals = False\n self.allowDiagonals = Checkbutton(\n master, text='Allow diagonals', command=self.toggleDiagonals)\n self.allowDiagonals.pack()\n\n self.grid.bind('', self._drawObstacle)\n self.grid.bind('', self._drawObstacle)\n self.grid.bind('', self._drawStartPoint)\n self.grid.bind('', self._drawEndPoint)\n\n def drawGrid(self):\n for x in range(PIXEL, HEIGHT, PIXEL):\n self.grid.create_line(0, x, WIDTH, x, fill=\"grey\")\n\n for y in range(PIXEL, WIDTH, PIXEL):\n self.grid.create_line(y, 0, y, HEIGHT, fill=\"grey\")\n\n def _drawObstacle(self, event):\n x, y = event.x // PIXEL, event.y // PIXEL\n self.grid.create_rectangle(\n x * PIXEL, y * PIXEL, x * PIXEL + PIXEL, y * PIXEL + PIXEL, fill='black')\n self.draw.rectangle(\n [x * PIXEL, y * PIXEL, x * PIXEL + PIXEL, y * PIXEL + PIXEL], fill='black')\n\n def _drawStartPoint(self, event):\n x, y = event.x // PIXEL, event.y // PIXEL\n if not self.startPointLoc:\n self.startPointLoc.append((x, y))\n self.grid.create_rectangle(\n x * PIXEL, y * PIXEL, x * PIXEL + PIXEL, y * PIXEL + PIXEL, fill='blue')\n self.draw.rectangle(\n [x * PIXEL, y * PIXEL, x * PIXEL + PIXEL, y * PIXEL + PIXEL], fill='blue')\n else:\n x1, y1 = self.startPointLoc.pop(0)\n self.grid.create_rectangle(\n x1 * PIXEL, y1 * PIXEL, x1 * PIXEL + PIXEL, y1 * PIXEL + PIXEL, fill='white', outline='gray')\n self.draw.rectangle(\n [x1 * PIXEL, y1 * PIXEL, x1 * PIXEL + PIXEL, y1 * PIXEL + PIXEL], fill='white', outline='gray')\n self.startPointLoc.append((x, y))\n self.grid.create_rectangle(\n x * PIXEL, y * PIXEL, x * PIXEL + PIXEL, y * PIXEL + PIXEL, fill='blue')\n self.draw.rectangle(\n [x * PIXEL, y * PIXEL, x * PIXEL + PIXEL, y * PIXEL + PIXEL], fill='blue')\n\n def _drawEndPoint(self, event):\n x, y = event.x // PIXEL, event.y // PIXEL\n if not self.endPointLoc:\n self.endPointLoc.append((x, y))\n self.grid.create_rectangle(\n x * PIXEL, y * PIXEL, 
x * PIXEL + PIXEL, y * PIXEL + PIXEL, fill='red')\n self.draw.rectangle(\n [x * PIXEL, y * PIXEL, x * PIXEL + PIXEL, y * PIXEL + PIXEL], fill='red')\n else:\n x1, y1 = self.endPointLoc.pop(0)\n self.grid.create_rectangle(\n x1 * PIXEL, y1 * PIXEL, x1 * PIXEL + PIXEL, y1 * PIXEL + PIXEL, fill='white', outline='gray')\n self.draw.rectangle(\n [x1 * PIXEL, y1 * PIXEL, x1 * PIXEL + PIXEL, y1 * PIXEL + PIXEL], fill='white', outline='gray')\n self.endPointLoc.append((x, y))\n self.grid.create_rectangle(\n x * PIXEL, y * PIXEL, x * PIXEL + PIXEL, y * PIXEL + PIXEL, fill='red')\n self.draw.rectangle(\n [x * PIXEL, y * PIXEL, x * PIXEL + PIXEL, y * PIXEL + PIXEL], fill='red')\n\n def cellColor(self, x, y):\n r, g, b = self.image.getpixel((x+1, y+1))\n return r, g, b\n\n def toArray(self):\n for x in range(0, WIDTH, PIXEL):\n self.gridArray.append([])\n for y in range(0, HEIGHT, PIXEL):\n self.gridArray[x // PIXEL].append('0')\n if(self.cellColor(x, y) == (0, 0, 255)):\n start = [x, y]\n self.gridArray[x // PIXEL][y // PIXEL] = 'S'\n\n if(self.cellColor(x, y) == (255, 0, 0)):\n end = [x, y]\n self.gridArray[x // PIXEL][y // PIXEL] = 'E'\n\n if(self.cellColor(x, y) == (0, 0, 0)):\n self.gridArray[x // PIXEL][y // PIXEL] = '#'\n\n for i in self.gridArray:\n print(i)\n print(start, end)\n return self.gridArray, start, end\n\n def getShortestPath(self):\n field, startXY, endXY = self.toArray()\n if startXY and endXY:\n startPoint = Point(startXY[0] // PIXEL, startXY[1] // PIXEL)\n endPoint = Point(endXY[0] // PIXEL, endXY[1] // PIXEL)\n a = PathFinder(field)\n return a.shortest_path(startPoint, endPoint, diagonals=self.diagonals)\n else:\n return None\n\n def drawPath(self):\n shortesPath = self.getShortestPath()\n if shortesPath:\n for i in shortesPath:\n self.grid.create_rectangle(\n i.x * PIXEL, i.y * PIXEL, i.x * PIXEL + PIXEL, i.y * PIXEL + PIXEL, fill='orange')\n self.draw.rectangle(\n [i.x * PIXEL, i.y * PIXEL, i.x * PIXEL + PIXEL, i.y * PIXEL + PIXEL], fill='orange')\n\n def clearAll(self):\n self.grid.delete(\"all\")\n del self.image\n del self.draw\n self.image = Image.new('RGB', (WIDTH, HEIGHT), 'white')\n self.draw = ImageDraw.Draw(self.image)\n self.startPointLoc = []\n self.endPointLoc = []\n self.gridArray = []\n self.drawGrid()\n\n def toggleDiagonals(self):\n self.diagonals = not self.diagonals\n\n\ndef test():\n app = Tk()\n layout = grid(app, width=WIDTH, height=HEIGHT, bg='white')\n layout.drawGrid()\n app.mainloop()\n\n\nif __name__ == '__main__':\n test()\n","sub_path":"src/shortPath/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":6098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"404829664","text":"#!/usr/bin/env python\n# encoding: utf-8\n\nimport cmath\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nfrom scipy.integrate import quad\n\nheaviside = lambda x : 0.5 * (np.sign(x) + 1)\n\ndef fval(p, x1, x2, x3p, t, alpha, beta, component):\n R = np.sqrt(x1**2 + x2**2)\n r = np.sqrt(R**2 + x3p**2)\n cos_phi = x1/R\n sin_phi = x2/R\n cos_theta = x3p/r\n sin_theta = R/r\n\n if component[0] == 'M':\n q = -t/r*sin_theta+1j*cmath.sqrt((t/r)**2-(1/alpha)**2-p**2)*cos_theta\n elif component[0] == 'N':\n if component[1] == '3':\n q = -t/r*sin_theta+cmath.sqrt(-(t/r)**2+(1/beta)**2+p**2)*cos_theta\n else:\n q = -t/r*sin_theta+1j*cmath.sqrt((t/r)**2-(1/beta)**2-p**2)*cos_theta\n\n eta_alpha = cmath.sqrt((1/alpha)**2+p**2-q**2);\n eta_beta = cmath.sqrt((1/beta)**2+p**2-q**2);\n gamma = 
eta_beta**2+p**2-q**2;\n sigma = gamma**2+4*eta_alpha*eta_beta*(q**2-p**2);\n\n MNlist = {\n 'M11': 2*eta_beta*((q**2+p**2)*cos_phi**2-p**2),\n 'M12': 2*eta_beta*(q**2+p**2)*sin_phi*cos_phi,\n 'M13': 2*q*eta_alpha*eta_beta*cos_phi,\n 'M21': 2*eta_beta*(q**2+p**2)*sin_phi*cos_phi,\n 'M22': 2*eta_beta*((q**2+p**2)*sin_phi**2-p**2),\n 'M23': 2*q*eta_alpha*eta_beta*sin_phi,\n 'M31': q*gamma*cos_phi,\n 'M32': q*gamma*sin_phi,\n 'M33': eta_alpha*gamma,\n 'N11': 1/eta_beta*(eta_beta**2*gamma-(gamma-4*eta_alpha*eta_beta)*((q**2+p**2)*sin_phi**2-p**2)),\n 'N12': 1/eta_beta*(q**2+p**2)*(gamma-4*eta_alpha*eta_beta)*sin_phi*cos_phi,\n 'N13': -q*gamma*cos_phi,\n 'N21': 1/eta_beta*(q**2+p**2)*(gamma-4*eta_alpha*eta_beta)*sin_phi*cos_phi,\n 'N22': 1/eta_beta*(eta_beta**2*gamma-(gamma-4*eta_alpha*eta_beta)*((q**2+p**2)*cos_phi**2-p**2)),\n 'N23': -q*gamma*sin_phi,\n 'N31': -2*q*eta_alpha*eta_beta*cos_phi,\n 'N32': -2*q*eta_alpha*eta_beta*sin_phi,\n 'N33': 2*eta_alpha*(q**2-p**2)\n }\n MN = MNlist.get(component)\n\n if component[0] == 'M':\n tmp = eta_alpha/sigma/cmath.sqrt((t/r)**2-alpha**-2-p**2)*MN\n val = heaviside(t-r/alpha)*tmp.real\n elif component[0] == 'N':\n if component[1] == '1':\n tmp = eta_beta/sigma/cmath.sqrt((t/r)**2-beta**-2-p**2)*MN\n val = heaviside(t-r/beta)*tmp.real\n elif component[1] == '2':\n tmp = eta_beta/sigma/cmath.sqrt(beta**-2+p**2-(t/r)**2)*MN\n t2 = r/alpha*sin_theta+r*np.sqrt(beta**-2-alpha**-2)*cos_theta\n val = heaviside(sin_theta-beta/alpha)*(heaviside(t-t2)-heaviside(t-r/beta))*tmp.imag\n elif component[1] == '3':\n tmp = eta_beta/sigma/cmath.sqrt(beta**-2+p**2-(t/r)**2)*MN\n val = heaviside(sin_theta-beta/alpha)*heaviside(t-r/beta)*tmp.imag\n\n return val\n\ndef green(x1, x2, x3, t, x1p, x2p, x3p, tp, alpha, beta, rho, direct):\n dx1 = x1 - x1p\n dx2 = x2 - x2p\n dx3 = x3 - x3p\n dt = t - tp\n mu = beta**2 * rho\n R = np.sqrt(dx1**2 + dx2**2)\n r = np.sqrt(R**2 + dx3**2)\n cos_theta = dx3 / r\n sin_theta = R / r\n\n mval = np.zeros(len(dt))\n nval = np.zeros(len(dt))\n index = np.arange(len(dt))\n\n for i in index:\n\n component = 'M' + direct\n argv = (x1, x2, x3p, dt[i], alpha, beta, component)\n intup = cmath.sqrt((dt[i]/r)**2-alpha**-2)\n mval[i] = quad(fval, 0, intup.real, args = argv)[0]\n\n component = 'N' + direct\n argv = (x1, x2, x3p, dt[i], alpha, beta, component)\n intup = cmath.sqrt((dt[i]/r)**2-beta**-2)\n p2= cmath.sqrt(((dt[i]/r-cmath.sqrt(beta**-2-alpha**-2)*cos_theta)/sin_theta)**2-alpha**-2)\n nval[i] = quad(fval, 0, intup.real, args = argv)[0]\n nval[i] += -2 * quad(fval, 0, p2.real, args = argv)[0]\n# nval[i] += -quad(fval, 0, p2.real, args = argv)[0]\n\n g = 1/(np.pi**2*mu*r)*(mval + nval)\n return g\n\n\ndef plotG(x1, x2, x3, t, x1p, x2p, x3p, tp, alpha, beta, rho, direct, modif, index):\n G = np.zeros((len(direct), len(t)))\n plt.subplot(1,3,index)\n plt.hold(True)\n index = np.arange(len(direct))\n for i in index:\n G[i,:] = green(x1, x2, x3, t, x1p, x2p, x3p, tp, alpha, beta, rho, direct[i])\n G[i,:] += modif[i]\n plt.plot(t, G[i,:])\n\n\nif __name__ == '__main__':\n start = time.clock()\n\n x2, x3, x1p, x2p, tp = 0, 0, 0, 0, 0\n alpha, beta, rho = 8.00, 4.62, 3.30\n t = np.arange(1,4,0.1)\n direct = ['11', '31', '22', '13', '33']\n\n modif1 = [elem * 1e-4 for elem in [11, 9, 7, 5, 1]]\n modif2 = [elem * 1e-4 for elem in [16, 15, 13, 12, 6]]\n\n plt.figure(1)\n\n x1, x3p = 2, 10\n argv1 = (x1, x2, x3, t, x1p, x2p, x3p, tp, alpha, beta, rho, direct, modif1, 1)\n x1, x3p = 10, 2\n argv2 = (x1, x2, x3, t, x1p, x2p, x3p, tp, alpha, beta, rho, 
direct, modif1, 2)\n x1, x3p = 10, 0.2\n argv3 = (x1, x2, x3, t, x1p, x2p, x3p, tp, alpha, beta, rho, direct, modif1, 3)\n\n plotG(*argv1)\n plotG(*argv2)\n plotG(*argv3)\n\n plt.show()\n print(\"Running time: \", time.clock() - start)\n","sub_path":"greenfun.py","file_name":"greenfun.py","file_ext":"py","file_size_in_byte":5082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"512009904","text":"# - *- coding: utf- 8 - *-\r\nimport numpy as np\r\nfrom sklearn import datasets\r\nimport matplotlib.pyplot as plt\r\nfrom LinearRegression import LinearRegression\r\n\r\n# X = datasets.load_diabetes().data\r\n# Y = datasets.load_diabetes().target\r\n\r\nX = datasets.load_boston().data\r\nY = datasets.load_boston().target\r\ntypeOfGraphics = '2d'\r\n\r\n# Точки для проверки\r\ntest = X\r\n\r\nregressions = ['LinearRegression', 'LinearRegressionWithSVD', 'RidgeRegression', 'RidgeRegressionWithSVD']\r\n\r\n# Когда признаков много, визуализировать не нужно\r\nif typeOfGraphics == 'none':\r\n for r in regressions:\r\n lr = LinearRegression(X, Y, r)\r\n print(\"SSE: \" + str(lr.SSE()))\r\n\r\n# График на плоскости по одному j-му признаку\r\nif typeOfGraphics == '2d':\r\n j = 5\r\n X = X[:, j:(j + 1)]\r\n test = test[:, j:(j + 1)]\r\n # Для сетки\r\n test = np.arange(test.min(), test.max(), 0.01)\r\n # Для вычисления alpha\r\n testColumn = np.column_stack((np.ones(test.shape[0]), test))\r\n for r in regressions:\r\n plt.ioff()\r\n plt.figure(r)\r\n ax = plt.subplot()\r\n ax.title.set_text('Linear regression on %i feature, sample Boston' % (j))\r\n ax.plot(X, Y, 'r.', markersize=3, color='blue')\r\n lr = LinearRegression(X, Y, r)\r\n alpha = []\r\n for t in testColumn:\r\n alpha.append(lr.predict(t))\r\n ax.plot(test, alpha, marker='o', markersize=1, linewidth=2, color='red')\r\n print(\"SSE: \" + str(lr.SSE()))\r\n plt.show()","sub_path":"LinearRegression/main_LinearRegression.py","file_name":"main_LinearRegression.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"541736616","text":"load(\"//github.com/grpc/grpc-web:closure_grpc_compile.bzl\", \"closure_grpc_compile\")\nload(\"//closure:closure_proto_compile.bzl\", \"closure_proto_compile\")\nload(\"@io_bazel_rules_closure//closure:defs.bzl\", \"closure_js_library\")\n\ndef closure_grpc_library(**kwargs):\n name = kwargs.get(\"name\")\n deps = kwargs.get(\"deps\")\n visibility = kwargs.get(\"visibility\")\n\n name_pb = name + \"_pb\"\n name_pb_grpc = name + \"_pb_grpc\"\n\n closure_proto_compile(\n name = name_pb,\n deps = deps,\n visibility = visibility,\n verbose = kwargs.pop(\"verbose\", 0),\n transitivity = kwargs.pop(\"transitivity\", {}),\n transitive = kwargs.pop(\"transitive\", True),\n )\n\n closure_grpc_compile(\n name = name_pb_grpc,\n deps = deps,\n visibility = visibility,\n verbose = kwargs.pop(\"verbose\", 0),\n transitivity = kwargs.pop(\"transitivity\", {}),\n transitive = kwargs.pop(\"transitive\", True),\n )\n\n closure_js_library(\n name = name,\n srcs = [name_pb, name_pb_grpc],\n deps = [\n \"@com_github_grpc_grpc_web//javascript/net/grpc/web:abstractclientbase\",\n \"@com_github_grpc_grpc_web//javascript/net/grpc/web:clientreadablestream\",\n \"@com_github_grpc_grpc_web//javascript/net/grpc/web:grpcwebclientbase\",\n \"@com_github_grpc_grpc_web//javascript/net/grpc/web:error\",\n \"@io_bazel_rules_closure//closure/library\",\n 
\"@io_bazel_rules_closure//closure/protobuf:jspb\",\n ],\n suppress = [\n \"JSC_LATE_PROVIDE_ERROR\",\n \"JSC_UNDEFINED_VARIABLE\",\n \"JSC_IMPLICITLY_NULLABLE_JSDOC\",\n \"JSC_STRICT_INEXISTENT_PROPERTY\",\n \"JSC_POSSIBLE_INEXISTENT_PROPERTY\",\n \"JSC_UNRECOGNIZED_TYPE_ERROR\",\n \"JSC_UNUSED_PRIVATE_PROPERTY\",\n \"JSC_EXTRA_REQUIRE_WARNING\",\n \"JSC_INVALID_INTERFACE_MEMBER_DECLARATION\",\n ],\n visibility = visibility,\n )\n","sub_path":"github.com/grpc/grpc-web/closure_grpc_library.bzl","file_name":"closure_grpc_library.bzl","file_ext":"bzl","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"141804142","text":"import sys\n\nfrom TruthChecker.Main import create_question, run\n\nif len(sys.argv) == 1:\n print(\"Ask me for something! : ./main [question]\")\n exit(1)\n\nquestion = create_question(sys.argv)\nrun(question)\n","sub_path":"TruthChecker/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"394994000","text":"from bs4 import BeautifulSoup\nimport requests\nimport time\nfrom config import headers\n\ndef get_music_lists(url):\n time.sleep(1)\n # 发起请求获取响应\n response = requests.get(url=url, headers=headers).text\n soup = BeautifulSoup(response, 'html.parser')\n # 找到所有的a标签以及包含的信息\n a_lists = soup.select('.dec a')\n li_lists = soup.select('#m-pl-container li')\n for i in range(len(a_lists)):\n url = a_lists[i]['href']\n title = a_lists[i]['title'].replace(',', ',')\n play_num = li_lists[i].select('.nb')[0].get_text()\n author = li_lists[i].select('p')[1].select('a')[0].get_text()\n print(url, title, play_num, author)\n # 将获取的数据存到csv文件中方便后序获取其他信息\n with open('music_lists_0_1500.csv', 'a+', encoding='utf-8') as f:\n f.write(url+','+title+','+play_num+','+author+'\\n')\n\n\nif __name__ == '__main__':\n language_lists = ['华语', '欧美', '日语', '粤语', '韩语']\n for language in language_lists:\n for i in range(0, 1500, 35):\n url = f'https://music.163.com/discover/playlist/?cat={language}&order=hot&limit=35&offset={i}'\n get_music_lists(url)\n print('OK')\n","sub_path":"spider_music_list.py","file_name":"spider_music_list.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"260685583","text":"from flask_testing import TestCase\nfrom datetime import datetime\nfrom forum import create_app\nfrom forum.database import db\nfrom forum.modules import User, Section\n\n\nclass TestModifyComment(TestCase):\n SQLALCHEMY_DATABASE_URI = \"sqlite:///db_for_test.db\"\n TESTING = True\n\n def create_app(self):\n return create_app(self)\n\n def setUp(self):\n db.drop_all()\n db.create_all()\n\n user = User.create('xua@wustl.edu', 'strong_password')\n self.user_id = user.user_id\n db.session.add(user)\n db.session.add(Section.create(\"sport\"))\n db.session.commit()\n\n self.time = datetime(2018, 9, 10, 13, 00, 00)\n\n with self.app.test_client() as client:\n response = client.post('/api/tokens', json={\n 'email': 'xua@wustl.edu',\n 'password': 'strong_password',\n })\n self.token = response.json['token']\n\n response = client.post('/api/posts', json={\n 'post_name': \"today's sports\",\n 'post_time': datetime(2018, 6, 10, 12, 55, 0),\n 'section_name': 'sport',\n 'context': \"sport is great!\",\n }, headers={'Authorization': \"Token \" + self.token})\n self.post_id = response.json['post_id']\n\n response = 
client.post('/api/comments', json={\n 'post_id': self.post_id,\n 'comment_time': self.time,\n 'context': 'this is great!',\n }, headers={'Authorization': \"Token \" + self.token})\n self.comment_id = response.json['comment_id']\n\n def tearDown(self):\n db.session.remove()\n db.drop_all()\n\n def test_modify_comment(self):\n with self.app.test_client() as client:\n response = client.put(f'/api/comment/{self.comment_id}', json={\n 'context': 'this is worse!'\n }, headers={'Authorization': \"Token \" + self.token})\n self.assertStatus(response, 200)\n\n response = client.get(f'/api/comment/{self.comment_id}')\n self.assertStatus(response, 200)\n self.assertEqual(response.json['context'], 'this is worse!')\n\n def test_modify_none(self):\n with self.app.test_client() as client:\n response = client.put(f'/api/comment/{self.comment_id}', json={\n\n }, headers={'Authorization': \"Token \" + self.token})\n self.assertStatus(response, 400)\n self.assertEqual(response.json, {'message': 'argument missing'})\n\n def test_modify_with_other_argument(self):\n with self.app.test_client() as client:\n response = client.put(f'/api/comment/{self.comment_id}', json={\n 'else': 'something else'\n }, headers={'Authorization': \"Token \" + self.token})\n self.assertStatus(response, 400)\n self.assertEqual(response.json, {'message': 'argument missing'})\n\n def test_modify_without_token(self):\n with self.app.test_client() as client:\n response = client.put(f'/api/comment/{self.comment_id}', json={\n 'context': 'this is worse!'\n })\n self.assertStatus(response, 401)\n","sub_path":"test/test_api/test_comment/test_modify_comment.py","file_name":"test_modify_comment.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"204397838","text":"from pymongo import MongoClient\nimport os\nimport time\n\n_client = MongoClient(os.environ['MONGO_DAQ_URI'])\ndb = _client['xebra_daq']\n\nmessage_codes = {\n 'err_not_armed': 'Can\\'t start, DAQ isn\\'t armed',\n 'err_not_running': 'Can\\'t stop, DAQ isn\\'t running',\n 'err_not_idle': 'Can\\'t arm, DAQ isn\\'t idle',\n 'err_invalid_json': 'Invalid JSON',\n 'err_name_exists': 'A config with that name already exists, you can\\'t add a new one',\n 'err_no_name_exists': 'No config with that name exists, you can\\'t update it',\n 'err_not_auth': 'Authorization failed',\n 'err_not_armed': 'DAQ couldn\\'t arm',\n\n 'msg_start': 'DAQ starting',\n 'msg_arm': 'DAQ arming',\n 'msg_stop': 'DAQ stopping',\n 'msg_led': 'LED calibration starting',\n 'msg_cfg_update': 'Config updated',\n 'msg_new_cfg': 'New config saved',\n}\n\nstatus_map = [\n 'idle',\n 'arming',\n 'armed',\n 'running',\n 'error',\n 'unknown',\n]\n\n\ndef user(meta):\n return {'client_addr': meta['REMOTE_ADDR'] if 'REMOTE_ADDR' in meta else 'web',\n 'client_name': meta['REMOTE_HOST'] if 'REMOTE_HOST' in meta else 'web',\n 'client_user': meta['REMOTE_USER'] if 'REMOTE_USER' in meta else 'web'}\n\n\ndef is_schumann_subnet(meta):\n ip = user(meta)['client_addr']\n\n if ip in [\"::1\", \"127.0.0.1\", \"192.168.131.6\"]:\n return True\n subnet, _ = ip.rsplit('.', maxsplit=1)\n #return (subnet == '10.4.73')\n return True\n\ndef base_context(msgcode=None):\n modes = db['options'].distinct('name', {'detector': {'$ne': 'include'}})\n if 'bkg' in modes:\n modes.remove('bkg')\n modes = ['bkg'] + sorted(modes)\n # if 'led' in modes:\n # modes.remove('led')\n context = {}\n context['modes'] = modes\n if msgcode is not None:\n 
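# resolve the short status code to its display string; unknown codes\n # render as '?'\n 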
context.update({'message': message_codes.get(msgcode, '?')})\n\n return context\n\n\ndef config_context(**kwargs):\n context = base_context(**kwargs)\n modes = db['options'].distinct('name')\n context['modes'] = modes\n\n return context\n\n\ndef runs_context(**kwargs):\n context = base_context(**kwargs)\n context.update({'experiments': db['runs'].distinct('experiment')})\n\n return context\n\n\ndef update_daqspatcher(req, **kwargs):\n kwargs.update({'user': user(req.META)['client_addr'].split('.')[-1]})\n db['system_control'].update_one({'subsystem': 'daqspatcher'}, {'$set': kwargs})\n return\n\n\ndef current_status():\n for row in db['status'].find({}).sort([('_id', -1)]).limit(1):\n if time.time() - int(str(row['_id'])[:8], 16) > 10:\n return 'offline'\n return status_map[int(row['status'])]\n","sub_path":"control/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"523725671","text":"# telefon rehberi uygulamasi\n# Bu odevde bir telefon rehberi simulasyonu yapmanizi istiyoruz.\n# Program acildiginda kullaniciya, rehbere kisi ekleme, kisi silme, kisi isim ya da tel bilgisi guncelleme,\n# rehberi listeleme seceneklerini sunun. Kullanicinin secimine gore gerekli inputlarla programinizi sekillendirin.\n# Olusturulan rehberi bir dosyaya kaydedin.\n# Rehberi olustururken sozlukleri kullanin.\n\n\nmenu = (\"\"\"\\nTELEFON REHBERIM\n\n1. Rehbere isim ekle\n2. Rehberden Kisi Sil\n3. Rehber Guncelleme\n4. Rehberi Listele\n5. Cikis\n\n\"\"\")\nrehber = {}\nwhile True:\n print(menu)\n secim = input(\"Seciminiz:\\t\")\n if secim == \"1\":\n print(\"Not: Rehberde ayni isimden baska biri varsa eklenmeyecektir.\")\n ad_soyad = input(\"Rehbere eklemek istediginiz kisinin adi soyadi:\")\n tel = input(\"Rehbere eklemek istedigini kisinin tel numarasi \")\n rehber[ad_soyad.lower()] = tel\n continue\n elif secim == \"2\":\n sil = (input(\"Silmek istediginiz kisinin adini-soyadini giriniz:\")).lower()\n for k in rehber.keys():\n if k == sil:\n rehber.pop(sil)\n print(sil.upper(), \" adli kisi silindi...\")\n break\n else:\n print(\"Boyle bir ad-soyad bulunamadi..\")\n break\n continue\n elif secim == '3':\n guncelle = (input(\"Guncellemek istediginiz \\nKisinin adi soyadi ise 1,\\nTelefon numarasi ise 2'ye basiniz: \"))\n if guncelle == \"1\":\n ad_soyad = input(\"Rehberde guncellemek istediginiz kisinin adi soyadi:\").lower()\n for k in rehber.keys():\n if k == ad_soyad:\n tel = rehber[ad_soyad]\n rehber.pop(ad_soyad)\n ad_soyad2 = input(\"Guncel adi-soyadini giriniz: \").lower()\n rehber[ad_soyad2] = tel\n print(\"Basariyla guncellendi...\\n\")\n break\n else:\n print(\"Yanlis giris yaptiniz..\\n\")\n break\n continue\n elif guncelle == \"2\":\n ad_soyad = input(\"Rehberde guncellemek istediginiz kisinin adi soyadi:\").lower()\n for k in rehber.keys():\n if k == ad_soyad:\n tel = input(\"Kisinin guncel tel numarasi: \")\n rehber[ad_soyad] = tel\n print(\"Basariyla guncellendi...\\n\")\n break\n else:\n print(\"Yanlis giris yaptiniz..\\n\")\n break\n continue\n\n elif secim == '4':\n for k, v in rehber.items():\n print(\"\\nAdi-Soyadi: {}\\t\\tTelefon Numarasi:{}\".format(k.upper(), v))\n elif secim == \"5\":\n print(\"Cikis Yapiliyor...\")\n break\n else:\n print(\"Yanlis secim yaptiniz lutfen tekrar deneyiniz...\\n\")\n continue\n\ndosya = open(\"telefonrehberi.txt\", \"w\")\ndosya.write(\"Ad-Soyad\\tTelefon Numarasi\\n\")\nfor k, v in rehber.items():\n a = 
\"{}\\t{}\\n\".format(k.upper(), v.upper())\n dosya.write(a)\ndosya.close()\n\n\n# Şifreleme Uygulaması\n# Kullaniciya iki secenek sunarak orjinal metni sifreli metne ve sifreli metni orjinal metne donusturebilen bir program yazmanizi istiyoruz.\n# Sozlukler yardimi ile bir sifreleme algoritmasi olusturun ve kullanicidan alacaginiz inputu bu algoritma yoluyla sifreleyin\n# ve ekrana yazdirin. Kullanici daha sonra bu sifreli metni input olarak yazdiginda orjinal metne ulasabilsin.\n\ncikti = \"\"\"\nSIFRELEME PROGRAMI\n1. Orjinal metni sifreye cevir\n2. Sifreli metni orjinale cevir\n3. Cikis \n\"\"\"\nliste = []\nfor i in range(128): # ascii de 128 karaktere kadar oldugu icin\n karakter = \"%c\" % i\n liste += [karakter] # listeye ascii kodlarindaki tum karakterleri attik\nsifre = dict() # sifrelemenin yapilacagi sozlugu tanimladik\nfor i in range(len(liste)):\n sifre[liste[i]] = chr(ord(liste[i])+5)\n # sifre ascii kod decimal karsiliklarinin 5 fazlasina karsilik gelen karakter\n\nsifreli = \"\"\norjinal = \"\"\nwhile True:\n print(cikti)\n secimm = input(\"Seciminiz: \")\n sifreli = \"\" # her seferinde degeri bosaltiyoruz\n if secimm == \"1\":\n metin = input(\"Orjinal metin: \")\n for i in metin: # orjinal metni tariyoruz\n # sozlukte karaktere karsilik gelen degeri sifreli stringine atiyoruz\n sifreli += sifre[i]\n print(\"Metnin sifreli hali :\", sifreli) # yazdiriyoruz\n\n elif secimm == \"2\":\n metin = input(\"Sifrelenmis metni giriniz: \") # sifreli metni aldik\n orjinal = \"\" # her seferinde degeri bosaltiyoruz\n for i in metin: # sifreli metni tariyoruz\n for k, v in sifre.items(): # sozlukteki key ve value degerini aldik\n if i == v: # eger sifreli metindeki karakter sozlukte karsilik gelen value degerine esitse\n orjinal += k # orjinal stringine anahtar degerini atiyor\n print(\"Metnin orjinali :\", orjinal) # dongu bitince yazdiriyoruz\n elif secimm == \"3\":\n print(\"Cikis yapiliyor...\")\n break\n else:\n print(\"Yanlis giris yaptiniz..\")\n break\n","sub_path":"week7.py","file_name":"week7.py","file_ext":"py","file_size_in_byte":5095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"477194109","text":"# Copyright (c) LinkedIn Corporation. All rights reserved. 
Licensed under the BSD-2 Clause license.\n# See LICENSE in the project root for license information.\n\nimport setuptools\nimport re\n\nwith open('src/iris/__init__.py', 'r') as fd:\n version = re.search(r'^__version__\\s*=\\s*[\\'\"]([^\\'\"]*)[\\'\"]', fd.read(), re.MULTILINE).group(1)\n\nsetuptools.setup(\n name='iris',\n version=version,\n package_dir={'': 'src'},\n packages=setuptools.find_packages('src'),\n include_package_data=True,\n install_requires=[\n 'streql==3.0.2',\n 'dnspython==1.14.0',\n 'phonenumbers==7.4.1',\n 'twilio==4.5.0',\n 'google-api-python-client==1.4.2',\n 'oauth2client==1.4.12',\n 'slackclient==0.16',\n 'PyYAML==3.11',\n 'gevent==1.1.2',\n 'falcon==1.1.0',\n 'falcon-cors==1.1.2',\n 'ujson==1.35',\n 'requests==2.20.0',\n 'PyMySQL==0.7.2',\n 'SQLAlchemy==1.0.11',\n 'Jinja2==2.8',\n 'importlib==1.0.3',\n 'Markdown==2.4.1',\n 'click==6.6',\n 'msgpack-python==0.4.5',\n 'cssmin==0.2.0',\n 'beaker==1.10.0',\n 'cryptography==2.3',\n 'webassets==0.12.1',\n 'python-ldap==2.4.9',\n 'exchangelib==1.10.0',\n 'setproctitle==1.1.8',\n 'pyfcm==1.4.3',\n 'oncallclient==1.0.0',\n 'idna==2.7'\n ],\n extras_require={\n 'kazoo': ['kazoo==2.3.1'],\n # plugin deps\n 'influxdb': ['influxdb'],\n 'prometheus': ['prometheus_client'],\n 'dev': [\n 'gunicorn',\n 'Sphinx==1.5.6',\n 'sphinxcontrib-httpdomain',\n 'sphinx_rtd_theme',\n # test deps\n 'mock==2.0.0',\n 'pytest==3.0.5',\n 'pytest-mock==1.5.0',\n 'pytest-cov',\n 'flake8==3.5.0',\n 'tox',\n 'requests-mock==1.1.0',\n ],\n },\n entry_points={\n 'console_scripts': [\n 'iris-dev = iris.bin.run_server:main',\n 'iris = iris.bin.run_server:main',\n 'iris-sender = iris.bin.sender:main',\n 'iris-owa-sync = iris.bin.owasync:main',\n 'iris-sync-targets = iris.bin.sync_targets:main',\n 'iris-process-retention = iris.bin.retention:main',\n 'iris-app-stats = iris.bin.app_stats:main',\n 'iris_ctl = iris.bin.iris_ctl:main',\n 'build_assets = iris.bin.ui_build_assets:main',\n ]\n }\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"439956330","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 31 10:55:09 2019\n\n@author: krah\n\"\"\"\nimport numpy as np\nimport wabbit_tools as wt\nimport matplotlib.pyplot as plt\nimport re\nimport os\nimport glob\n\n\n###############################################################################\n# %% Change parameters here\n###############################################################################\n# directories needed\ndirs= {\n 'wabbit' : \"~/develop/WABBIT/\" , # directory of wabbit\n 'work' : \"./\", # where to save big data files\n 'images' : \"./\" #pictures are saved here\n }\n\n# setup for wabbit call\nwabbit_setup = {\n 'mpicommand' : \"mpirun -np 8\",\n 'memory' : \"--memory=8GB\"\n }\n# parameters to adjust\nclass params:\n \n eps_list = np.logspace(-4,0,10) # threshold of adaptation\n jmax_list = [4,5,6] # maximal tree level\n \n class domain:\n N = [2048, 2048] # number of points for 3d use 3 elements\n L = [1.0, 1.0] # length of domain\n \n #exp(-(x²+y²)/2/sigma²)\n def init(x,L):\n sigma = np.asarray(L)*0.01\n x0 = [Li/2 for Li in L]\n xrel = [x-x0 for x,x0 in zip(x,x0)]\n field = np.ones_like(x[0])\n for x,s in zip(xrel,sigma):\n field *= np.exp(-np.power(x,2)/(2*s**2))\n return field\n \n ## sin(1/x)*sin(1/y)\n def init2(x,L): # this function will be initialized on the domain\n sigma = np.asarray(L)*0.01\n x0 = [Li/2 
for Li in L]\n xrel = [x-x0 for x,x0 in zip(x,x0)]\n field = np.ones_like(x[0])\n for x,s in zip(xrel,sigma):\n field *= np.sin(-np.divide(1,np.abs(x)))\n return field \n \n ## (x**2+y**2)*heaviside(x)\n def init3(x,L): # this function will be initialized on the domain\n sigma = np.asarray(L)*0.01\n x0 = [Li/2 for Li in L]\n xrel = [x-x0 for x,x0 in zip(x,x0)]\n field = (xrel[0]**2+xrel[1]**2)*np.heaviside(xrel[0],0)\n return field \n###############################################################################\n# %% \n###############################################################################\n\n\n\n\n\ndef wabbit_adapt(dirs, params, wabbit_setup):\n \n \n mpicommand = wabbit_setup[\"mpicommand\"]\n memory = wabbit_setup[\"memory\"]\n wdir = dirs[\"wabbit\"]\n work = dirs[\"work\"]\n jmax_list = params.jmax_list\n # set initial gird\n dim = len(params.domain.L)\n x = [np.linspace(0,params.domain.L[d],params.domain.N[d]) \\\n for d in range(dim) ]\n X = np.meshgrid(*x)\n phi = params.init(X,params.domain.L)\n Bs = wt.field_shape_to_bs(params.domain.N,jmax_list[-1])\n \n # create reference file\n file_ref = wt.dense_to_wabbit_hdf5(phi,work+\"/phi\", Bs, params.domain.L,0)\n \n \n params.Bs_list = []\n l2error=np.zeros([len(params.jmax_list),len(params.eps_list)])\n linferror=np.zeros_like(l2error)\n compress = np.zeros_like(l2error)\n for j,jmax in enumerate(params.jmax_list):\n Bs = wt.field_shape_to_bs(params.domain.N,jmax)\n params.Bs_list.append(Bs) \n # adapt field for different eps using wabbit-post -dense-to-sparse\n print(\"\\n\\n###################################################################\")\n print(\"###################################################################\")\n print( \"\\t\\tJmax: \", jmax, \"\\n\\n\")\n for k,eps in enumerate(params.eps_list):\n # create dense field and save to work\n fname = \"phi-j\"+str(jmax)+\"-eps\"\n file = work +\"/\"+fname\n file = wt.dense_to_wabbit_hdf5(phi,file, Bs, params.domain.L,eps*100)\n command = mpicommand + \" \" + wdir + \\\n \"wabbit-post --dense-to-sparse --eps=\" + str(eps) + \" --order=CDF44 --eps-norm=Linfty \" + memory + \\\n \" --files=\"+ file + \">adapt.log\"\n # command for densing file again\n dense_command = mpicommand + \" \" + wdir + \\\n \"wabbit-post --sparse-to-dense \" + \\\n \" \"+ file + \" \"+file + \">>adapt.log\"\n # -----------------------------\n # Execute Command\n # -----------------------------\n print(\"\\n\\n###################################################################\")\n print(\"\\t\\teps =\",eps)\n print(\"###################################################################\")\n print(\"\\n\",command,\"\\n\\n\")\n success = os.system(command) # execute command\n if np.mod(k,4)==0:\n wt.plot_wabbit_file(file,savepng=True)\n compress[j,k] = sum(wt.block_level_distribution_file( file ))\n \n print(\"\\n\",dense_command,\"\\n\\n\")\n success += os.system(dense_command) # dense the file for comparison to original\n if success != 0:\n print(\"command did not execute successfully\")\n return\n \n # ---------------------------\n # compare to original file\n # --------------------------\n l2error[j,k] = wt.wabbit_error_vs_wabbit(file_ref,file, norm=2, dim=2) \n linferror[j,k] = wt.wabbit_error_vs_wabbit(file_ref,file, norm=np.inf, dim=2) \n compress[j,k] /= sum(wt.block_level_distribution_file( file ))\n \n # delete file\n os.system(\"rm \"+ file)\n \n # delete reference file\n os.system(\"rm \" + file_ref)\n return l2error,linferror,compress\n\n\nif __name__ == \"__main__\":\n\n l2error, 
linferror, compress = wabbit_adapt(dirs, params, wabbit_setup) \n \n \n # %%\n plt.close(\"all\")\n fig1, ax1 = plt.subplots() \n fig2, ax2 = plt.subplots() \n \n l2plt=[0]*len(params.jmax_list)\n linfplt=[0]*len(params.jmax_list)\n for j, jmax in enumerate(params.jmax_list):\n l2plt[j], = ax1.loglog(params.eps_list,l2error[j,:],'-o', label =\"$J_\\mathrm{max}=\"+str(jmax)+'$')\n linfplt[j], = ax1.loglog(params.eps_list,linferror[j,:],'-.*', label = \"$J_\\mathrm{max}=\"+str(jmax)+'$')\n #### plot compression rate\n ax2.semilogx(params.eps_list,compress[j,:],'-o', label = \"$J_\\mathrm{max}=\"+str(jmax)+'$')\n \n l2_legend = ax1.legend(handles=l2plt, loc='lower right',title=\"$\\Vert u(x) - [u(x)]^\\epsilon \\Vert_2$\",fontsize='small')\n ax1.add_artist(l2_legend)\n linf_legend = ax1.legend(handles=linfplt, loc='upper left',title=\"$\\Vert u(x) - [u(x)]^\\epsilon \\Vert_\\infty$\",fontsize='small')\n ax1.loglog(params.eps_list,params.eps_list, 'k--')\n ax1.grid(which='both',linestyle=':')\n ax1.set_xlabel(\"$\\epsilon$\")\n ax1.set_ylabel(\"relative error\")\n \n ax2.legend()\n ax2.grid(which='both',linestyle=':')\n ax2.set_xlabel(\"$\\epsilon$\")\n ax2.set_ylabel(\"Compression Factor\")\n #ax1.set_xlim=ax2.get_xlim()\n\n\n################################################\n# save figure\n###############################################\n#fig1.savefig( pic_dir+'blob_compression_err4th.png', dpi=300, transparent=True, bbox_inches='tight' )# -*- coding: utf-8 -*-\n\n#fig2.savefig( pic_dir+'blob_compression_rate.png', dpi=300, transparent=True, bbox_inches='tight' )# -*- coding: utf-8 -*-","sub_path":"LIB/wavelet_compress_test.py","file_name":"wavelet_compress_test.py","file_ext":"py","file_size_in_byte":7350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"472909023","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport shelve\nimport xml.etree.ElementTree as ET\nfrom sklearn.cluster import KMeans\nfrom sklearn.decomposition import PCA\n\n\ndef remove_non_ascii(text):\n return ''.join([i if ord(i) < 128 else ' ' for i in text])\ntree = ET.parse('treegen.xml')\nroot = tree.getroot()\nlistOfIndices = [0,2,3,6,9,20,23]\npca_2 = PCA(2)\nuser_avg = shelve.open(\"user_avg\")\ncdj = pd.read_csv(\"data.csv\")\nclusters = shelve.open(\"clusters\")\ncols = cdj.columns.tolist()\ncols.insert(0, cols.pop(cols.index('titleOfBook')))\ncdj = cdj[cols]\nkmeans_model1 = KMeans(n_clusters=6, random_state=1).fit(cdj.iloc[:, 1:])\nlabels1 = kmeans_model1.labels_\ncentroids1 = kmeans_model1.cluster_centers_\nplot_columns1 = pca_2.fit_transform(cdj.iloc[:,1:642])\n\n\n#TO BE SEEN\ncdj = cdj.drop('titleOfBook', 1)\ncounter=-1\ncounter1 = 1\nfor key,value in user_avg.iteritems() :\n\tcounter =counter + 1\n\tif counter in listOfIndices :\n\t\tcounter1 = counter1 +1\n\t\tm = open(\"results/finalResultDemo\"+str(counter1)+\".txt\", \"w+\")\n\t\tto_merge = []\n\t\tto_merge.append(cdj)\n\t\tto_merge.append(value)\n\t\tnw = pd.concat(to_merge)\n\t\tplot_columns = pca_2.fit_transform(nw.iloc[:,:]) \n\t\tkmeans_model = KMeans(n_clusters=6, random_state=1).fit(nw.iloc[:,:])\n\t\tlabels = kmeans_model.labels_\n\t\tplt.figure()\n\t\t#print plot_columns\n\t\tlabelx = kmeans_model1.predict(value)\n\t\tfor k in clusters[str(labelx[0])] : \n\t\t\tm.write(k)\n\t\t\tm.write(\"\\n\")\n\t\tm.write(\"\\n \\n \\n \\n The facebook profile data \\n \\n \\n \\n\")\n\t\tfor target in root.findall(\".//user[@id='\"+key+\"']\"):\n\t\t\tfor child in target 
:\n\t\t\t\tm.write(remove_non_ascii(child.tag))\n\t\t\t\tm.write(\" \" +remove_non_ascii(child.text))\n\t\t\t\tm.write(\"\\n\")\n\t\tm.write(\"\\n \\n \\n \\n Recommended Books : \\n \\n\")\n\t\tm.write(clusters[str(labelx[0])][0])\n\t\tm.write(\"\\n\")\n\t\tm.write(clusters[str(labelx[0])][2])\n\t\tm.write(\"\\n\")\n\t\tm.write(clusters[str(labelx[0])][5])\n\t\tm.write(\"\\n\")\n\t\tm.close()\n\t\tplt.scatter(x=plot_columns1[:,0], y=plot_columns1[:,1],c=labels1)\n\t\tplt.scatter(x=plot_columns[-1:,0], y=plot_columns[-1:,1],color='red',marker='v',s=100)\n\t\tplt.show()\n\t\t\n\t\traw_input(\"success\")\n","sub_path":"kmeans_allocate.py","file_name":"kmeans_allocate.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"384848252","text":"import arcpy\n\n\nfor year in range(1970,2017):\n rows = arcpy.da.SearchCursor(r'C:\\Users\\George\\courses\\spa\\M6\\Project\\Week6_SPA\\Week6_SPA.gdb\\Tornados_Since_1970', (\"mag\", \"yr\", \"mo\"), \"yr = {}\".format(year))\n\n count = 0\n magtotal = 0\n for row in rows:\n mag = row[0]\n #if (count % 1000) == 0:\n # print(mag, count)\n if mag > 1:\n magtotal = mag + magtotal\n count = count + 1\n if count != 0:\n print(\"mean mag in {}:\".format(year), magtotal / count, count)\n else:\n print(\"No Tornado Data in {}\".format(year))\n","sub_path":"W7/Search_Cursor_year.py","file_name":"Search_Cursor_year.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"347760980","text":"# -*- coding:utf-8 -*-\n\n\"\"\"This module builds data pairs based on 「top1000.txt」 & 「same.txt」 & 「label.txt」.\"\"\"\n\n\ndef build_label_txt(input_file, output_file):\n \"\"\"\n Construct 3510 (test1_id, test2_id, similarity_tag) pairs and\n generate the 「label_pair.txt」 file based on the original 「label.txt」 file.\n :param input_file: Should be 「label.txt」\n :param output_file: Should be 「label_pair.txt」\n \"\"\"\n result = []\n with open(input_file, 'r') as fin, open(output_file, 'w') as fout:\n for line in fin:\n odd_index = []\n even_index = []\n pairs = line.strip().split('\\t')\n head = pairs[0]\n pairs = pairs[1:]\n for index in range(len(pairs)):\n if index % 2 == 1:\n odd_index.append(pairs[index])\n else:\n even_index.append(pairs[index])\n for i in range(len(odd_index)):\n result.append((head, even_index[i], odd_index[i]))\n for item in result:\n out_str = '' + item[0] + '\\t' + item[1] + '\\t' + item[2]\n fout.write(out_str.strip() + '\\n')\n\n print('Successfully build 「label_pair.txt」 file :)')\n\n\ndef build_not_similarity(input_file, output_file):\n \"\"\"\n Select the pairs with tag=0 and generate the 「not_similarity_raw.txt」 based on 「label_pair.txt」.\n :param input_file: Should be 「label_pair.txt」\n :param output_file: Should be 「not_similarity_raw.txt」\n \"\"\"\n result = []\n with open(input_file, 'r') as fin, open(output_file, 'w') as fout:\n for line in fin:\n pairs = line.strip().split('\\t')\n tag = pairs[2]\n if tag.startswith('0'):\n result.append(pairs)\n for item in result:\n out_str = '' + item[0] + '\\t' + item[1] + '\\t' + item[2]\n fout.write(out_str.strip() + '\\n')\n\n print('Successfully build 「not_similarity_raw.txt」 file :)')\n\n\ndef build_similarity(input_file, output_file):\n \"\"\"\n Select the pairs with tag=1/1-1/1-2 and generate the 「similarity_raw.txt」 based on 「label_pair.txt」.\n :param input_file: Should be 「label_pair.txt」\n :param output_file: 
Should be 「similarity_raw.txt」\n \"\"\"\n result = []\n with open(input_file, 'r') as fin, open(output_file, 'w') as fout:\n for line in fin:\n pairs = line.strip().split('\\t')\n tag = pairs[2]\n if tag.startswith('1'):\n result.append(pairs)\n for item in result:\n out_str = '' + item[0] + '\\t' + item[1] + '\\t' + item[2]\n fout.write(out_str.strip() + '\\n')\n\n print('Successfully build 「similarity_raw.txt」 file :)')\n\n\ndef build_tests_id(input_file_list, output_file):\n \"\"\"\n Extract all unique test id and generate the 「test_id.txt」 based on original three files.\n :param input_file_list: Should be ['label.txt', 'top1000.txt', 'same.txt']\n :param output_file: Should be 「test_id.txt」\n \"\"\"\n\n test_id = set()\n for file in input_file_list:\n if 'label' in file:\n with open(file, 'r') as fin:\n for eachline in fin:\n pairs = eachline.strip().split('\\t')\n head = pairs[0]\n test_id.add(head)\n pairs = pairs[1:]\n for index in range(len(pairs)):\n if index % 2 == 0:\n test_id.add(pairs[index])\n print('Now the length of test id data is:' + str(len(test_id)))\n else:\n with open(file, 'r') as fin:\n for eachline in fin:\n line = eachline.strip().split('\\t')\n for item in line:\n test_id.add(item)\n print('Now the length of test id data is:' + str(len(test_id)))\n\n with open(output_file, 'w') as fout:\n for item in test_id:\n out_str = '' + item\n fout.write(out_str.strip() + '\\n')\n\n print('Successfully build 「test_id.txt」 file :)')\n\n\ndef main():\n input_file_list = ['label.txt', 'top1000.txt', 'same.txt']\n\n build_label_txt('label.txt', 'label_pair.txt')\n build_not_similarity('label_pair.txt', 'not_similarity_raw.txt')\n build_similarity('label_pair.txt', 'similarity_raw.txt')\n build_tests_id(input_file_list, 'test_id.txt')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":4481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"474852721","text":"s = \"Hi He Lied Because Boron Could Not Oxidize Fluorine. New Nations Might Also Sign Peace Security Clause. Arthur King Can.\"\ns = s.replace(\".\", \"\").split()\nnum = [1, 5, 6, 7, 8, 9, 15, 16, 19]\ndic = {}\nfor i, v in enumerate(s):\n if i+1 in num:\n v = v[:1]\n else:\n v = v[:2]\n dic[v] = i + 1\n\nprint(dic)","sub_path":"masamune/chapter01/knock04.py","file_name":"knock04.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"145068214","text":"class Solution:\n def containsNearbyDuplicate(self, nums, k: int) -> bool:\n if len(nums) < 2:\n return False\n\n map = dict()\n flag = 0\n for i in range(len(nums)):\n if nums[i] in map.keys():\n abs_num = abs(i - map[nums[i]])\n # 如果有发现绝对值超过k的,则将当前位置存储到表中\n if abs_num > k:\n map[nums[i]] = i\n if abs_num <= k:\n flag = 1\n break\n # elif abs_num > k:\n # return False\n else:\n continue\n else:\n map[nums[i]] = i\n if flag == 1:\n return True\n else:\n return False\n\n\nif __name__ == '__main__':\n x = Solution()\n nums = [1, 2, 3, 1]\n k = 3\n # nums = [1,0,1,1]\n # k=1\n print(x.containsNearbyDuplicate(nums, k))\n","sub_path":"219. 存在重复元素 II.py","file_name":"219. 
存在重复元素 II.py","file_ext":"py","file_size_in_byte":953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"628776952","text":"import numpy as np\r\nimport oyaml as yaml\r\nimport pandas as pd\r\nfrom sklearn.metrics import auc, confusion_matrix\r\nimport warnings\r\n\r\ndef parse_ground_truth(annotation_path, yaml_path):\r\n \"\"\"\r\n Parse ground truth annotations from a CSV file containing both fine-level\r\n and coarse-level predictions (and possibly additional metadata).\r\n Returns a Pandas DataFrame in which the column names are coarse\r\n IDs of the form 1, 2, 3 etc.\r\n Parameters\r\n ----------\r\n annotation_path: string\r\n Path to the CSV file containing predictions.\r\n yaml_path: string\r\n Path to the YAML file containing coarse taxonomy.\r\n Returns\r\n -------\r\n gt_df: DataFrame\r\n Ground truth.\r\n \"\"\"\r\n # Create dictionary to parse tags\r\n with open(yaml_path, 'r') as stream:\r\n yaml_dict = yaml.load(stream, Loader=yaml.Loader)\r\n\r\n # Load CSV file into a Pandas DataFrame.\r\n ann_df = pd.read_csv(annotation_path)\r\n\r\n # Restrict to ground truth (\"annotator zero\").\r\n gt_df = ann_df[\r\n (ann_df[\"annotator_id\"] == 0) & (ann_df[\"split\"]==\"validate\")]\r\n\r\n\r\n # Rename coarse columns.\r\n coarse_dict = yaml_dict[\"coarse\"]\r\n coarse_renaming = {\r\n \"_\".join([str(c), coarse_dict[c], \"presence\"]): str(c)\r\n for c in coarse_dict}\r\n gt_df = gt_df.rename(columns=coarse_renaming)\r\n\r\n # Collect tag names as strings and map them to mixed (coarse-fine) ID pairs.\r\n # The \"mixed key\" is a hyphenation of the coarse ID and fine ID.\r\n fine_dict = {}\r\n for coarse_id in yaml_dict[\"fine\"]:\r\n for fine_id in yaml_dict[\"fine\"][coarse_id]:\r\n mixed_key = \"-\".join([str(coarse_id), str(fine_id)])\r\n fine_dict[mixed_key] = yaml_dict[\"fine\"][coarse_id][fine_id]\r\n\r\n # Rename fine columns.\r\n fine_renaming = {\"_\".join([k, fine_dict[k], \"presence\"]): k\r\n for k in fine_dict}\r\n gt_df = gt_df.rename(columns=fine_renaming)\r\n\r\n # Loop over coarse tags.\r\n n_samples = len(gt_df)\r\n coarse_dict = yaml_dict[\"coarse\"]\r\n for coarse_id in yaml_dict[\"coarse\"]:\r\n # Construct incomplete fine tag by appending -X to the coarse tag.\r\n incomplete_tag = str(coarse_id) + \"-X\"\r\n\r\n # If the incomplete tag is not in the prediction, append a column of zeros.\r\n # This is the case e.g. 
for coarse ID 7 (\"dogs\") which has a single\r\n # fine-level tag (\"7-1_dog-barking-whining\") and thus no incomplete\r\n # tag 7-X.\r\n if incomplete_tag not in gt_df.columns:\r\n gt_df[incomplete_tag] = np.zeros((n_samples,)).astype('int')\r\n\r\n # Return output in DataFrame format.\r\n return gt_df.sort_values('audio_filename')\r\ndef parse_coarse_prediction(pred_csv_path, yaml_path):\r\n \"\"\"\r\n Parse coarse-level predictions from a CSV file containing both fine-level\r\n and coarse-level predictions (and possibly additional metadata).\r\n Returns a Pandas DataFrame in which the column names are coarse\r\n IDs of the form 1, 2, 3 etc.\r\n Parameters\r\n ----------\r\n pred_csv_path: string\r\n Path to the CSV file containing predictions.\r\n yaml_path: string\r\n Path to the YAML file containing coarse taxonomy.\r\n Returns\r\n -------\r\n pred_coarse_df: DataFrame\r\n Coarse-level complete predictions.\r\n \"\"\"\r\n\r\n # Create dictionary to parse tags\r\n with open(yaml_path, 'r') as stream:\r\n yaml_dict = yaml.load(stream, Loader=yaml.Loader)\r\n\r\n # Collect tag names as strings and map them to coarse ID pairs.\r\n rev_coarse_dict = {\"_\".join([str(k), yaml_dict[\"coarse\"][k]]): k\r\n for k in yaml_dict[\"coarse\"]}\r\n\r\n # Read comma-separated values with the Pandas library\r\n pred_df = pd.read_csv(pred_csv_path)\r\n\r\n # Assign a predicted column to each coarse key, by using the tag as an\r\n # intermediate hashing step.\r\n pred_coarse_dict = {}\r\n for c in rev_coarse_dict:\r\n if c in pred_df:\r\n pred_coarse_dict[str(rev_coarse_dict[c])] = pred_df[c]\r\n else:\r\n pred_coarse_dict[str(rev_coarse_dict[c])] = np.zeros((len(pred_df),))\r\n warnings.warn(\"Column not found: \" + c)\r\n\r\n # Copy over the audio filename strings corresponding to each sample.\r\n pred_coarse_dict[\"audio_filename\"] = pred_df[\"audio_filename\"]\r\n\r\n # Build a new Pandas DataFrame with coarse keys as column names.\r\n pred_coarse_df = pd.DataFrame.from_dict(pred_coarse_dict)\r\n\r\n # Return output in DataFrame format.\r\n # The column names are of the form 1, 2, 3, etc.\r\n return pred_coarse_df.sort_values('audio_filename')\r\ndef parse_fine_prediction(pred_csv_path, yaml_path,parse_ground_truth):\r\n \"\"\"\r\n Parse fine-level predictions from a CSV file containing both fine-level\r\n and coarse-level predictions (and possibly additional metadata).\r\n Returns a Pandas DataFrame in which the column names are mixed (coarse-fine)\r\n IDs of the form 1-1, 1-2, 1-3, ..., 1-X, 2-1, 2-2, 2-3, ... 
2-X, 3-1, etc.\r\n Parameters\r\n ----------\r\n pred_csv_path: string\r\n Path to the CSV file containing predictions.\r\n yaml_path: string\r\n Path to the YAML file containing fine taxonomy.\r\n Returns\r\n -------\r\n pred_fine_df: DataFrame\r\n Fine-level complete predictions.\r\n \"\"\"\r\n\r\n # Create dictionary to parse tags\r\n with open(yaml_path, 'r') as stream:\r\n yaml_dict = yaml.load(stream, Loader=yaml.Loader)\r\n\r\n # Collect tag names as strings and map them to mixed (coarse-fine) ID pairs.\r\n # The \"mixed key\" is a hyphenation of the coarse ID and fine ID.\r\n fine_dict = {}\r\n for coarse_id in yaml_dict[\"fine\"]:\r\n for fine_id in yaml_dict[\"fine\"][coarse_id]:\r\n mixed_key = \"-\".join([str(coarse_id), str(fine_id)])\r\n fine_dict[mixed_key] = \"_\".join([\r\n mixed_key, yaml_dict[\"fine\"][coarse_id][fine_id]])\r\n\r\n # Invert the key-value relationship between mixed key and tag.\r\n # Now, tags are the keys, and mixed keys (coarse-fine IDs) are the values.\r\n # This is possible because tags are unique.\r\n rev_fine_dict = {fine_dict[k]: k for k in fine_dict}\r\n\r\n # Read comma-separated values with the Pandas library\r\n pred_df = pd.read_csv(pred_csv_path)\r\n\r\n # Assign a predicted column to each mixed key, by using the tag as an\r\n # intermediate hashing step.\r\n pred_fine_dict = {}\r\n for f in sorted(rev_fine_dict.keys()):\r\n if f in pred_df:\r\n pred_fine_dict[rev_fine_dict[f]] = pred_df[f]\r\n else:\r\n pred_fine_dict[rev_fine_dict[f]] = np.zeros((len(pred_df),))\r\n warnings.warn(\"Column not found: \" + f)\r\n\r\n # Loop over coarse tags.\r\n n_samples = len(pred_df)\r\n coarse_dict = yaml_dict[\"coarse\"]\r\n for coarse_id in yaml_dict[\"coarse\"]:\r\n # Construct incomplete fine tag by appending -X to the coarse tag.\r\n incomplete_tag = str(coarse_id) + \"-X\"\r\n\r\n # If the incomplete tag is not in the prediction, append a column of zeros.\r\n # This is the case e.g. for coarse ID 7 (\"dogs\") which has a single\r\n # fine-level tag (\"7-1_dog-barking-whining\") and thus no incomplete\r\n # tag 7-X.\r\n if incomplete_tag not in fine_dict.keys():\r\n pred_fine_dict[incomplete_tag] =\\\r\n np.zeros((n_samples,)).astype('int')\r\n\r\n\r\n # Copy over the audio filename strings corresponding to each sample.\r\n pred_fine_dict[\"audio_filename\"] = pred_df[\"audio_filename\"]\r\n\r\n # Build a new Pandas DataFrame with mixed keys as column names.\r\n pred_fine_df = pd.DataFrame.from_dict(pred_fine_dict)\r\n pred_fine_df=pred_fine_df.sort_values('audio_filename')\r\n # pred_fine_df=pred_fine_df[]parse_ground_truth['audio_filename']\r\n\r\n\r\n # Return output in DataFrame format.\r\n # Column names are 1-1, 1-2, 1-3 ... 1-X, 2-1, 2-2, 2-3 ... 
2-X, 3-1, etc.\r\n return pred_fine_df\r\nannotation_path = '/home/liuzhuangzhuang/DCASE_2020_TASK_5DATA/annotations.csv'\r\n\r\nyaml_path = '/home/liuzhuangzhuang/DCASE_2020_TASK_5DATA/dcase-ust-taxonomy.yaml'\r\nprediction_path='/home/liuzhuangzhuang/pycharm_P/task5-多任务0.0/work_space/submissions/main/logmel_64frames_64melbins/taxonomy_level=fine/holdout_fold=1/Cnn_9layers_AvgPooling/submission.csv'\r\nannotation_2019='/home/liuzhuangzhuang/pycharm_P/dataset_root/annotations.csv'\r\nmin_threshold = 0.01\r\n\r\n# Create dictionary to parse tags\r\nwith open(yaml_path, 'r') as stream:\r\n yaml_dict = yaml.load(stream, Loader=yaml.Loader)\r\n\r\n# Parse ground truth.\r\ngt_df = parse_ground_truth(annotation_path, yaml_path)\r\n\r\n# Parse predictions.\r\npred_df = parse_fine_prediction(prediction_path, yaml_path,gt_df)\r\n# if mode == \"fine\":\r\n# pred_df = parse_fine_prediction(prediction_path, yaml_path)\r\n# elif mode == \"coarse\":\r\n# pred_df = parse_coarse_prediction(prediction_path, yaml_path)\r\n\r\n# Check consistency between ground truth and predictions.\r\n# Make sure the files evaluated in both tables match.\r\npred_audio_set = set(pred_df['audio_filename'].tolist())\r\ntrue_audio_set = set(gt_df['audio_filename'].tolist())\r\n# if not (len(gt_df) == len(pred_df)):\r\n# print('len(gt_df) == len(pred_df)')\r\n# print(pred_df.to_dict)\r\n# print(pred_df)\r\nhaving_audio_set=set(gt_df['audio_filename'].tolist())\r\nprint('预测的标签个数{}'.format(len(pred_df)))\r\nprint('已有的标签个数{}'.format(len(gt_df)))\r\n\r\n# for index, row in pred_df.iterrows():\r\n# # print(index)\r\n# if row['audio_filename'] not in true_audio_set:\r\n# pred_df=pred_df.drop(index)\r\n# # print('**********************{}'.format(index))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":".github/单任务/evaluation_tools/weishenmetalianggebuyiyangchang.py","file_name":"weishenmetalianggebuyiyangchang.py","file_ext":"py","file_size_in_byte":9562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"521744729","text":"from tkinter import *\r\nimport random\r\nfrom tkinter import ttk\r\n# download the file in command prompt go to where the file is located use python example.py\r\n\r\n# This sets up the TK inter\r\nroot = Tk()\r\n# This sets the size of th window\r\nroot.geometry('1250x700')\r\n# This sets up the Title of the Window\r\nroot.title(\"Hotel Transylvania\")\r\nroot.resizable(False, False)\r\n\r\nframe1 = Frame(root)\r\nframe2 = Frame(root)\r\n\r\nroomlist = []\r\n\r\n\r\nclass Room:\r\n def __init__(self, root, counter):\r\n size = [\"King\", \"Double Queen\", \"Double Queen with Kitchen\", \"Suite\"]\r\n availability = {\"Available\": \"green\", \"Unavailable/Occupied\": \"red\", \"Unavailable/Dirty\": \"blue\",\r\n \"Unavailable/Maintenance\": \"purple\"}\r\n key = [\"Available\", \"Unavailable/Occupied\", \"Unavailable/Dirty\", \"Unavailable/Maintenance\"]\r\n self.avail = key[random.randint(0, 3)]\r\n self.roomsize = size[random.randint(0, 3)]\r\n self.color = availability.get(self.avail)\r\n self.root = root\r\n roomlist.append(Button(self.root, text=\"Room #\" + str(counter + 1) + ' ' + self.roomsize,\r\n command=lambda: checkRoom(self, roomlist, counter), font=(\"arial\", 12)))\r\n\r\n def configure(self, counter):\r\n roomlist[counter].configure(fg=self.color)\r\n\r\n\r\ndef clear(frame):\r\n for widget in frame.winfo_children():\r\n widget.destroy()\r\n\r\n\r\ndef ShowRooms():\r\n clear(frame2)\r\n legend = Label(frame2, text=\"Available\", font=(\"arial\", 
18))\r\n legend.configure(fg=\"green\")\r\n legend2 = Label(frame2, text=\"Unavailable/Occupied\", font=(\"arial\", 18))\r\n legend2.configure(fg=\"red\")\r\n legend3 = Label(frame2, text=\"Unavailable/Dirty\", font=(\"arial\", 18))\r\n legend3.configure(fg=\"blue\")\r\n legend4 = Label(frame2, text=\"Unavailable/Maintenance\", font=(\"arial\", 18))\r\n legend4.configure(fg=\"purple\")\r\n\r\n legend.place(x=300, y=25)\r\n legend2.place(x=420, y=25)\r\n legend3.place(x=680, y=25)\r\n legend4.place(x=880, y=25)\r\n\r\n counter = 50\r\n i = 0\r\n while i < 20:\r\n Room(root, i)\r\n roomlist[i].place(x=500, y=counter + 30)\r\n counter = counter + 30\r\n i = i + 1\r\n\r\n\r\ndef checkRoom(self, rooms, counter):\r\n self.configure(counter)\r\n\r\n\r\ndef execute():\r\n print(\"welcome\")\r\n\r\n\r\ndef Search():\r\n clear(frame2)\r\n label1 = Label(frame2, text=\"Guest First Name\", font=(\"arial\", 12), height=2).grid(row=0, column=0)\r\n field1 = Entry(frame2, font=(\"arial\", 12)).grid(row=0, column=1)\r\n label2 = Label(frame2, text=\"Guest Last Name\", font=(\"arial\", 12), height=2).grid(row=0, column=2)\r\n field2 = Entry(frame2, font=(\"arial\", 12)).grid(row=0, column=3)\r\n label3 = Label(frame2, text=\"Room Number\", font=(\"arial\", 12), height=2).grid(row=0, column=4)\r\n field3 = Entry(frame2, font=(\"arial\", 12)).grid(row=0, column=5)\r\n label4 = Label(frame2, text=\"Phone Number\", font=(\"arial\", 12), height=2).grid(row=0, column=6)\r\n field4 = Entry(frame2, font=(\"arial\", 12)).grid(row=0, column=7)\r\n label5 = Label(frame2, text=\"Street Address\", font=(\"arial\", 12), height=2).grid(row=1, column=0)\r\n field5 = Entry(frame2, font=(\"arial\", 12)).grid(row=1, column=1)\r\n label6 = Label(frame2, text=\"Check In Date\", font=(\"arial\", 12), height=2).grid(row=1, column=2)\r\n field6 = Entry(frame2, font=(\"arial\", 12)).grid(row=1, column=3)\r\n label7 = Label(frame2, text=\"Check Out Date\", font=(\"arial\", 12), height=2).grid(row=1, column=4)\r\n field7 = Entry(frame2, font=(\"arial\", 12)).grid(row=1, column=5)\r\n Button1 = Button(frame2, text='Search', font=(\"arial\", 14)).grid(row=1, column=6)\r\n frame2.grid(row=1, column=0)\r\n\r\n\r\ndef report():\r\n clear(frame2)\r\n\r\n label1 = Label(frame2, text=\"Today's report\", font=(\"arial\", 20), height=2).grid(row=0, column=0)\r\n frame2.grid(row=1, column=0)\r\n\r\ndef housekeeping():\r\n clear(frame2)\r\n hRoomNum = Label(frame2, text=\"Room Number\", font=(\"arial\", 12), height=2).grid(row=1, column=0)\r\n hHousekeeper = Label(frame2, text=\"Housekeeper\", font=(\"arial\", 12), height=2).grid(row=1, column=1)\r\n hRoomStatus = Label(frame2, text=\"Room Status\", font=(\"arial\", 12), height=2).grid(row=1, column=2)\r\n hRoomType = Label(frame2, text=\"Room Type\", font=(\"arial\", 12), height=2).grid(row=1, column=3)\r\n hBathroom = Label(frame2, text=\"Bathroom\", font=(\"arial\", 12), height=2).grid(row=1, column=4)\r\n hTowels = Label(frame2, text=\"Towels\", font=(\"arial\", 12), height=2).grid(row=1, column=5)\r\n hBedSheets = Label(frame2, text=\"Bed Sheets\", font=(\"arial\", 12), height=2).grid(row=1, column=6)\r\n hVacuum = Label(frame2, text=\"Vacuum\", font=(\"arial\", 12), height=2).grid(row=1, column=7)\r\n hDusting = Label(frame2, text=\"Dusting\", font=(\"arial\", 12), height=2).grid(row=1, column=8)\r\n hElectronics = Label(frame2, text=\"Electronics\", font=(\"arial\", 12), height=2).grid(row=1, column=9)\r\n for x in range(10):\r\n for y in range(10):\r\n entry = Entry(frame2, width=6)\r\n 
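# --- Hedged sketch: keeping references to a grid of Entry widgets ---
# The housekeeping() grid built around this point creates a 10x10 block of
# Entry widgets but discards the references, so their contents can never be
# read back. One common pattern (assumed here, not part of the original file)
# stores them in a 2D list so each cell stays reachable via .get().
import tkinter as tk

def build_entry_grid(parent, rows, cols, row_offset=2):
    entries = []
    for r in range(rows):
        row_widgets = []
        for c in range(cols):
            e = tk.Entry(parent, width=6)
            e.grid(row=r + row_offset, column=c)
            row_widgets.append(e)
        entries.append(row_widgets)
    return entries

# usage (inside housekeeping): entries = build_entry_grid(frame2, 10, 10)
# then entries[0][0].get() returns the text typed into the top-left cell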
entry.grid(row=x + 2, column=y)\r\n frame2.grid(row=1, column=0)\r\n\r\ndef Customer_Reservation():\r\n clear(frame2)\r\n roomList = [\"Option 1\", \"Option 2\", \"Option 3\"]\r\n crGFirst = StringVar()\r\n crGLast = StringVar()\r\n crCheckIn = StringVar()\r\n crCheckOut = StringVar()\r\n lGuestFirstName = Label(frame2, text=\"Enter First Name\", font=(\"arial\", 12), height=2).grid(row=1, column=0)\r\n lGuestLastName = Label(frame2, text=\"Enter Last Name\", font=(\"arial\", 12), height=2).grid(row=2, column=0)\r\n lCheckInDate = Label(frame2, text='Enter Check In Date', font=(\"arial\", 12)).grid(row=3, column=0)\r\n lCheckOutDate = Label(frame2, text='Enter Check Out Date', font=(\"arial\", 12)).grid(row=4, column=0)\r\n lRoomType = Label(frame2, text='Pick a Room Type', font=(\"arial\", 12)).grid(row=5, column=0)\r\n #lRoomType.set(\"Pick a room type\")\r\n\r\n eGuestFirstName = Entry(frame2, textvariable=crGFirst, font=(\"arial\", 12)).grid(row=1, column=1)\r\n eGuestLastName = Entry(frame2, textvariable=crGLast, show='*', font=(\"arial\", 12)).grid(row=2, column=1)\r\n eCheckInDate = Entry(frame2, textvariable=crCheckIn, font=(\"arial\", 12)).grid(row=3, column=1)\r\n eCheckOutDate = Entry(frame2, textvariable=crCheckOut, font=(\"arial\", 12)).grid(row=4, column=1)\r\n cbRoomType = ttk.Combobox(frame2, values=roomList).grid(row=5, column=1)\r\n bCheckAvailability = Button(frame2, text='Check Availability', command=execute, height=1, width=14, font=(\"arial\", 12)).grid(row=6, column=0)\r\n frame2.grid(row=1, column=0)\r\n\r\n\r\nCapability1 = Button(frame1, text='Show Rooms and Status', command=ShowRooms, font=(\"arial\", 12), width=20, height=5)\r\nCapability1.grid(row=0, column=0)\r\nCapability2 = Button(frame1, text='Show Room Availability', command=execute, font=(\"arial\", 12), width=20, height=5)\r\nCapability2.grid(row=0, column=1)\r\nCapability3 = Button(frame1, text='Customer Reservation', command=Customer_Reservation, font=(\"arial\", 12), width=20, height=5)\r\nCapability3.grid(row=0, column=2)\r\nCapability4 = Button(frame1, text='Housekeeping', command=housekeeping, font=(\"arial\", 12), width=20, height=5)\r\nCapability4.grid(row=0, column=3)\r\nCapability5 = Button(frame1, text='Guest Profile', command=execute, font=(\"arial\", 12), width=20, height=5)\r\nCapability5.grid(row=0, column=4)\r\nCapability6 = Button(frame1, text='Current Stay', command=execute, font=(\"arial\", 12), width=20, height=5)\r\nCapability6.grid(row=0, column=5)\r\nCapability7 = Button(frame1, text='Search', command=Search, font=(\"arial\", 12), width=20, height=5)\r\nCapability7.grid(row=0, column=6)\r\nCapability8 = Button(frame1, text='Daily Report', command=report, font=(\"arial\", 12), width=20, height=5)\r\nCapability8.grid(row=0, column=7)\r\nframe1.grid(row=0, column=0)\r\n\r\nroot.mainloop()\r\n","sub_path":"AF_Grid_Main_Window.py","file_name":"AF_Grid_Main_Window.py","file_ext":"py","file_size_in_byte":7769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"126636129","text":"import torch\nfrom torch import nn\nfrom PIL import Image\nfrom sklearn.metrics import r2_score\nfrom torch.nn.modules.loss import _Loss, _assert_no_grad\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom torch.utils.data import Dataset\n\n\nclass ImageToTensor(object):\n \"\"\"Converts a numpy.ndarray (H x W x C) to a torch.FloatTensor of shape (C x H x W).\"\"\"\n\n def __call__(self, image):\n\n image_array = np.array(image)\n try:\n tensor 
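# --- Hedged sketch: the HWC -> CHW conversion performed by ImageToTensor ---
# A PIL image converts to an (H, W, C) numpy array, while PyTorch conv layers
# expect (C, H, W); permute(2, 0, 1) moves the channel axis to the front.
# Toy zero-image only.
import numpy as np
import torch

hwc = np.zeros((480, 640, 3), dtype=np.uint8)         # H x W x C image array
chw = torch.from_numpy(hwc).permute(2, 0, 1).float()  # C x H x W float tensor
assert chw.shape == (3, 480, 640)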
= torch.from_numpy(image_array).permute(2, 0, 1)\n tensor = tensor.float()\n except:\n tensor = torch.from_numpy(np.expand_dims(image_array, axis=2)).permute(2, 0, 1)\n tensor = tensor.float()\n # put it from HWC to CHW format\n return tensor\n\n\nclass TensorToImage(object):\n \"\"\"Converts a Tensor (C x H x W) to a numpy.ndarray of shape (H x W x C).\"\"\"\n\n def __call__(self, tensor):\n image_array = tensor.numpy()\n if len(image_array.shape) == 4:\n image_array = image_array[0, :, :, :]\n\n image_array = np.transpose(image_array, (1, 2, 0))\n return image_array\n\n\nclass LogRMSELoss(_Loss):\n \"\"\"\n implements the log root mean square error\n \"\"\"\n\n def __init__(self, size_average=True, reduce=True):\n super(LogRMSELoss, self).__init__(size_average)\n self.reduce = reduce\n\n def forward(self, predicted, target):\n _assert_no_grad(target)\n diff = (torch.log1p(predicted) - torch.log1p(target)) ** 2\n if not self.reduce:\n return diff\n loss = torch.sqrt(torch.mean(diff)) if self.size_average else torch.sqrt(torch.sum(diff))\n return loss\n\n\nclass RMSELoss(_Loss):\n \"\"\"\n implements the root mean square error\n \"\"\"\n\n def __init__(self, size_average=True, reduce=True):\n super(RMSELoss, self).__init__(size_average)\n self.reduce = reduce\n\n def forward(self, predicted, target):\n _assert_no_grad(target)\n diff = (predicted - target) ** 2\n if not self.reduce:\n return diff\n loss = torch.sqrt(torch.mean(diff)) if self.size_average else torch.sqrt(torch.sum(diff))\n return loss\n\n\nclass ScaleInvMSELoss(_Loss):\n \"\"\"\n implements the scale invariant mean square error\n \"\"\"\n\n def __init__(self):\n super(ScaleInvMSELoss, self).__init__()\n\n def forward(self, predicted, target):\n _assert_no_grad(target)\n\n first_log = torch.log(predicted + 1e-6)\n second_log = torch.log(target + 1e-6)\n log_term = torch.mean(torch.pow(first_log - second_log, 2))\n sc_inv_term = torch.pow(torch.mean((first_log - second_log)), 2)\n loss = log_term - sc_inv_term\n\n return loss\n\n\nclass AbsRelativeLoss(_Loss):\n \"\"\"\n implements the absolute relative loss\n \"\"\"\n\n def __init__(self):\n super(AbsRelativeLoss, self).__init__()\n\n def forward(self, predicted, target):\n _assert_no_grad(target)\n diff = torch.div(torch.abs(target - predicted), predicted + 1)\n\n loss = torch.mean(diff)\n return loss\n\n\ndef same_scale(predicted, target, loss_function):\n \"\"\"\n :param predicted: predicted output\n :param target: real target\n :param loss_function: loss function to be used\n :return: return the loss of a sample, the target is resized\n to match the size of the predicted output\n \"\"\"\n\n _, _, h, w = predicted.size()\n th, tw = target.size()[-2:]\n target_scaled = target\n if h != th or w != tw:\n target_scaled = nn.functional.adaptive_avg_pool2d(target, (h, w))\n\n return loss_function(predicted, target_scaled)\n\n\ndef loss_metric(depth, target, loss='L1'):\n \"\"\"\n :param depth: predicted output\n :param target: real target\n :param loss: string identifier of the type of loss to use\n :return: loss value using a specific loss function\n \"\"\"\n\n if type(loss) is str:\n assert (loss in ['L1', 'MSE', 'SmoothL1', 'RMSE', 'logRMSE', 'scaleINV', 'absREL', 'cross_entropy'])\n\n if loss == 'L1':\n loss_function = nn.L1Loss()\n elif loss == 'MSE':\n loss_function = nn.MSELoss()\n elif loss == 'SmoothL1':\n loss_function = nn.SmoothL1Loss()\n elif loss == 'RMSE':\n loss_function = RMSELoss()\n elif loss == 'logRMSE':\n loss_function = LogRMSELoss()\n elif loss == 
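# --- Hedged sketch: a table-driven alternative to the if/elif loss dispatch ---
# The chain in loss_metric() around this point maps string identifiers to loss
# modules; a dict of constructors expresses the same mapping more compactly.
# This is a suggested refactor, not the module's actual API.
from torch import nn

LOSS_FACTORIES = {
    'L1': nn.L1Loss,
    'MSE': nn.MSELoss,
    'SmoothL1': nn.SmoothL1Loss,
    'cross_entropy': nn.CrossEntropyLoss,
}

def make_loss(name):
    try:
        return LOSS_FACTORIES[name]()  # instantiate on demand
    except KeyError:
        raise ValueError(f"unknown loss identifier: {name}")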
'scaleINV':\n loss_function = ScaleInvMSELoss()\n elif loss == 'absREL':\n loss_function = AbsRelativeLoss()\n elif loss == 'cross_entropy':\n loss_function = nn.CrossEntropyLoss()\n\n else:\n loss_function = loss\n\n loss_output = same_scale(depth, target, loss_function)\n return loss_output\n\n\ndef coeff_determination(predicted, target):\n \"\"\"\n computes the coefficient of determination\n as a metric for a regression task\n \"\"\"\n size_batch, _, height, width = predicted.size()\n\n # scale the target to the size of the prediction\n target = torch.nn.functional.adaptive_avg_pool2d(target, (height, width))\n\n batch_size = predicted.data.shape[0]\n predicted_np = predicted.data.view(batch_size, -1)\n target_np = target.data.view(batch_size, -1)\n\n r2_score_list = []\n for pred, targ in zip(predicted_np, target_np):\n # sklearn's r2_score expects (y_true, y_pred), so the target comes first\n score = r2_score(targ, pred)\n r2_score_list.append(score)\n\n return np.mean(r2_score_list)\n\n\ndef plot_history(history):\n \"\"\"\n Plots accuracy and loss of training and validation sets wrt epochs\n \"\"\"\n n_epochs = len(history['train_loss'])\n\n fig, axes = plt.subplots(1, 2, figsize=(12, 6))\n ax_loss = axes[0]\n ax_acc = axes[1]\n ax_loss.plot(np.arange(0, n_epochs), history['train_loss'], label=\"train_loss\")\n ax_loss.plot(np.arange(0, n_epochs), history['val_loss'], label=\"val_loss\")\n ax_loss.set_title(\"Training and Validation loss\")\n ax_loss.set_xlabel(\"Epoch #\")\n ax_loss.set_ylabel(\"Loss\")\n ax_loss.legend()\n\n ax_acc.plot(np.arange(0, n_epochs), history['train_acc'], label=\"train_acc\")\n ax_acc.plot(np.arange(0, n_epochs), history['val_acc'], label=\"val_acc\")\n ax_acc.set_title(\"Training and Validation accuracy\")\n ax_acc.set_xlabel(\"Epoch #\")\n ax_acc.set_ylabel(\"Accuracy\")\n ax_acc.legend()\n\n plt.tight_layout()\n plt.show()\n\n\n","sub_path":"back_up_pytorch/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6361,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"329633631","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\n mslib.mswms.mpl_hsec_styles\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n Matplotlib horizontal section styles.\n\n In this module, the visualisation styles of the horizontal map products\n that can be provided through the WMS are defined. The styles are classes\n that are derived from MPLBasemapHorizontalSectionStyle (defined in\n mpl_hsec.py). If you want to define a new product, copy an existing\n implementation and modify it according to your needs.\n\n A few notes:\n\n 1) The idea: Each product defines the data fields it requires as NetCDF-CF\n compliant standard names in the variable 'required_datafields' (a list\n of tuples (leveltype, variablename), where leveltype can be ml (model levels),\n pl (pressure levels), or whatever your data source may provide. The data\n driver invoked by the WSGI module is responsible for loading the data.\n The superclass MPLBasemapHorizontalSectionStyle sets up the plot and\n draws the map. What is left to do for the product class is to implement\n specific post-processing actions on the data, and to do the visualisation\n on the map.\n\n 2) If your product requires some sort of post-processing (e.g. the derivation\n of potential temperature or any other parameter), place it in the\n _prepare_datafields() method.\n\n 3) All visualisation commands go to the _plot_style() method. 
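# --- Hedged sketch: the product-class contract described in the surrounding docstring ---
# A runnable mock (plain-Python stand-ins, not the real mslib classes) of the
# pattern: declare required_datafields, derive quantities in
# _prepare_datafields(), and plot in _plot_style(), where self.data holds 2D
# arrays keyed by standard_name.
import numpy as np

class MockSectionStyle:  # stand-in for MPLBasemapHorizontalSectionStyle
    def __init__(self, data):
        self.data = data  # dict of 2D arrays

class MyProduct(MockSectionStyle):
    name = "MyProduct"            # WMS layer name (mandatory)
    title = "Example product"     # mandatory title
    required_datafields = [("pl", "air_temperature", "K")]

    def _prepare_datafields(self):
        # derived quantity computed from the requested field
        self.data["air_temperature_degC"] = self.data["air_temperature"] - 273.15

    def _plot_style(self):
        print(self.data["air_temperature_degC"].mean())  # ~6.85 for the toy field

p = MyProduct({"air_temperature": np.full((4, 4), 280.0)})
p._prepare_datafields()
p._plot_style()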
In this\n method, you can assume that the data fields you have requested are available\n as 2D arrays in the 'self.data' field.\n\n 4) All defined products MUST define a name (the WMS layer name) and a title.\n\n 5) If you want to provide different styles according to the WMS standard,\n define the names of the styles in the 'styles' variable and check in\n _plot_style() for the 'self.style' variable to know which style to deliver.\n\n 6) Your products should consider the 'self.noframe' variable to place a\n legend and a title. If this variable is True (default WMS behaviour), plotting\n anything outside the map axis will lead to erroneous plots. Look at the\n provided styles to get a feeling of how title and legends can be best placed.\n\n This file is part of MSS.\n\n :copyright: Copyright 2008-2014 Deutsches Zentrum fuer Luft- und Raumfahrt e.V.\n :copyright: Copyright 2011-2014 Marc Rautenhaus (mr)\n :copyright: Copyright 2016-2023 by the MSS team, see AUTHORS.\n :license: APACHE-2.0, see LICENSE for details.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\"\"\"\n\nimport logging\nimport warnings\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport mpl_toolkits.axes_grid1.inset_locator\nimport matplotlib.colors\nimport mpl_toolkits.basemap\nfrom matplotlib import patheffects\n\nfrom mslib.mswms.mpl_hsec import MPLBasemapHorizontalSectionStyle\nfrom mslib.mswms.utils import make_cbar_labels_readable\nimport mslib.mswms.generics as generics\nfrom mslib.utils import thermolib\nfrom mslib.utils.units import convert_to\n\n\nclass HS_GenericStyle(MPLBasemapHorizontalSectionStyle):\n \"\"\"\n Horizontal section plotting layer for general quantities\n \"\"\"\n name = \"HS_GenericStyle\"\n styles = [\n (\"auto\", \"auto colour scale\"),\n (\"autolog\", \"auto logcolour scale\"), ]\n cbar_format = None\n\n def _plot_style(self):\n bm = self.bm\n ax = self.bm.ax\n\n show_data = np.ma.masked_invalid(self.data[self.dataname])\n # get cmin, cmax, cbar_log and cbar_format for level_key\n cmin, cmax = generics.get_range(self.dataname, self.level, self.name[-2:])\n cmin, cmax, clevs, cmap, norm, ticks = generics.get_style_parameters(\n self.dataname, self.style, cmin, cmax, show_data)\n\n if self.use_pcolormesh:\n tc = bm.pcolormesh(self.lonmesh, self.latmesh, show_data, cmap=cmap, norm=norm)\n else:\n tc = bm.contourf(self.lonmesh, self.latmesh, show_data, levels=clevs, cmap=cmap, extend=\"both\", norm=norm)\n\n for cont_data, cont_levels, cont_colour, cont_label_colour, cont_style, cont_lw, pe in self.contours:\n cs_pv = ax.contour(self.lonmesh, self.latmesh, self.data[cont_data], cont_levels,\n colors=cont_colour, linestyles=cont_style, linewidths=cont_lw)\n cs_pv_lab = ax.clabel(cs_pv, colors=cont_label_colour, fmt='%.0f')\n if pe:\n plt.setp(cs_pv.collections, path_effects=[\n patheffects.withStroke(linewidth=cont_lw + 2, foreground=\"w\")])\n plt.setp(cs_pv_lab, path_effects=[patheffects.withStroke(linewidth=1, foreground=\"w\")])\n\n # define position of the colorbar and the orientation of the 
ticks\n if self.crs.lower() == \"epsg:77774020\":\n cbar_location = 3\n tick_pos = 'right'\n else:\n cbar_location = 4\n tick_pos = 'left'\n\n # Format for colorbar labels\n cbar_label = self.title\n if self.cbar_format is None:\n cbar_format = generics.get_cbar_label_format(self.style, np.median(np.abs(clevs)))\n else:\n cbar_format = self.cbar_format\n\n if not self.noframe:\n self.fig.colorbar(tc, fraction=0.05, pad=0.08, shrink=0.7,\n label=cbar_label, format=cbar_format, ticks=ticks, extend=\"both\")\n else:\n axins1 = mpl_toolkits.axes_grid1.inset_locator.inset_axes(\n ax, width=\"3%\", height=\"40%\", loc=cbar_location)\n self.fig.colorbar(tc, cax=axins1, orientation=\"vertical\", format=cbar_format, ticks=ticks, extend=\"both\")\n axins1.yaxis.set_ticks_position(tick_pos)\n make_cbar_labels_readable(self.fig, axins1)\n\n\ndef make_generic_class(name, standard_name, vert, add_data=None, add_contours=None,\n fix_styles=None, add_styles=None, add_prepare=None, use_pcolormesh=False):\n \"\"\"\n This function instantiates a plotting class and adds it to the global name\n space of this module.\n\n Args:\n name (str): name of the class, under which it will be added to the module\n name space\n\n standard_name (str): CF standard_name of the main plotting target.\n This standard_name must be registered (by default or manually)\n within the mslib.mswms.generics module.\n\n vert (str): vertical level type, e.g. \"pl\"\n\n add_data (list, optional): List of tuples adding data to be read in and\n provide to the plotting class.\n E.g. [(\"pl\", \"ertel_potential_vorticity\", \"PVU\")]\n for ertel_potential_vorticity on pressure levels in PVU units.\n The vertical level type must be the one specified by the vert\n variable or \"sfc\".\n\n By default ertel_potential_vorticity in PVU is selected.\n\n add_contours (list, optional): List of tuples specifying contour lines\n to be plotted.\n E.g. [(\"ertel_potential_vorticity\", [2, 4, 8, 16], \"green\", \"red\", \"dashed\", 2, True)]\n causes PV to be plotted at 2, 4, 8, and 16 PVU with dashed green\n lines, red labels, and line width of 2. The last value defines\n whether a stroke effect shall be applied.\n\n fix_styles (list, optional): A list of plotting styles, which must\n be defined in the mslib.mswms.generics.STYLES dictionary.\n Defaults to a list of standard styles\n (\"auto\", \"logauto\", \"default\", \"nonlinear\") depending on which\n ranges and thresholds are defined for the main variable in the\n generics module. Further styles can be registered to that dict\n if desired.\n\n add_styles (list, optional): Similar to fix_styles, but *adds* the\n supplied styles to the list of support styles instead of\n overwriting them. If both add_styles and fix_styles are supplied,\n fix_styles takes precedence. Don't do this.\n\n Defaults to None.\n\n add_prepare (function, optional): a function to overwrite the\n _prepare_datafield method. Use this to add derived quantities based\n on those provided by the modes. For example 'horizontal_wind' could\n be computed from U and V in here.\n\n Defaults to None.\n\n use_pcolormesh (bool, optional): determines whether to use pcolormesh\n or plotting instead of the default \"contourf\" method. Use\n pcolormesh for data that contains a lot of fill values or NaNs,\n or to show the actual location of data.\n\n Defaults to False.\n\n Returns:\n The generated class. 
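# --- Hedged sketch: the dynamic class-factory pattern behind make_generic_class ---
# A stripped-down, self-contained illustration of generating one subclass per
# variable and registering it in the module namespace via globals(); the real
# factory additionally wires up styles, units and required data fields.
def make_plot_class(name, standard_name, vert):
    class _Generated:
        dataname = standard_name
        title = f"{standard_name} on {vert} levels"
    _Generated.__name__ = name
    globals()[name] = _Generated  # module-level registration
    return _Generated

cls = make_plot_class("HS_Demo_PL_air_temperature", "air_temperature", "pl")
print(HS_Demo_PL_air_temperature is cls)  # True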
(The class is also placed in this module under the\n given name).\n \"\"\"\n if add_data is None:\n add_data = [(vert, \"ertel_potential_vorticity\", \"PVU\")]\n if add_contours is None:\n add_contours = [(\"ertel_potential_vorticity\", [2, 4, 8, 16], \"dimgrey\", \"dimgrey\", \"solid\", 2, True)]\n\n class fnord(HS_GenericStyle):\n \"\"\"\n Horizontal section plotting layer for quantity 'standard_name'\n \"\"\"\n name = f\"{standard_name}_{vert}\"\n dataname = standard_name\n title = generics.get_title(standard_name)\n long_name = standard_name\n units = generics.get_unit(standard_name)\n if units:\n title += f\" ({units})\"\n\n required_datafields = [(vert, standard_name, units)] + add_data\n contours = add_contours\n\n fnord.use_pcolormesh = use_pcolormesh\n fnord.__name__ = name\n fnord.styles = list(fnord.styles)\n if generics.get_thresholds(standard_name) is not None:\n fnord.styles += [(\"nonlinear\", \"nonlinear colour scale\")]\n if all(_x is not None for _x in generics.get_range(standard_name, None, vert)):\n fnord.styles += [\n (\"default\", \"fixed colour scale\"),\n (\"log\", \"fixed logarithmic colour scale\")]\n\n if add_styles is not None:\n fnord.styles += add_styles\n if fix_styles is not None:\n fnord.styles = fix_styles\n if add_prepare is not None:\n fnord._prepare_datafields = add_prepare\n globals()[name] = fnord\n\n return fnord\n\n\n# Generation of HS plotting layers for registered CF standard_names\nfor vert in [\"al\", \"ml\", \"pl\", \"tl\"]:\n for sn in generics.get_standard_names():\n make_generic_class(f\"HS_GenericStyle_{vert.upper()}_{sn}\", sn, vert)\n make_generic_class(\n f\"HS_GenericStyle_{vert.upper()}_{'equivalent_latitude'}\",\n \"equivalent_latitude\", vert, [], [],\n fix_styles=[(\"equivalent_latitude_nh\", \"northern hemisphere\"),\n (\"equivalent_latitude_sh\", \"southern hemisphere\")])\n make_generic_class(\n f\"HS_GenericStyle_{vert.upper()}_{'ertel_potential_vorticity'}\",\n \"ertel_potential_vorticity\", vert, [], [],\n fix_styles=[(\"ertel_potential_vorticity_nh\", \"northern hemisphere\"),\n (\"ertel_potential_vorticity_sh\", \"southern hemisphere\")])\n make_generic_class(\n f\"HS_GenericStyle_{vert.upper()}_{'square_of_brunt_vaisala_frequency_in_air'}\",\n \"square_of_brunt_vaisala_frequency_in_air\", vert, [], [],\n fix_styles=[(\"square_of_brunt_vaisala_frequency_in_air\", \"\")])\n\nmake_generic_class(\n \"HS_GenericStyle_SFC_tropopause_altitude\",\n \"tropopause_altitude\", \"sfc\", [],\n [(\"tropopause_altitude\", np.arange(5, 20.1, 0.500), \"yellow\", \"red\", \"solid\", 0.5, False)],\n fix_styles=[(\"tropopause_altitude\", \"tropopause_altitude\")])\n\n\nclass HS_CloudsStyle_01(MPLBasemapHorizontalSectionStyle):\n \"\"\"\n Surface Field: CLOUDS\n \"\"\"\n name = \"TCC\"\n title = \"Cloud Cover (0-1)\"\n styles = [\n (\"default\", \"Total Cloud Cover\"),\n (\"TOT\", \"Total Cloud Cover\"),\n (\"LOW\", \"Low Cloud Cover\"),\n (\"MED\", \"Medium Cloud Cover\"),\n (\"HIGH\", \"High Cloud Cover\")]\n\n # Variables with the highest number of dimensions first (otherwise\n # MFDatasetCommonDims will throw an exception)!\n required_datafields = [\n ('sfc', 'low_cloud_area_fraction', 'dimensionless'),\n ('sfc', 'medium_cloud_area_fraction', 'dimensionless'),\n ('sfc', 'high_cloud_area_fraction', 'dimensionless'),\n ('sfc', 'air_pressure_at_sea_level', 'hPa')]\n\n def _plot_style(self):\n \"\"\"\n \"\"\"\n bm = self.bm\n ax = self.bm.ax\n data = self.data\n\n if self.style.lower() == \"default\":\n self.style = \"TOT\"\n if self.style in 
[\"LOW\", \"TOT\"]:\n lcc = bm.contourf(self.lonmesh, self.latmesh, data['low_cloud_area_fraction'],\n np.arange(0.2, 1.1, 0.1), cmap=plt.cm.autumn_r)\n self.add_colorbar(lcc, \"Cloud cover fraction in grid box (0-1)\")\n\n if self.style in [\"MED\", \"TOT\"]:\n mcc = bm.contourf(self.lonmesh, self.latmesh, data['medium_cloud_area_fraction'],\n np.arange(0.2, 1.1, 0.1), cmap=plt.cm.summer_r)\n self.add_colorbar(mcc, width=\"2%\" if self.style == \"TOT\" else \"3%\",\n cb_format='' if self.style == \"TOT\" else \"%.1f\")\n\n if self.style in [\"HIGH\", \"TOT\"]:\n hcc = bm.contourf(self.lonmesh, self.latmesh, data['high_cloud_area_fraction'],\n np.arange(0.2, 1.1, 0.1), cmap=plt.cm.Blues)\n bm.contour(self.lonmesh, self.latmesh, data['high_cloud_area_fraction'],\n [0.2], colors=\"blue\", linestyles=\"dotted\")\n self.add_colorbar(hcc, width=\"1%\" if self.style == \"TOT\" else \"3%\",\n cb_format='' if self.style == \"TOT\" else \"%.1f\")\n\n # Colors in python2.6/site-packages/matplotlib/colors.py\n cs = bm.contour(self.lonmesh, self.latmesh, data['air_pressure_at_sea_level'],\n np.arange(950, 1050, 4), colors=\"burlywood\", linewidths=2)\n ax.clabel(cs, fontsize=8, fmt='%.0f')\n\n titlestring = \"Total cloud cover (high, medium, low) (0-1)\"\n if self.style == \"LOW\":\n titlestring = \"Low cloud cover (0-1)\"\n elif self.style == \"MED\":\n titlestring = \"Medium cloud cover (0-1)\"\n elif self.style == \"HIGH\":\n titlestring = \"High cloud cover (0-1)\"\n titlestring += f'\\nValid: {self.valid_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")}'\n if self.uses_inittime_dimension():\n time_step = self.valid_time - self.init_time\n time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600\n titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")})'\n\n if not self.noframe:\n ax.set_title(titlestring,\n horizontalalignment='left', x=0, fontsize=14)\n else:\n ax.text(bm.llcrnrx, bm.llcrnry, titlestring,\n fontsize=10, bbox=dict(facecolor='white', alpha=0.6))\n\n\nclass HS_MSLPStyle_01(MPLBasemapHorizontalSectionStyle):\n \"\"\"\n Surface Field: Mean Sea Level Pressure\n \"\"\"\n name = \"MSLP\"\n title = \"Mean Sea Level Pressure (hPa)\"\n\n # Variables with the highest number of dimensions first (otherwise\n # MFDatasetCommonDims will throw an exception)!\n required_datafields = [\n (\"sfc\", \"air_pressure_at_sea_level\", \"hPa\"),\n (\"sfc\", \"surface_eastward_wind\", \"knots\"),\n (\"sfc\", \"surface_northward_wind\", \"knots\")]\n\n def _plot_style(self):\n bm = self.bm\n ax = self.bm.ax\n data = self.data\n\n thick_contours = np.arange(952, 1050, 8)\n thin_contours = [c for c in np.arange(952, 1050, 2)\n if c not in thick_contours]\n\n mslp = data['air_pressure_at_sea_level']\n\n # Colors in python2.6/site-packages/matplotlib/colors.py\n cs = bm.contour(self.lonmesh, self.latmesh, mslp,\n thick_contours, colors=\"darkblue\", linewidths=2)\n ax.clabel(cs, fontsize=12, fmt='%.0f')\n cs = bm.contour(self.lonmesh, self.latmesh, mslp,\n thin_contours, colors=\"darkblue\", linewidths=1)\n\n # Convert wind data from m/s to knots.\n u = data['surface_eastward_wind']\n v = data['surface_northward_wind']\n\n # Transform wind vector field to fit map.\n lons2 = ((self.lons + 180) % 360) - 180\n lons2_ind = lons2.argsort()\n udat, vdat, xv, yv = bm.transform_vector(u[:, lons2_ind], v[:, lons2_ind],\n lons2[lons2_ind], self.lats,\n 16, 16, returnxy=True, masked=True)\n\n # Plot wind barbs.\n bm.barbs(xv, yv, udat, vdat,\n barbcolor='firebrick', 
flagcolor='firebrick', pivot='middle',\n linewidths=1)\n\n # Find local minima and maxima.\n # min_indices, min_values = local_minima(mslp.ravel(), window=50)\n # #min_indices, min_values = local_minima(mslp, window=(50,50))\n # minfits = minimum_filter(mslp, size=(50,50), mode=\"wrap\")\n # logging.debug(\"%s\", minfits)\n # #logging.debug(\"%s // %s // %s\", min_values, lonmesh_.ravel()[min_indices],\n # # self.latmesh_.ravel()[min_indices])\n\n # bm.scatter(lonmesh.ravel()[min_indices], self.latmesh.ravel()[min_indices],\n # s=20, c='blue', marker='s')\n\n titlestring = \"Mean sea level pressure (hPa) and surface wind\"\n titlestring += f'\\nValid: {self.valid_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")}'\n if self.uses_inittime_dimension():\n time_step = self.valid_time - self.init_time\n time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600\n titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")})'\n if not self.noframe:\n ax.set_title(titlestring,\n horizontalalignment='left', x=0, fontsize=14)\n else:\n ax.text(bm.llcrnrx, bm.llcrnry, titlestring,\n fontsize=10, bbox=dict(facecolor='white', alpha=0.6))\n\n\nclass HS_SEAStyle_01(MPLBasemapHorizontalSectionStyle):\n \"\"\"\n Surface Field: Solar Elevation Angle\n \"\"\"\n name = \"SEA\"\n title = \"Solar Elevation Angle (degrees)\"\n\n # Variables with the highest number of dimensions first (otherwise\n # MFDatasetCommonDims will throw an exception)!\n required_datafields = [\n (\"sfc\", \"solar_elevation_angle\", \"degree\")]\n\n def _plot_style(self):\n \"\"\"\n \"\"\"\n bm = self.bm\n ax = self.bm.ax\n data = self.data\n\n thick_contours = np.arange(-10, 95, 5)\n thin_contours = [c for c in np.arange(0, 90, 1)\n if c not in thick_contours]\n neg_thin_contours = [c for c in np.arange(-10, 0, 1)\n if c not in thick_contours]\n\n sea = data['solar_elevation_angle']\n\n # Filled contour plot.\n scs = bm.contourf(self.lonmesh, self.latmesh, sea,\n np.arange(0, 91, 1), cmap=plt.cm.nipy_spectral)\n self.add_colorbar(scs, label=\"Solar Elevation Angle (degrees)\")\n\n # Contour lines plot.\n # Colors in python2.6/site-packages/matplotlib/colors.py\n bm.contour(self.lonmesh, self.latmesh, sea,\n thick_contours, colors=\"saddlebrown\",\n linewidths=3, linestyles=\"solid\")\n cs2 = bm.contour(self.lonmesh, self.latmesh, sea,\n thin_contours, colors=\"white\", linewidths=1)\n cs2.clabel(cs2.levels, fontsize=14, fmt='%.0f')\n cs3 = bm.contour(self.lonmesh, self.latmesh, sea,\n neg_thin_contours, colors=\"saddlebrown\",\n linewidths=1, linestyles=\"solid\")\n cs3.clabel(fontsize=14, fmt='%.0f')\n\n # Plot title.\n titlestring = \"Solar Elevation Angle \"\n titlestring += f\"\\nValid: {self.valid_time.strftime('%a %Y-%m-%d %H:%M UTC')}\"\n if not self.noframe:\n ax.set_title(titlestring,\n horizontalalignment='left', x=0, fontsize=14)\n else:\n ax.text(bm.llcrnrx, bm.llcrnry, titlestring,\n fontsize=10, bbox=dict(facecolor='white', alpha=0.6))\n\n\nclass HS_SeaIceStyle_01(MPLBasemapHorizontalSectionStyle):\n \"\"\"\n Surface Field: Sea Ice Cover\n \"\"\"\n name = \"CI\"\n title = \"Sea Ice Cover Fraction (0-1)\"\n\n styles = [\n (\"default\", \"pseudocolor plot\"),\n (\"PCOL\", \"pseudocolor plot\"),\n (\"CONT\", \"contour plot\")]\n\n # Variables with the highest number of dimensions first (otherwise\n # MFDatasetCommonDims will throw an exception)!\n required_datafields = [\n (\"sfc\", \"sea_ice_area_fraction\", 'dimensionless')]\n\n def _plot_style(self):\n \"\"\"\n \"\"\"\n bm = 
self.bm\n ax = self.bm.ax\n data = self.data\n\n ice = data['sea_ice_area_fraction']\n\n if self.style.lower() == \"default\":\n self.style = \"PCOL\"\n\n # Filled contour plot.\n if self.style == \"PCOL\":\n scs = bm.pcolormesh(self.lonmesh, self.latmesh, ice,\n cmap=plt.cm.Blues,\n norm=matplotlib.colors.Normalize(vmin=0.1, vmax=1.0),\n shading=\"nearest\", edgecolors='none')\n else:\n scs = bm.contourf(self.lonmesh, self.latmesh, ice,\n np.arange(0.1, 1.1, .1), cmap=plt.cm.Blues)\n self.add_colorbar(scs, label=\"Sea Ice Cover Fraction (0-1)\")\n\n # Plot title.\n titlestring = \"Sea Ice Cover\"\n titlestring += f'\\nValid: {self.valid_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")}'\n if self.uses_inittime_dimension():\n time_step = self.valid_time - self.init_time\n time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600\n titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")})'\n\n if not self.noframe:\n ax.set_title(titlestring,\n horizontalalignment='left', x=0, fontsize=14)\n else:\n ax.text(bm.llcrnrx, bm.llcrnry, titlestring,\n fontsize=10, bbox=dict(facecolor='white', alpha=0.6))\n\n\nclass HS_TemperatureStyle_ML_01(MPLBasemapHorizontalSectionStyle):\n \"\"\"\n Upper Air Field: Temperature\n \"\"\"\n name = \"MLTemp01\"\n title = \"Temperature (Model Level) (degC)\"\n\n # Variables with the highest number of dimensions first (otherwise\n # MFDatasetCommonDims will throw an exception)!\n required_datafields = [\n (\"ml\", \"air_temperature\", \"degC\")]\n\n def _plot_style(self):\n \"\"\"\n \"\"\"\n bm = self.bm\n ax = self.bm.ax\n data = self.data\n\n cmin = -72\n cmax = 42\n thick_contours = np.arange(cmin, cmax, 6)\n thin_contours = [c for c in np.arange(cmin, cmax, 2)\n if c not in thick_contours]\n\n tempC = data['air_temperature']\n\n tc = bm.contourf(self.lonmesh, self.latmesh, tempC,\n np.arange(cmin, cmax, 2), cmap=plt.cm.nipy_spectral)\n self.add_colorbar(tc, \"Temperature (degC)\")\n\n # Colors in python2.6/site-packages/matplotlib/colors.py\n cs = bm.contour(self.lonmesh, self.latmesh, tempC,\n [0], colors=\"red\", linewidths=4)\n cs = bm.contour(self.lonmesh, self.latmesh, tempC,\n thick_contours, colors=\"saddlebrown\", linewidths=2)\n ax.clabel(cs, fontsize=14, fmt='%.0f')\n cs = bm.contour(self.lonmesh, self.latmesh, tempC,\n thin_contours, colors=\"saddlebrown\", linewidths=1)\n\n titlestring = f\"Temperature (degC) at model level {self.level}\"\n titlestring += f'\\nValid: {self.valid_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")}'\n if self.uses_inittime_dimension():\n time_step = self.valid_time - self.init_time\n time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600\n titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")})'\n\n if not self.noframe:\n ax.set_title(titlestring,\n horizontalalignment='left', x=0, fontsize=14)\n else:\n ax.text(bm.llcrnrx, bm.llcrnry, titlestring,\n fontsize=10, bbox=dict(facecolor='white', alpha=0.6))\n\n\nclass HS_TemperatureStyle_PL_01(MPLBasemapHorizontalSectionStyle):\n \"\"\"\n Pressure level version of the temperature style.\n \"\"\"\n name = \"PLTemp01\"\n title = \"Temperature (degC) and Geopotential Height (m)\"\n\n # Variables with the highest number of dimensions first (otherwise\n # MFDatasetCommonDims will throw an exception)!\n required_datafields = [\n (\"pl\", \"air_temperature\", \"degC\"),\n (\"pl\", \"geopotential_height\", \"m\")]\n\n def _plot_style(self):\n \"\"\"\n \"\"\"\n bm = self.bm\n ax = 
self.bm.ax\n data = self.data\n\n cmin = -72\n cmax = 42\n thick_contours = np.arange(cmin, cmax, 6)\n thin_contours = [c for c in np.arange(cmin, cmax, 2)\n if c not in thick_contours]\n\n tempC = data['air_temperature']\n\n tc = bm.contourf(self.lonmesh, self.latmesh, tempC,\n np.arange(cmin, cmax, 2), cmap=plt.cm.nipy_spectral)\n self.add_colorbar(tc, \"Temperature (degC)\")\n\n # Colors in python2.6/site-packages/matplotlib/colors.py\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n cs = bm.contour(self.lonmesh, self.latmesh, tempC,\n [0], colors=\"red\", linewidths=4)\n cs = bm.contour(self.lonmesh, self.latmesh, tempC,\n thick_contours, colors=\"saddlebrown\",\n linewidths=2, linestyles=\"solid\")\n ax.clabel(cs, colors=\"black\", fontsize=14, fmt='%.0f')\n cs = bm.contour(self.lonmesh, self.latmesh, tempC,\n thin_contours, colors=\"white\",\n linewidths=1, linestyles=\"solid\")\n\n # Plot geopotential height contours.\n gpm = self.data[\"geopotential_height\"]\n geop_contours = np.arange(400, 28000, 40)\n cs = bm.contour(self.lonmesh, self.latmesh, gpm,\n geop_contours, colors=\"black\", linewidths=1)\n if cs.levels[0] in geop_contours[::2]:\n lablevels = cs.levels[::2]\n else:\n lablevels = cs.levels[1::2]\n ax.clabel(cs, lablevels, fontsize=10, fmt='%.0f')\n\n titlestring = \"Temperature (degC) and Geopotential Height (m) at \" \\\n f\"{self.level:.0f} hPa\"\n titlestring += f'\\nValid: {self.valid_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")}'\n if self.uses_inittime_dimension():\n time_step = self.valid_time - self.init_time\n time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600\n titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")})'\n\n if not self.noframe:\n ax.set_title(titlestring,\n horizontalalignment='left', x=0, fontsize=14)\n else:\n ax.text(bm.llcrnrx, bm.llcrnry, titlestring,\n fontsize=10, bbox=dict(facecolor='white', alpha=0.6))\n\n\nclass HS_GeopotentialWindStyle_PL(MPLBasemapHorizontalSectionStyle):\n \"\"\"\n Upper Air Field: Geopotential and Wind\n \"\"\"\n name = \"PLGeopWind\"\n title = \"Geopotential Height (m) and Horizontal Wind (m/s)\"\n styles = [\n (\"default\", \"Wind Speed 10-85 m/s\"),\n (\"wind_10_105\", \"Wind Speed 10-105 m/s\"),\n (\"wind_10_65\", \"Wind Speed 10-65 m/s\"),\n (\"wind_20_55\", \"Wind Speed 20-55 m/s\"),\n (\"wind_15_55\", \"Wind Speed 15-55 m/s\")]\n\n # Variables with the highest number of dimensions first (otherwise\n # MFDatasetCommonDims will throw an exception)!\n required_datafields = [\n (\"pl\", \"geopotential_height\", \"m\"),\n (\"pl\", \"eastward_wind\", \"m/s\"),\n (\"pl\", \"northward_wind\", \"m/s\")]\n\n def _plot_style(self):\n \"\"\"\n \"\"\"\n bm = self.bm\n ax = self.bm.ax\n data = self.data\n\n # Compute wind speed.\n u = data[\"eastward_wind\"]\n v = data[\"northward_wind\"]\n wind = np.hypot(u, v)\n\n # Plot wind contours.\n # NOTE: Setting alpha=0.8 raises the transparency problem in the client\n # (the imshow issue, see ../issues/transparency; surfaces with alpha\n # values < 1 are mixed with grey). Hence, it is better to disable\n # alpha blending here until a fix has been found. 
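# --- Hedged sketch: phase-aligned labelling of alternate contour levels ---
# Several styles in this module label every other geopotential contour;
# checking whether the first drawn level falls on the even or the odd sublist
# keeps the labelled levels consistent across plots. Plain-array demo of that
# selection logic (no matplotlib needed).
import numpy as np

geop_contours = np.arange(400, 1000, 40)     # candidate levels
drawn = geop_contours[geop_contours >= 480]  # levels actually drawn
if drawn[0] in geop_contours[::2]:
    lablevels = drawn[::2]
else:
    lablevels = drawn[1::2]
print(lablevels[:3])  # [480 560 640]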
(mr 2011-02-01)\n wind_contours = np.arange(10, 90, 5) # default wind contours\n if self.style.lower() == \"wind_10_65\":\n wind_contours = np.arange(10, 70, 5)\n elif self.style.lower() == \"wind_20_55\":\n wind_contours = np.arange(20, 60, 5)\n elif self.style.lower() == \"wind_15_55\":\n wind_contours = np.arange(15, 60, 5)\n elif self.style.lower() == \"wind_10_105\":\n wind_contours = np.arange(10, 110, 5)\n cs = bm.contourf(self.lonmesh, self.latmesh, wind,\n wind_contours, cmap=plt.cm.inferno_r)\n self.add_colorbar(cs, \"Wind Speed (m/s)\")\n\n # Plot geopotential height contours.\n gpm = self.data[\"geopotential_height\"]\n\n gpm_interval = 20\n if self.level <= 20:\n gpm_interval = 120\n elif self.level <= 100:\n gpm_interval = 80\n elif self.level <= 500:\n gpm_interval = 40\n\n geop_contours = np.arange(400, 55000, gpm_interval)\n cs = bm.contour(self.lonmesh, self.latmesh, gpm,\n geop_contours, colors=\"green\", linewidths=2)\n if cs.levels[0] in geop_contours[::2]:\n lablevels = cs.levels[::2]\n else:\n lablevels = cs.levels[1::2]\n ax.clabel(cs, lablevels, fontsize=14, fmt='%.0f')\n\n # Convert wind data from m/s to knots for the wind barbs.\n uk = convert_to(u, \"m/s\", \"knots\")\n vk = convert_to(v, \"m/s\", \"knots\")\n\n # Transform wind vector field to fit map.\n lons2 = ((self.lons + 180) % 360) - 180\n lons2_ind = lons2.argsort()\n udat, vdat, xv, yv = bm.transform_vector(uk[:, lons2_ind], vk[:, lons2_ind],\n lons2[lons2_ind], self.lats,\n 16, 16, returnxy=True, masked=True)\n\n # Plot wind barbs.\n bm.barbs(xv, yv, udat, vdat,\n barbcolor='firebrick', flagcolor='firebrick', pivot='middle',\n linewidths=0.5, length=6, zorder=1)\n\n # Plot title.\n titlestring = \"Geopotential Height (m) and Horizontal Wind (m/s) \" \\\n f\"at {self.level:.0f} hPa\"\n titlestring += f'\\nValid: {self.valid_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")}'\n if self.uses_inittime_dimension():\n time_step = self.valid_time - self.init_time\n time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600\n titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")})'\n\n if not self.noframe:\n ax.set_title(titlestring,\n horizontalalignment='left', x=0, fontsize=14)\n else:\n ax.text(bm.llcrnrx, bm.llcrnry, titlestring,\n fontsize=10, bbox=dict(facecolor='white', alpha=0.6))\n\n\nclass HS_RelativeHumidityStyle_PL_01(MPLBasemapHorizontalSectionStyle):\n \"\"\"\n Upper Air Field: Relative Humidity\n Relative humidity and geopotential on pressure levels.\n \"\"\"\n name = \"PLRelHum01\"\n title = \"Relative Humditiy (%) and Geopotential Height (m)\"\n\n # Variables with the highest number of dimensions first (otherwise\n # MFDatasetCommonDims will throw an exception)!\n required_datafields = [\n (\"pl\", \"air_temperature\", \"K\"),\n (\"pl\", \"geopotential_height\", \"m\"),\n (\"pl\", \"specific_humidity\", \"kg/kg\")]\n\n def _prepare_datafields(self):\n \"\"\"\n Computes relative humidity from p, t, q.\n \"\"\"\n pressure = convert_to(self.level, self.get_elevation_units(), \"Pa\")\n self.data[\"relative_humidity\"] = thermolib.rel_hum(\n pressure, self.data[\"air_temperature\"], self.data[\"specific_humidity\"])\n\n def _plot_style(self):\n \"\"\"\n \"\"\"\n bm = self.bm\n ax = self.bm.ax\n data = self.data\n\n filled_contours = np.arange(70, 140, 15)\n thin_contours = np.arange(10, 140, 15)\n\n rh = data[\"relative_humidity\"]\n\n rhc = bm.contourf(self.lonmesh, self.latmesh, rh,\n filled_contours, cmap=plt.cm.winter_r)\n 
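# --- Hedged sketch: relative humidity from p, T and q ---
# _prepare_datafields() above delegates to mslib's thermolib.rel_hum; the
# Magnus-type approximation below (NOT necessarily thermolib's exact
# formulation) shows the underlying physics: vapour pressure derived from
# specific humidity, divided by the saturation vapour pressure over water.
import numpy as np

def rel_hum_approx(p_pa, t_k, q_kgkg):
    w = q_kgkg / (1.0 - q_kgkg)  # mixing ratio
    e = w * p_pa / (0.622 + w)   # vapour pressure [Pa]
    t_c = t_k - 273.15
    e_s = 611.2 * np.exp(17.62 * t_c / (243.12 + t_c))  # Magnus, over water
    return 100.0 * e / e_s       # relative humidity [%]

print(round(rel_hum_approx(85000.0, 275.0, 0.004), 1))  # ~78 for these toy inputs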
self.add_colorbar(rhc, \"Relative Humidity (%)\")\n\n # Colors in python2.6/site-packages/matplotlib/colors.py\n cs = bm.contour(self.lonmesh, self.latmesh, rh,\n thin_contours, colors=\"grey\",\n linewidths=0.5, linestyles=\"solid\")\n ax.clabel(cs, colors=\"grey\", fontsize=10, fmt='%.0f')\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n cs = bm.contour(self.lonmesh, self.latmesh, rh,\n np.arange(100, 170, 15), colors=\"yellow\", linewidths=1)\n\n # Plot geopotential height contours.\n gpm = self.data[\"geopotential_height\"]\n gpm_interval = 40 if self.level <= 500 else 20\n geop_contours = np.arange(400, 28000, gpm_interval)\n cs = bm.contour(self.lonmesh, self.latmesh, gpm,\n geop_contours, colors=\"darkred\", linewidths=2)\n if cs.levels[0] in geop_contours[::2]:\n lablevels = cs.levels[::2]\n else:\n lablevels = cs.levels[1::2]\n ax.clabel(cs, lablevels, fontsize=10, fmt='%.0f')\n\n titlestring = \"Relative Humditiy (%%) and Geopotential Height (m) at \" \\\n f\"{self.level:.0f} hPa\"\n titlestring += f'\\nValid: {self.valid_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")}'\n if self.uses_inittime_dimension():\n time_step = self.valid_time - self.init_time\n time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600\n titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")})'\n\n if not self.noframe:\n ax.set_title(titlestring,\n horizontalalignment='left', x=0, fontsize=14)\n else:\n ax.text(bm.llcrnrx, bm.llcrnry, titlestring,\n fontsize=10, bbox=dict(facecolor='white', alpha=0.6))\n\n\nclass HS_EQPTStyle_PL_01(MPLBasemapHorizontalSectionStyle):\n \"\"\"\n Upper Air Field: Equivalent Potential Temperature\n Equivalent potential temperature and geopotential on pressure levels.\n \"\"\"\n name = \"PLEQPT01\"\n title = \"Equivalent Potential Temperature (degC) and Geopotential Height (m)\"\n\n # Variables with the highest number of dimensions first (otherwise\n # MFDatasetCommonDims will throw an exception)!\n required_datafields = [\n (\"pl\", \"air_temperature\", \"K\"),\n (\"pl\", \"geopotential_height\", \"m\"),\n (\"pl\", \"specific_humidity\", \"kg/kg\")]\n\n def _prepare_datafields(self):\n \"\"\"\n Computes relative humidity from p, t, q.\n \"\"\"\n pressure = convert_to(self.level, self.get_elevation_units(), \"Pa\")\n self.data[\"equivalent_potential_temperature\"] = thermolib.eqpt_approx(\n pressure, self.data[\"air_temperature\"], self.data[\"specific_humidity\"])\n self.data[\"equivalent_potential_temperature\"] = convert_to(\n self.data[\"equivalent_potential_temperature\"], \"K\", \"degC\")\n\n def _plot_style(self):\n \"\"\"\n \"\"\"\n bm = self.bm\n ax = self.bm.ax\n data = self.data\n\n filled_contours = np.arange(0, 72, 2)\n thin_contours = np.arange(-40, 100, 2)\n\n eqpt = data[\"equivalent_potential_temperature\"]\n eqptc = bm.contourf(self.lonmesh, self.latmesh, eqpt,\n filled_contours, cmap=plt.cm.gist_rainbow_r)\n self.add_colorbar(eqptc, \"Equivalent Potential Temperature (degC)\")\n\n # Colors in python2.6/site-packages/matplotlib/colors.py\n cs = bm.contour(self.lonmesh, self.latmesh, eqpt,\n thin_contours, colors=\"grey\",\n linewidths=0.5, linestyles=\"solid\")\n if cs.levels[0] in thin_contours[::2]:\n lablevels = cs.levels[::2]\n else:\n lablevels = cs.levels[1::2]\n ax.clabel(cs, lablevels, colors=\"grey\", fontsize=10, fmt='%.0f')\n # cs = bm.contour(self.lonmesh, self.latmesh, eqpt,\n # np.arange(100, 170, 15), colors=\"yellow\", linewidths=1)\n\n # Plot geopotential height 
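# --- Hedged sketch: a first-order equivalent potential temperature ---
# The EQPT style above relies on thermolib.eqpt_approx; a common textbook
# approximation (again, not necessarily thermolib's exact form) is
# theta_e ~= theta * exp(L_v * q / (c_p * T)).
import numpy as np

def eqpt_rough(p_pa, t_k, q_kgkg):
    theta = t_k * (100000.0 / p_pa) ** 0.286  # potential temperature [K]
    return theta * np.exp(2.5e6 * q_kgkg / (1004.0 * t_k))

print(round(eqpt_rough(85000.0, 275.0, 0.004), 1))  # ~298.7 K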
contours.\n gpm = self.data[\"geopotential_height\"]\n gpm_interval = 40 if self.level <= 500 else 20\n geop_contours = np.arange(400, 28000, gpm_interval)\n cs = bm.contour(self.lonmesh, self.latmesh, gpm,\n geop_contours, colors=\"white\", linewidths=2)\n if cs.levels[0] in geop_contours[::2]:\n lablevels = cs.levels[::2]\n else:\n lablevels = cs.levels[1::2]\n ax.clabel(cs, lablevels, fontsize=10, fmt='%.0f')\n\n titlestring = \"Equivalent Potential Temperature (degC) and Geopotential Height (m) at \" \\\n f\"{self.level:.0f} hPa\"\n titlestring += f'\\nValid: {self.valid_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")}'\n if self.uses_inittime_dimension():\n time_step = self.valid_time - self.init_time\n time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600\n titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")})'\n\n if not self.noframe:\n ax.set_title(titlestring,\n horizontalalignment='left', x=0, fontsize=14)\n else:\n ax.text(bm.llcrnrx, bm.llcrnry, titlestring,\n fontsize=10, bbox=dict(facecolor='white', alpha=0.6))\n\n\nclass HS_WStyle_PL_01(MPLBasemapHorizontalSectionStyle):\n \"\"\"\n Upper Air Field: Vertical Velocity\n Vertical velocity and geopotential on pressure levels.\n \"\"\"\n name = \"PLW01\"\n title = \"Vertical Velocity (cm/s) and Geopotential Height (m)\"\n\n # Variables with the highest number of dimensions first (otherwise\n # MFDatasetCommonDims will throw an exception)!\n required_datafields = [\n (\"pl\", \"lagrangian_tendency_of_air_pressure\", \"Pa/s\"),\n (\"pl\", \"air_temperature\", \"K\"),\n (\"pl\", \"geopotential_height\", \"m\")]\n\n def _prepare_datafields(self):\n \"\"\"\n Computes relative humidity from p, t, q.\n \"\"\"\n pressure = convert_to(self.level, self.get_elevation_units(), \"Pa\")\n self.data[\"upward_wind\"] = thermolib.omega_to_w(\n self.data[\"lagrangian_tendency_of_air_pressure\"],\n pressure, self.data[\"air_temperature\"])\n self.data[\"upward_wind\"] = convert_to(self.data[\"upward_wind\"], \"m/s\", \"cm/s\")\n\n def _plot_style(self):\n \"\"\"\n \"\"\"\n bm = self.bm\n ax = self.bm.ax\n data = self.data\n\n upward_contours = np.arange(-42, 46, 4)\n w = data[\"upward_wind\"]\n\n wc = bm.contourf(self.lonmesh, self.latmesh, w,\n upward_contours, cmap=plt.cm.bwr)\n self.add_colorbar(wc, \"Vertical velocity (cm/s)\")\n\n # Colors in python2.6/site-packages/matplotlib/colors.py\n cs = bm.contour(self.lonmesh, self.latmesh, w,\n [2], colors=\"red\",\n linewidths=0.5, linestyles=\"solid\")\n cs = bm.contour(self.lonmesh, self.latmesh, w,\n [-2], colors=\"blue\",\n linewidths=0.5, linestyles=\"solid\")\n # ax.clabel(cs, thin_contours[::2], colors=\"grey\", fontsize=10, fmt='%.0f')\n # cs = bm.contour(self.lonmesh, self.latmesh, w,\n # np.arange(100, 170, 15), colors=\"yellow\", linewidths=1)\n\n # Plot geopotential height contours.\n gpm = self.data[\"geopotential_height\"]\n gpm_interval = 40 if self.level <= 500 else 20\n geop_contours = np.arange(400, 28000, gpm_interval)\n cs = bm.contour(self.lonmesh, self.latmesh, gpm,\n geop_contours, colors=\"darkgreen\", linewidths=2)\n if cs.levels[0] in geop_contours[::2]:\n lablevels = cs.levels[::2]\n else:\n lablevels = cs.levels[1::2]\n ax.clabel(cs, lablevels, fontsize=10, fmt='%.0f')\n\n titlestring = \"Vertical Velocity (cm/s) and Geopotential Height (m) at \" \\\n f\"{self.level:.0f} hPa\"\n titlestring += f'\\nValid: {self.valid_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")}'\n if self.uses_inittime_dimension():\n time_step = 
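# --- Hedged sketch: converting omega (Pa/s) to vertical velocity w (m/s) ---
# thermolib.omega_to_w performs this conversion for the style above; the
# standard hydrostatic ideal-gas form is w = -omega / (rho * g) with
# rho = p / (R_d * T). Constants and example values are illustrative.
def omega_to_w_rough(omega_pa_s, p_pa, t_k, r_d=287.05, g=9.81):
    rho = p_pa / (r_d * t_k)        # air density [kg/m^3]
    return -omega_pa_s / (rho * g)  # upward positive [m/s]

# 0.5 Pa/s (pressure increasing along the motion, i.e. descent) at 500 hPa:
print(round(100 * omega_to_w_rough(0.5, 50000.0, 250.0), 2), "cm/s")  # -7.32 cm/s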
self.valid_time - self.init_time\n time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600\n titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")})'\n\n if not self.noframe:\n ax.set_title(titlestring,\n horizontalalignment='left', x=0, fontsize=14)\n else:\n ax.text(bm.llcrnrx, bm.llcrnry, titlestring,\n fontsize=10, bbox=dict(facecolor='white', alpha=0.6))\n\n\nclass HS_DivStyle_PL_01(MPLBasemapHorizontalSectionStyle):\n \"\"\"\n Upper Air Field: Divergence\n Divergence and geopotential on pressure levels.\n \"\"\"\n name = \"PLDiv01\"\n title = \"Divergence and Geopotential Height (m)\"\n\n # Variables with the highest number of dimensions first (otherwise\n # MFDatasetCommonDims will throw an exception)!\n required_datafields = [\n (\"pl\", \"divergence_of_wind\", \"1/s\"),\n (\"pl\", \"geopotential_height\", \"m\")]\n\n def _plot_style(self):\n \"\"\"\n \"\"\"\n bm = self.bm\n ax = self.bm.ax\n data = self.data\n\n pos_contours = np.arange(4, 42, 4)\n neg_contours = np.arange(-40, 0, 4)\n\n d = data[\"divergence_of_wind\"] * 1.e5\n\n # Colors in python2.6/site-packages/matplotlib/colors.py\n cs = bm.contour(self.lonmesh, self.latmesh, d,\n pos_contours, colors=\"red\",\n linewidths=2, linestyles=\"solid\")\n cs = bm.contour(self.lonmesh, self.latmesh, d,\n neg_contours, colors=\"blue\",\n linewidths=2, linestyles=\"solid\")\n\n # Plot geopotential height contours.\n gpm = self.data[\"geopotential_height\"]\n gpm_interval = 40 if self.level <= 500 else 20\n geop_contours = np.arange(400, 28000, gpm_interval)\n cs = bm.contour(self.lonmesh, self.latmesh, gpm,\n geop_contours, colors=\"darkgreen\", linewidths=2)\n if cs.levels[0] in geop_contours[::2]:\n lablevels = cs.levels[::2]\n else:\n lablevels = cs.levels[1::2]\n ax.clabel(cs, lablevels, fontsize=10, fmt='%.0f')\n\n titlestring = \"Divergence (positive: red, negative: blue) and Geopotential Height (m) at \" \\\n f\"{self.level:.0f} hPa\"\n titlestring += f\"\\nValid: {self.valid_time.strftime('%a %Y-%m-%d %H:%M UTC')}\"\n if self.uses_inittime_dimension():\n time_step = self.valid_time - self.init_time\n time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600\n titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")})'\n\n if not self.noframe:\n ax.set_title(titlestring,\n horizontalalignment='left', x=0, fontsize=14)\n else:\n ax.text(bm.llcrnrx, bm.llcrnry, titlestring,\n fontsize=10, bbox=dict(facecolor='white', alpha=0.6))\n\n\nclass HS_EMAC_TracerStyle_ML_01(MPLBasemapHorizontalSectionStyle):\n \"\"\"\n Upper Air Field: EMAC Tracer\n \"\"\"\n name = \"EMAC_Eyja_Tracer\"\n title = \"EMAC Eyjafjallajokull Tracer (Model Level) (relative)\"\n\n # Variables with the highest number of dimensions first (otherwise\n # MFDatasetCommonDims will throw an exception)!\n required_datafields = [\n (\"ml\", \"emac_R12\", 'dimensionless')]\n\n def _plot_style(self):\n \"\"\"\n \"\"\"\n bm = self.bm\n ax = self.bm.ax\n data = self.data\n\n tracer = data[\"emac_R12\"] * 1.e4\n\n # Shift lat/lon grid for PCOLOR (see comments in HS_EMAC_TracerStyle_SFC_01).\n tc = bm.pcolormesh(self.lonmesh, self.latmesh, tracer,\n cmap=plt.cm.inferno_r,\n norm=matplotlib.colors.LogNorm(vmin=1., vmax=100.),\n shading='nearest', edgecolors='none')\n\n ac = bm.contour(self.lonmesh, self.latmesh, tracer,\n np.arange(1, 101, 1)[::2],\n colors=\"b\", linewidths=1)\n ax.clabel(ac, fontsize=10, fmt='%.0f')\n\n self.add_colorbar(tc, \"Tracer 
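# --- Hedged sketch: logarithmic colour scaling for tracer fields ---
# The EMAC tracer styles above use matplotlib.colors.LogNorm so that plumes
# spanning orders of magnitude stay visible; the norm maps [vmin, vmax] onto
# [0, 1] log-linearly.
import matplotlib.colors

norm = matplotlib.colors.LogNorm(vmin=1.0, vmax=100.0)
print(norm(1.0), norm(10.0), norm(100.0))  # 0.0 0.5 1.0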
(relative)\")\n\n titlestring = f\"EMAC Eyjafjallajokull Tracer (relative) at model level {self.level:.0f}\"\n titlestring += f'\\nValid: {self.valid_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")}'\n if self.uses_inittime_dimension():\n time_step = self.valid_time - self.init_time\n time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600\n titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")})'\n\n if not self.noframe:\n ax.set_title(titlestring,\n horizontalalignment='left', x=0, fontsize=14)\n else:\n ax.text(bm.llcrnrx, bm.llcrnry, titlestring,\n fontsize=10, bbox=dict(facecolor='white', alpha=0.6))\n\n\nclass HS_EMAC_TracerStyle_SFC_01(MPLBasemapHorizontalSectionStyle):\n \"\"\"\n 2D field: EMAC total column density\n \"\"\"\n name = \"EMAC_Eyja_TotalColumn\"\n title = \"EMAC Eyjafjallajokull Tracer Total Column Density (kg/m^2)\"\n\n # Variables with the highest number of dimensions first (otherwise\n # MFDatasetCommonDims will throw an exception)!\n required_datafields = [\n (\"sfc\", \"emac_column_density\", \"kg/m^2\")]\n\n def _plot_style(self):\n \"\"\"\n \"\"\"\n bm = self.bm\n ax = self.bm.ax\n data = self.data\n\n tracer = data[\"emac_column_density\"]\n\n tc = bm.pcolormesh(self.lonmesh, self.latmesh, tracer,\n cmap=plt.cm.inferno_r,\n norm=matplotlib.colors.LogNorm(vmin=0.05, vmax=0.5),\n shading=\"nearest\", edgecolors='none')\n\n ac = bm.contour(self.lonmesh, self.latmesh, tracer,\n np.arange(0.05, 0.55, 0.05),\n colors=\"b\", linewidths=1)\n ax.clabel(ac, fontsize=10, fmt='%.2f')\n\n self.add_colorbar(tc, \"column density (kg/m^2)\")\n\n titlestring = \"EMAC Eyjafjallajokull Tracer Total Column Density (kg/m^2)\"\n titlestring += f'\\nValid: {self.valid_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")}'\n if self.uses_inittime_dimension():\n time_step = self.valid_time - self.init_time\n time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600\n titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")})'\n\n if not self.noframe:\n ax.set_title(titlestring,\n horizontalalignment='left', x=0, fontsize=14)\n else:\n ax.text(bm.llcrnrx, bm.llcrnry, titlestring,\n fontsize=10, bbox=dict(facecolor='white', alpha=0.6))\n\n\nclass HS_PVTropoStyle_PV_01(MPLBasemapHorizontalSectionStyle):\n \"\"\"\n Dynamical (2PVU) Tropopause Fields\n Dynamical tropopause plots (2-PVU level). Three styles are available:\n Pressure, potential temperature, and geopotential height.\n \"\"\"\n name = \"PVTropo01\"\n title = \"Dynamical Tropopause\"\n\n # Variables with the highest number of dimensions first (otherwise\n # MFDatasetCommonDims will throw an exception)!\n required_datafields = [\n (\"pv\", \"air_potential_temperature\", \"K\"),\n (\"pv\", \"geopotential_height\", \"m\"),\n (\"pv\", \"air_pressure\", \"hPa\")]\n\n styles = [\n (\"default\", \"Pressure (hPa)\"),\n (\"GEOP\", \"Geopotential Height (m)\"),\n (\"PT\", \"Potential Temperature (K)\"),\n (\"PRES\", \"Pressure (hPa)\")]\n\n def _plot_style(self):\n \"\"\"\n \"\"\"\n bm = self.bm\n ax = self.bm.ax\n data = self.data\n\n # Default style is pressure.\n if self.style.lower() == \"default\":\n self.style = \"PRES\"\n\n # Define colourbars and contour levels for the three styles. For\n # pressure and height, a terrain colourmap is used (bluish colours for\n # low altitudes, brownish colours for high altitudes). 
For potential\n # temperature, a rainbow colourmap is used (blue=low temps, red=hight\n # temps).\n if self.style == \"PRES\":\n filled_contours = np.arange(120, 551, 10)\n thin_contours = np.arange(100, 601, 40)\n vardata = data[\"air_pressure\"]\n label = \"Pressure (hPa)\"\n fcmap = plt.cm.terrain_r\n elif self.style == \"PT\":\n filled_contours = np.arange(280, 380, 2)\n thin_contours = np.arange(260, 440, 10)\n vardata = data[\"air_potential_temperature\"]\n label = \"Potential Temperature (K)\"\n fcmap = plt.cm.gist_rainbow_r\n elif self.style == \"GEOP\":\n filled_contours = np.arange(5000, 15000, 250)\n thin_contours = np.arange(5000, 15000, 500)\n vardata = data[\"geopotential_height\"]\n label = \"Geopotential Height (m)\"\n fcmap = plt.cm.terrain\n\n # Filled contour plot of pressure/geop./pot.temp. Extend the colourbar\n # to fill regions whose values exceed the colourbar range.\n contours = bm.contourf(self.lonmesh, self.latmesh, vardata,\n filled_contours, cmap=fcmap, extend=\"both\")\n self.add_colorbar(contours, label)\n\n # Colors in python2.6/site-packages/matplotlib/colors.py\n cs = bm.contour(self.lonmesh, self.latmesh, vardata,\n thin_contours, colors=\"yellow\",\n linewidths=0.5, linestyles=\"solid\")\n if cs.levels[0] in thin_contours[::2]:\n lablevels = cs.levels[::2]\n else:\n lablevels = cs.levels[1::2]\n ax.clabel(cs, lablevels, colors=\"red\", fontsize=11, fmt='%.0f')\n\n if self.style == \"PRES\":\n titlestring = \"Dynamical Tropopause Pressure (hPa) at \" \\\n f\"{float(self.level):.1f} PVU\"\n elif self.style == \"PT\":\n titlestring = \"Dynamical Tropopause Potential Temperature (K) at \" \\\n f\"{float(self.level):.1f} PVU\"\n elif self.style == \"GEOP\":\n titlestring = \"Dynamical Tropopause Geopotential Height (m) at \" \\\n f\"{float(self.level):.1f} PVU\"\n titlestring += f'\\nValid: {self.valid_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")}'\n if self.uses_inittime_dimension():\n time_step = self.valid_time - self.init_time\n time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600\n titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")})'\n\n if not self.noframe:\n ax.set_title(titlestring,\n horizontalalignment='left', x=0, fontsize=14)\n else:\n ax.text(bm.llcrnrx, bm.llcrnry, titlestring,\n fontsize=10, bbox=dict(facecolor='white', alpha=0.6))\n\n\nclass HS_ThermalTropoStyle_SFC_01(MPLBasemapHorizontalSectionStyle):\n \"\"\"\n Dynamical (2PVU) Tropopause Fields\n Dynamical tropopause plots (2-PVU level). Three styles are available:\n Pressure, potential temperature, and geopotential height.\n \"\"\"\n name = \"ThermalTropo01\"\n title = \"Thermal Tropopause\"\n\n # Variables with the highest number of dimensions first (otherwise\n # MFDatasetCommonDims will throw an exception)!\n required_datafields = [\n (\"sfc\", \"tropopause_altitude\", \"km\"),\n (\"sfc\", \"secondary_tropopause_altitude\", \"km\"),\n ]\n\n styles = [\n (\"default\", \"Overview\"),\n (\"primary\", \"Primary Thermal Tropopause\"),\n (\"secondary\", \"Secondary Thermal Tropopause\"),\n ]\n\n def _plot_style(self):\n \"\"\"\n \"\"\"\n bm = self.bm\n ax = self.bm.ax\n data = self.data\n\n # Define colourbars and contour levels for the three styles. For\n # pressure and height, a terrain colourmap is used (bluish colours for\n # low altitudes, brownish colours for high altitudes). 
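# --- Hedged sketch: hatching where a masked field has valid data ---
# The "default" thermal-tropopause style below overlays hatching wherever a
# secondary tropopause exists: mask the NaNs, invert the mask, and contour-fill
# it at alpha=0 so only the hatches show. Synthetic data for illustration.
import numpy as np
import matplotlib.pyplot as plt

z = np.random.rand(20, 20) * 15.0
z[z < 7.5] = np.nan                  # pretend: no secondary tropopause here
zm = np.ma.masked_invalid(z)
mask = ~zm.mask                      # True where data exists

fig, ax = plt.subplots()
ax.contourf(zm, levels=np.arange(5.0, 18.0, 0.25), extend="both")
ax.contourf(mask, [0, 0.5, 1.5], hatches=["", "xx"], alpha=0)
plt.close(fig)                       # demo only, nothing is displayed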
For potential\n # temperature, a rainbow colourmap is used (blue=low temps, red=hight\n # temps).\n fcmap = plt.cm.terrain\n\n if self.style == \"default\":\n vardata = data[\"tropopause_altitude\"]\n label = \"Primary Tropopause (km)\"\n elif self.style == \"primary\":\n vardata = data[\"tropopause_altitude\"]\n label = \"Primary Tropopause (km)\"\n elif self.style == \"secondary\":\n vardata = data[\"secondary_tropopause_altitude\"]\n label = \"Secondary Tropopause (km)\"\n filled_contours = np.arange(5, 18, 0.25)\n thin_contours = np.arange(5, 18, 1.0)\n\n # Filled contour plot of pressure/geop./pot.temp. Extend the colourbar\n # to fill regions whose values exceed the colourbar range.\n contours = bm.contourf(self.lonmesh, self.latmesh, vardata,\n filled_contours, cmap=fcmap, extend=\"both\")\n\n data[\"secondary_tropopause_altitude\"] = np.ma.masked_invalid(data[\"secondary_tropopause_altitude\"])\n\n if self.style == \"default\":\n mask = ~data[\"secondary_tropopause_altitude\"].mask\n bm.contourf(self.lonmesh, self.latmesh, mask, [0, 0.5, 1.5], hatches=[\"\", \"xx\"], alpha=0)\n\n self.add_colorbar(contours, label)\n\n # Colors in python2.6/site-packages/matplotlib/colors.py\n cs = bm.contour(self.lonmesh, self.latmesh, vardata,\n thin_contours, colors=\"yellow\",\n linewidths=0.5, linestyles=\"solid\")\n if cs.levels[0] in thin_contours[::2]:\n lablevels = cs.levels[::2]\n else:\n lablevels = cs.levels[1::2]\n ax.clabel(cs, lablevels, colors=\"red\", fontsize=11, fmt='%.0f')\n\n\nclass HS_VIProbWCB_Style_01(MPLBasemapHorizontalSectionStyle):\n \"\"\"\n Surface Field: Probability of WCB\n Total column probability of WCB trajectory occurence, derived from\n Lagranto trajectories (TNF 2012 product).\n \"\"\"\n name = \"VIProbWCB\"\n title = \"Total Column Probability of WCB (%)\"\n\n # Variables with the highest number of dimensions first (otherwise\n # MFDatasetCommonDims will throw an exception)!\n required_datafields = [\n (\"sfc\", \"air_pressure_at_sea_level\", \"hPa\"),\n (\"sfc\", \"vertically_integrated_probability_of_wcb_occurrence\", 'dimensionless')\n ]\n\n def _plot_style(self):\n \"\"\"\n \"\"\"\n bm = self.bm\n ax = self.bm.ax\n data = self.data\n\n thick_contours = np.arange(952, 1050, 8)\n thin_contours = [c for c in np.arange(952, 1050, 2)\n if c not in thick_contours]\n\n mslp = data[\"air_pressure_at_sea_level\"]\n pwcb = 100. 
* data[\"vertically_integrated_probability_of_wcb_occurrence\"]\n\n # Contour plot of mean sea level pressure.\n cs = bm.contour(self.lonmesh, self.latmesh, mslp,\n thick_contours, colors=\"darkblue\", linewidths=2)\n ax.clabel(cs, fontsize=12, fmt='%.0f')\n cs = bm.contour(self.lonmesh, self.latmesh, mslp,\n thin_contours, colors=\"darkblue\", linewidths=1)\n\n # Filled contours of p(WCB).\n contours = bm.contourf(self.lonmesh, self.latmesh, pwcb,\n np.arange(0, 101, 10), cmap=plt.cm.pink_r)\n self.add_colorbar(contours)\n\n titlestring = \"Mean sea level pressure (hPa) and total column probability of WCB (0-1)\"\n titlestring += f'\\nValid: {self.valid_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")}'\n if self.uses_inittime_dimension():\n time_step = self.valid_time - self.init_time\n time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600\n titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")})'\n\n if not self.noframe:\n ax.set_title(titlestring,\n horizontalalignment='left', x=0, fontsize=14)\n else:\n ax.text(bm.llcrnrx, bm.llcrnry, titlestring,\n fontsize=10, bbox=dict(facecolor='white', alpha=0.6))\n\n\nclass HS_LagrantoTrajStyle_PL_01(MPLBasemapHorizontalSectionStyle):\n \"\"\"\n Upper level Field: Lagranto WCB/INSITU/MIX trajectories\n Number of Lagranto trajectories per grid box for WCB, MIX, INSITU\n trajectories (ML-Cirrus 2014 product).\n \"\"\"\n name = \"PLLagrantoTraj\"\n title = \"Cirrus density, insitu red, mix blue, wcb colour (1E-6/km^2/hPa)\"\n\n # Variables with the highest number of dimensions first (otherwise\n # MFDatasetCommonDims will throw an exception)!\n required_datafields = [\n (\"pl\", \"number_of_wcb_trajectories\", 'dimensionless'),\n (\"pl\", \"number_of_insitu_trajectories\", 'dimensionless'),\n (\"pl\", \"number_of_mix_trajectories\", 'dimensionless')\n ]\n\n def _plot_style(self):\n bm = self.bm\n ax = self.bm.ax\n data = self.data\n\n thin_contours = [0.1, 0.5, 1., 2., 3., 4., 5., 6., 7., 8.]\n\n nwcb = 1.E6 * data[\"number_of_wcb_trajectories\"]\n ninsitu = 1.E6 * data[\"number_of_insitu_trajectories\"]\n nmix = 1.E6 * data[\"number_of_mix_trajectories\"]\n\n # Contour plot of num(INSITU).\n # cs = bm.contour(self.lonmesh, self.latmesh, ninsitu,\n # thick_contours, colors=\"darkred\", linewidths=2)\n # ax.clabel(cs, fontsize=12, fmt='%.0f')\n cs = bm.contour(self.lonmesh, self.latmesh, ninsitu,\n thin_contours, colors=\"red\", linewidths=1)\n ax.clabel(cs, fontsize=12, fmt='%.1f')\n\n # Contour plot of num(MIX).\n # cs = bm.contour(self.lonmesh, self.latmesh, nmix,\n # thick_contours, colors=\"darkblue\", linewidths=2)\n # ax.clabel(cs, fontsize=12, fmt='%.0f')\n cs = bm.contour(self.lonmesh, self.latmesh, nmix,\n thin_contours, colors=\"darkblue\", linewidths=1)\n ax.clabel(cs, fontsize=12, fmt='%.1f')\n\n # Filled contours of num(WCB).\n contours = bm.contourf(self.lonmesh, self.latmesh, nwcb,\n thin_contours, cmap=plt.cm.gist_ncar_r, extend=\"max\")\n self.add_colorbar(contours)\n\n titlestring = \"Cirrus density, insitu red, mix blue, wcb colour (1E-6/km^2/hPa)\"\n titlestring += f'\\nValid: {self.valid_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")}'\n if self.uses_inittime_dimension():\n time_step = self.valid_time - self.init_time\n time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600\n titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")})'\n\n if not self.noframe:\n ax.set_title(titlestring,\n horizontalalignment='left', 
x=0, fontsize=14)\n else:\n ax.text(bm.llcrnrx, bm.llcrnry, titlestring,\n fontsize=10, bbox=dict(facecolor='white', alpha=0.6))\n\n\nclass HS_BLH_MSLP_Style_01(MPLBasemapHorizontalSectionStyle):\n \"\"\"\n Surface Field: Boundary Layer Height\n \"\"\"\n name = \"BLH\"\n title = \"Boundary Layer Height (m)\"\n\n # Variables with the highest number of dimensions first (otherwise\n # MFDatasetCommonDims will throw an exception)!\n required_datafields = [\n (\"sfc\", \"air_pressure_at_sea_level\", \"hPa\"),\n (\"sfc\", \"atmosphere_boundary_layer_thickness\", \"m\")]\n\n def _plot_style(self):\n bm = self.bm\n ax = self.bm.ax\n data = self.data\n\n thick_contours = np.arange(952, 1050, 8)\n thin_contours = [c for c in np.arange(952, 1050, 2)\n if c not in thick_contours]\n\n mslp = data[\"air_pressure_at_sea_level\"]\n\n # Colors in python2.6/site-packages/matplotlib/colors.py\n cs = bm.contour(self.lonmesh, self.latmesh, mslp,\n thick_contours, colors=\"darkred\", linewidths=2)\n ax.clabel(cs, fontsize=12, fmt='%.0f')\n cs = bm.contour(self.lonmesh, self.latmesh, mslp,\n thin_contours, colors=\"darkred\", linewidths=1)\n\n # Filled contours of BLH, interval 100m.\n blh = data[\"atmosphere_boundary_layer_thickness\"]\n contours = bm.contourf(\n self.lonmesh, self.latmesh, blh, np.arange(0, 3000, 100), cmap=plt.cm.terrain, extend=\"max\")\n self.add_colorbar(contours)\n\n # Labelled thin grey contours of BLH, interval 500m.\n cs = bm.contour(self.lonmesh, self.latmesh, blh,\n np.arange(0, 3000, 500), colors=\"grey\", linewidths=0.5)\n ax.clabel(cs, fontsize=12, fmt='%.0f')\n\n # Title\n titlestring = \"Boundary layer height (m) and mean sea level pressure (hPa)\"\n titlestring += f'\\nValid: {self.valid_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")}'\n if self.uses_inittime_dimension():\n time_step = self.valid_time - self.init_time\n time_step_hrs = (time_step.days * 86400 + time_step.seconds) // 3600\n titlestring += f' (step {time_step_hrs:d} hrs from {self.init_time.strftime(\"%a %Y-%m-%d %H:%M UTC\")})'\n\n if not self.noframe:\n ax.set_title(titlestring,\n horizontalalignment='left', x=0, fontsize=14)\n else:\n ax.text(bm.llcrnrx, bm.llcrnry, titlestring,\n fontsize=10, bbox=dict(facecolor='white', alpha=0.6))\n\n\nclass HS_Meteosat_BT108_01(MPLBasemapHorizontalSectionStyle):\n \"\"\"\n Meteosat brightness temperature\n \"\"\"\n name = \"MSG_BT108\"\n title = \"Brightness Temperature 10.8um (K)\"\n\n # Variables with the highest number of dimensions first (otherwise\n # MFDatasetCommonDims will throw an exception)!\n required_datafields = [\n (\"sfc\", \"msg_brightness_temperature_108\", \"K\")]\n\n def _plot_style(self):\n \"\"\"\n \"\"\"\n bm = self.bm\n ax = self.bm.ax\n data = self.data\n\n cmin = 230\n cmax = 300\n # thick_contours = np.arange(cmin, cmax, 6)\n # thin_contours = [c for c in np.arange(cmin, cmax, 2) \\\n # if c not in thick_contours]\n\n tempC = data[\"msg_brightness_temperature_108\"]\n\n logging.debug(\"Min: %.2f K, Max: %.2f K\", tempC.min(), tempC.max())\n\n tc = bm.contourf(self.lonmesh, self.latmesh, tempC,\n np.arange(cmin, cmax, 2), cmap=plt.cm.gray_r, extend=\"both\")\n self.add_colorbar(tc, \"Brightness Temperature (K)\")\n\n # Colors in python2.6/site-packages/matplotlib/colors.py\n # cs = bm.contour(self.lonmesh, self.latmesh, tempC,\n # [0], colors=\"red\", linewidths=4)\n # cs = bm.contour(self.lonmesh, self.latmesh, tempC,\n # thick_contours, colors=\"saddlebrown\", linewidths=2)\n # ax.clabel(cs, fontsize=14, fmt='%.0f')\n # cs = bm.contour(self.lonmesh, 
self.latmesh, tempC,\n # thin_contours, colors=\"saddlebrown\", linewidths=1)\n\n titlestring = \"10.8 um Brightness Temperature (K)\"\n titlestring += f\"\\nValid: {self.valid_time.strftime('%a %Y-%m-%d %H:%M UTC')}\"\n\n if not self.noframe:\n ax.set_title(titlestring,\n horizontalalignment='left', x=0, fontsize=14)\n else:\n ax.text(bm.llcrnrx, bm.llcrnry, titlestring,\n fontsize=10, bbox=dict(facecolor='white', alpha=0.6))\n","sub_path":"mslib/mswms/mpl_hsec_styles.py","file_name":"mpl_hsec_styles.py","file_ext":"py","file_size_in_byte":65015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"560315836","text":"# coding: utf-8\nfrom flask import Flask, request, jsonify\nimport urllib\nimport base64\nimport json\nimport requests\nimport re\n\nimport google_cloud_vison\n\n\napp = Flask(__name__)\ndefault_port = 5000\n\n\n@app.route('/api/classify', methods=['POST'])\ndef classify ():\n tfpp_json = request.json\n \n if (tfpp_json is None):\n return jsonify(description='Bad request')\n res_json = google_cloud_vison.scan(tfpp_json)\n print(res_json)\n res_json = res_json['responses'][0]['textAnnotations']\n\n product_list = []\n pattern = '¥?[0-9]+\\Z'\n\n for i in range(1, len(res_json)):\n context = res_json[i]['description']\n repattern = re.compile(pattern)\n if repattern.match(context):\n if res_json[i-1]['description'].isalpha():\n product_contexts = {}\n product_contexts['name'] = res_json[i-1]['description']\n product_contexts['price'] = res_json[i]['description']\n product_list.append(product_contexts)\n print('mached!!: ' + res_json[i]['description'])\n if res_json[i-1]['description'] == '合計':\n break\n print(product_list)\n\n return jsonify({'image_classified': product_list})\n\napp.run(host=\"0.0.0.0\", port=default_port)\n\n","sub_path":"app3.py","file_name":"app3.py","file_ext":"py","file_size_in_byte":1280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"338064818","text":"from flask import Flask, render_template, request\nfrom selenium import webdriver\nfrom selenium.webdriver import FirefoxOptions\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\napp = Flask(__name__)\n\n@app.route(\"/\")\ndef form():\n return render_template(\"form.html\")\n\n@app.route(\"/\", methods=[\"POST\"])\ndef my_form_post():\n url = request.form[\"url\"]\n opts = FirefoxOptions()\n opts.add_argument(\"--headless\")\n browser = webdriver.Firefox(firefox_options=opts)\n _take = url\n removable1,removable2 = '''\"https://\",\"http://\"'''\n\n if removable1 in _take:\n _take = _take.replace('https://', '')\n elif removable2 in _take:\n _take = _take.replace('http://', '')\n else:\n _take = _take\n\n browser.get(\"https://\" + _take)\n\n time.sleep(3)\n # print(browser.current_url)\n _toMatch = browser.current_url\n time.sleep(1)\n browser.quit()\n\n # searching algorithm\n def KMPSearch(pat, txt):\n M = len(pat)\n N = len(txt)\n\n lps = [0] * M\n j = 0 # index for pat[]\n\n computeLPSArray(pat, M, lps)\n\n i = 0 # index for txt[]\n while i < N:\n if pat[j] == txt[i]:\n i += 1\n j += 1\n\n if j == M:\n # Return value to use it\n return 100\n j = lps[j - 1]\n\n elif i < N and pat[j] != txt[i]:\n\n if j != 0:\n j = lps[j - 1]\n else:\n i += 1\n\n def computeLPSArray(pat, M, lps):\n len = 0\n\n lps[0] # lps[0] is always 0\n i = 1\n\n while i < M:\n if pat[i] == pat[len]:\n len += 1\n lps[i] = len\n i += 1\n else:\n\n if len != 0:\n len = lps[len - 1]\n\n else:\n lps[i] = 0\n i += 
1\n\n txt = str(_toMatch)\n pat = \"ngrok.io\"\n\n #KMPSearch(pat, txt)\n\n if KMPSearch(pat, txt) == 100:\n status = \"It is a Phishing Link, Don't visit\"\n else:\n status = \"It is a Safe Link, You can go\"\n\n urlInfo = _take\n\n return render_template(\"form.html\",\n urlUpdate=urlInfo,\n stausUpdate=status)\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"562709937","text":"# encoding: utf-8\nimport copy\nimport itertools\n\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nimport torch.utils.model_zoo as model_zoo\nimport random\nfrom scipy.spatial.distance import cdist\nfrom sklearn.preprocessing import normalize\nfrom torch import nn, optim\nfrom torch.utils.data import dataloader\nfrom torchvision import transforms\nfrom torchvision.models.resnet import Bottleneck, resnet50\nfrom torchvision.transforms import functional\n\nfrom .resnet import ResNet\n\ndef weights_init_kaiming(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_out')\n nn.init.constant_(m.bias, 0.0)\n elif classname.find('Conv') != -1:\n nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in')\n nn.init.constant_(m.bias, 0.0)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight, 1.0, 0.02)\n nn.init.constant_(m.bias, 0.0)\n\n\ndef weights_init_classifier(m):\n classname = m.__class__.__name__\n if classname.find('Linear') != -1:\n nn.init.normal_(m.weight, std=0.001)\n nn.init.constant_(m.bias, 0.0)\n\n\nclass PBN(nn.Module):\n def __init__(self, input_dim, num_classes, do_reduction=False, num_reduction=512):\n super(PBN, self).__init__()\n self.do_reduction = do_reduction\n self.input_dim = input_dim\n self.num_reduction = num_reduction\n\n self.avg_pool = nn.AdaptiveAvgPool2d((1,1))\n self.max_pool = nn.AdaptiveMaxPool2d((1,1))\n\n self.reduction = nn.Linear(input_dim, num_reduction) # 别忘了赋予参数\n self.reduction.apply(weights_init_kaiming)\n # self.reduction = reduction\n if do_reduction:\n self.bn = nn.BatchNorm1d(num_reduction)\n self.classifier = nn.Linear(num_reduction, num_classes)\n self.classifier.apply(weights_init_classifier)\n else:\n self.bn = nn.BatchNorm1d(input_dim)\n self.classifier = nn.Linear(input_dim, num_classes)\n self.classifier.apply(weights_init_classifier)\n\n # self.classifier = nn.Linear(self.num_reduction, num_classes)\n # self.classifier.apply(weights_init_classifier)\n # self.classifier = classifier\n\n def forward(self, x):\n # GAP + GMP\n x1 = self.avg_pool(x)\n x2 = self.max_pool(x)\n x3 = x1 + x2\n x4 = torch.squeeze(x3) # 去掉所有为1的维度 等价于x.view(x.size(0), x.size(1))\n # Reduction\n if self.do_reduction:\n x5 = self.reduction(x4) \n else:\n x5 = x4\n # BNNeck\n x6 = self.bn(x5)\n # Classification\n x7 = self.classifier(x6)\n return x5, x6, x7 # x5 for triplet; x6 for inference; x7 for softmax\n\n\nclass PBN_modify(nn.Module):\n def __init__(self, input_dim, num_classes, do_reduction=False, num_reduction=512):\n super(PBN_modify, self).__init__()\n self.do_reduction = do_reduction\n self.input_dim = input_dim\n self.num_reduction = num_reduction\n\n self.avg_pool = nn.AdaptiveAvgPool2d((1,1))\n self.max_pool = nn.AdaptiveMaxPool2d((1,1))\n\n self.reduction = nn.Linear(self.input_dim, self.num_reduction) # 别忘了赋予参数\n self.reduction.apply(weights_init_classifier)\n # self.reduction = 
reduction\n\n self.bn = nn.BatchNorm1d(self.num_reduction)\n\n self.classifier = nn.Linear(self.num_reduction, num_classes)\n self.classifier.apply(weights_init_classifier)\n # self.classifier = classifier\n\n def forward(self, x):\n # GAP + GMP\n x1 = self.avg_pool(x)\n x2 = self.max_pool(x)\n \n # ------ for x1 -------\n x1_1 = torch.squeeze(x1)\n if self.do_reduction:\n x1_2 = self.reduction(x1_1) \n else:\n x1_2 = x1_1\n self.num_reduction = self.input_dim\n x1_3 = self.bn(x1_2)\n x1_4 = self.classifier(x1_3)\n\n # ------ for x2 -------\n x2_1 = torch.squeeze(x2)\n if self.do_reduction:\n x2_2 = self.reduction(x2_1) \n else:\n x2_2 = x2_1\n self.num_reduction = self.input_dim\n x2_3 = self.bn(x2_2)\n x2_4 = self.classifier(x2_3) \n\n return x1_2, x1_3, x1_4, x2_2, x2_3, x2_4 # x5 for triplet; x6 for inference; x7 for softmax\n\nclass SELayer(nn.Module):\n def __init__(self, channel, reduction=16):\n super(SELayer, self).__init__()\n self.avg_pool = nn.AdaptiveAvgPool2d(1)\n self.fc = nn.Sequential(\n nn.Linear(channel, channel // reduction),\n nn.ReLU(inplace=True),\n nn.Linear(channel // reduction, channel),\n nn.Sigmoid()\n )\n\n def forward(self, x):\n b, c, _, _ = x.size()\n y = self.avg_pool(x).view(b, c)\n y = self.fc(y).view(b, c, 1, 1)\n return x * y\n\nclass BatchDrop(nn.Module):\n def __init__(self, h_ratio, w_ratio):\n super(BatchDrop, self).__init__()\n self.h_ratio = h_ratio\n self.w_ratio = w_ratio\n \n def forward(self, x):\n if self.training:\n h, w = x.size()[-2:]\n rh = round(self.h_ratio * h)\n rw = round(self.w_ratio * w)\n sx = random.randint(0, h-rh)\n sy = random.randint(0, w-rw)\n mask = x.new_ones(x.size())\n mask[:, :, sx:sx+rh, sy:sy+rw] = 0\n x = x * mask\n return x\n\nclass BatchCrop(nn.Module):\n def __init__(self, ratio):\n super(BatchCrop, self).__init__()\n self.ratio = ratio\n\n def forward(self, x):\n if self.training:\n h, w = x.size()[-2:]\n rw = int(self.ratio * w)\n start = random.randint(0, h-1)\n if start + rw > h:\n select = list(range(0, start+rw-h)) + list(range(start, h))\n else:\n select = list(range(start, start+rw))\n mask = x.new_zeros(x.size())\n mask[:, :, select, :] = 1\n x = x * mask\n return x\n\nclass ResNetBuilder(nn.Module):\n in_planes = 2048\n\n def __init__(self, num_classes=None, last_stride=1, pretrained=False):\n super().__init__()\n self.base = ResNet(last_stride)\n if pretrained:\n model_url = 'https://download.pytorch.org/models/resnet50-19c8e357.pth'\n self.base.load_param(model_zoo.load_url(model_url))\n\n self.num_classes = num_classes\n if num_classes is not None:\n self.bottleneck = nn.Sequential(\n nn.Linear(self.in_planes, 512),\n nn.BatchNorm1d(512),\n nn.LeakyReLU(0.1),\n nn.Dropout(p=0.5)\n )\n self.bottleneck.apply(weights_init_kaiming)\n self.classifier = nn.Linear(512, self.num_classes)\n self.classifier.apply(weights_init_classifier)\n\n def forward(self, x):\n global_feat = self.base(x)\n global_feat = F.avg_pool2d(global_feat, global_feat.shape[2:]) # (b, 2048, 1, 1)\n global_feat = global_feat.view(global_feat.shape[0], -1)\n if self.training and self.num_classes is not None:\n feat = self.bottleneck(global_feat)\n cls_score = self.classifier(feat)\n return [global_feat], [cls_score]\n else:\n return global_feat\n\n def get_optim_policy(self):\n base_param_group = self.base.parameters()\n if self.num_classes is not None:\n add_param_group = itertools.chain(self.bottleneck.parameters(), self.classifier.parameters())\n return [\n {'params': base_param_group},\n {'params': add_param_group}\n ]\n else:\n 
return [\n {'params': base_param_group}\n ]\n\nclass BFE(nn.Module):\n def __init__(self, num_classes, width_ratio=0.5, height_ratio=0.5):\n super(BFE, self).__init__()\n model = resnet50(pretrained=True)\n model.fc = nn.Sequential()\n self.model = model\n self.model.layer4[0].downsample[0].stride = (1,1)\n self.model.layer4[0].conv2.stride = (1,1)\n\n # layer3 branch\n self.bottleneck1 = Bottleneck(1024, 256)\n self.PBN1 = PBN(1024, num_classes, do_reduction=False)\n\n # global branch\n self.PBN2 = PBN(2048, num_classes, do_reduction=True, num_reduction=512) # 到这来\n\n # part1 branch\n self.bottleneck2 = Bottleneck(2048, 512)\n self.PBN3 = PBN(2048, num_classes, do_reduction=True, num_reduction=512)\n\n # part2 branch\n self.bottleneck3 = Bottleneck(2048, 512)\n self.PBN4 = PBN(2048, num_classes, do_reduction=True, num_reduction=512)\n\n\n def forward(self, x):\n x = self.model.conv1(x)\n x = self.model.bn1(x)\n x = self.model.relu(x)\n x = self.model.maxpool(x)\n\n x = self.model.layer1(x)\n x = self.model.layer2(x)\n x3 = self.model.layer3(x)\n x = self.model.layer4(x3)\n\n # --- layer3 分支 ---\n x3_1 = self.bottleneck1(x3)\n x3_2,x3_3,x3_4 = self.PBN1(x3_1)\n\n # --- global 分支 ---\n x4_1, x4_2, x4_3 = self.PBN2(x)\n\n # --- divide x into two parts ---\n # _, _,height, weight = x.shape # [N, C, H, W]\n height = x.shape[2]\n x_p1 = x[:, :, :int(height/2), :]\n x_p2 = x[:, :, int(height/2):, :]\n\n # --- part1 分支 ---\n x_p1_1 = self.bottleneck2(x_p1)\n x_p1_2, x_p1_3, x_p1_4 = self.PBN3(x_p1_1)\n\n # --- part2 分支 ---\n x_p2_1 = self.bottleneck3(x_p2)\n x_p2_2, x_p2_3, x_p2_4 = self.PBN4(x_p2_1)\n\n predict = []\n triplet_features = []\n softmax_features = []\n\n # add layer3 feature\n softmax_features.append(x3_4)\n triplet_features.append(x3_2)\n predict.append(x3_3)\n \n # add global feature\n softmax_features.append(x4_3)\n triplet_features.append(x4_1)\n predict.append(x4_2)\n \n # add part1 feature\n softmax_features.append(x_p1_4)\n triplet_features.append(x_p1_2)\n predict.append(x_p1_3)\n\n # add part2 feature\n softmax_features.append(x_p2_4)\n triplet_features.append(x_p2_2)\n predict.append(x_p2_3) \n\n if self.training:\n return triplet_features, softmax_features\n else:\n return torch.cat(predict, 1)\n\n def get_optim_policy(self):\n params = [\n {'params': self.backbone.parameters()},\n {'params': self.res_part.parameters()},\n {'params': self.global_reduction.parameters()},\n {'params': self.global_softmax.parameters()},\n {'params': self.res_part2.parameters()},\n {'params': self.reduction.parameters()},\n {'params': self.softmax.parameters()},\n ]\n return params\n\nclass Resnet(nn.Module):\n def __init__(self, num_classes, resnet=None):\n super(Resnet, self).__init__()\n if not resnet:\n resnet = resnet50(pretrained=True)\n self.backbone = nn.Sequential(\n resnet.conv1,\n resnet.bn1,\n resnet.relu,\n resnet.maxpool,\n resnet.layer1, # res_conv2\n resnet.layer2, # res_conv3\n resnet.layer3, # res_conv4\n resnet.layer4\n )\n self.global_avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.softmax = nn.Linear(2048, num_classes)\n\n def forward(self, x):\n \"\"\"\n :param x: input image tensor of (N, C, H, W)\n :return: (prediction, triplet_losses, softmax_losses)\n \"\"\"\n x = self.backbone(x)\n\n x = self.global_avgpool(x).squeeze()\n feature = self.softmax(x)\n if self.training:\n return [], [feature]\n else:\n return feature\n\n def get_optim_policy(self):\n return self.parameters()\n\nclass IDE(nn.Module):\n def __init__(self, num_classes, resnet=None):\n super(IDE, 
self).__init__()\n if not resnet:\n resnet = resnet50(pretrained=True)\n self.backbone = nn.Sequential(\n resnet.conv1,\n resnet.bn1,\n resnet.relu,\n resnet.maxpool,\n resnet.layer1, # res_conv2\n resnet.layer2, # res_conv3\n resnet.layer3, # res_conv4\n resnet.layer4\n )\n self.global_avgpool = nn.AvgPool2d(kernel_size=(12, 4))\n\n def forward(self, x):\n \"\"\"\n :param x: input image tensor of (N, C, H, W)\n :return: (prediction, triplet_losses, softmax_losses)\n \"\"\"\n x = self.backbone(x)\n\n feature = self.global_avgpool(x).squeeze()\n if self.training:\n return [feature], []\n else:\n return feature\n\n def get_optim_policy(self):\n return self.parameters()","sub_path":"备份/networks_02.py","file_name":"networks_02.py","file_ext":"py","file_size_in_byte":12892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"497547938","text":"# Annie Sauer\n# October 24, 2018\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Create Newton Method function\ndef newton_method(x0, t, n, f, g):\n # INPUTS\n # x0: initial guess\n # t: tolerance\n # n: maximum number of iterations\n # f: function f(x)\n # g: derivative of f(x)\n # OUTPUTS\n # Estimated x value such that f(x) = 0\n # Tolerance (t)\n # Scatter plot of iteration values\n \n # Initialize vector of iterations and count\n x_iterations = [x0]\n count = 0\n while (f(x0) > t or f(x0) < -t):\n # Update estimate\n x0 = x0 - f(x0)/g(x0)\n # Add one to count\n count = count + 1\n # Add updated estimate to iteration vector\n x_iterations.append(x0)\n # Break loop if maximum number of iterations is reached\n if count >= n:\n print('Maximum Iterations Reached')\n break\n # Print x and t\n print('Estimated zero at x = ', x0)\n print('Tolerance = ', t)\n print('Number of Iterations = ', count)\n return(x_iterations)\n\n# Original function f(x)\ndef f(x):\n return(3**(x) - np.sin(x) + np.cos(5*x))\n\n# Derivative function g(x)\ndef g(x):\n return(np.log(3)*3**(x) - np.cos(x) - 5*np.sin(5*x))\n\n# Initial guess\nx0 = 1\n\n# Tolerance\nt = 0.0001\n\n# Maximum number of iterations\nn = 1000\n\n# Call function\nx_iterations = newton_method(x0,t,n,f,g)\n\n# Plot\nplt.plot(range(len(x_iterations)), x_iterations)\n","sub_path":"gradientDescent.py","file_name":"gradientDescent.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"234016960","text":"# coding: utf-8\nimport sys\nimport MeCab\n\nf = open(\"temp/neko_mecab.txt\",encoding='UTF-8')\ndatas = f.read()\nf.close\n\nlines = datas.split(\"\\n\")\n\nsentence = []\nnowNum = 0\nmaxNum = 0\nmaxSentence = []\nfor l in lines:\n line = l.split(\"\\t\")\n if len((line)) == 6 and line[0]!=\"——\":\n lineMap = {'surface':line[0], 'base':line[2], 'pos':line[3], 'pos1':line[4]}\n if line[0] == \"。\":\n for s in range(0,(len(sentence)-4)):\n if sentence[s][\"pos\"].find(\"名詞\") >= 0:\n if sentence[s+1][\"pos\"].find(\"名詞\") >= 0:\n if sentence[s+2][\"pos\"].find(\"名詞\") >= 0:\n if sentence[s+3][\"pos\"].find(\"名詞\") >= 0:\n if sentence[s+4][\"pos\"].find(\"名詞\") >= 0:\n nowNum += 1\n if maxNum < nowNum:\n maxNum = nowNum\n for s2 in sentence:\n if s2[\"pos\"].find(\"名詞\") > -1:\n print(s2[\"base\"])\n nowNum = 0\n sentence = []\n else:\n sentence.append(lineMap)\n","sub_path":"language100/035.py","file_name":"035.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} 
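Editor's note: the records in this dump are JSON Lines, one JSON object per line of the underlying .jsonl file; each object carries a raw source file in its "text" field alongside metadata fields visible in the records above (seq_id, sub_path, file_name, file_ext, program_lang, file_size_in_byte, ...). The sketch below is a minimal, hedged example of loading and sanity-checking such records, not part of the corpus itself: the path DATASET_PATH is a hypothetical placeholder for the actual file, and the integrity check assumes file_size_in_byte was measured on the UTF-8 encoding of "text".

import json

# Hypothetical path: substitute the real JSONL file this diff adds.
DATASET_PATH = "dataset.jsonl"

with open(DATASET_PATH, encoding="utf-8") as fh:
    for line in fh:
        rec = json.loads(line)
        # "text" holds the complete source file as one escaped string.
        source = rec["text"]
        # Assumption: the recorded size is the UTF-8 byte length of "text";
        # a mismatch would suggest a truncated or re-encoded record.
        intact = len(source.encode("utf-8")) == rec["file_size_in_byte"]
        print(rec["seq_id"], rec["sub_path"], rec["program_lang"], intact)

Because each record is self-delimiting JSON on its own line, this streaming loop never needs to hold the whole corpus in memory, which is the usual reason JSONL is chosen for dumps of this size.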
+{"seq_id":"232062411","text":"import os\nimport vtk\nimport pickle\nimport glob\nimport scipy.io as sio\nimport numpy as np\nimport json\nimport csv\nfrom collections import OrderedDict\nimport tensorflow as tf\n\nclass inputData():\n def __init__(self, parent = None, num_points_param = 1002, num_classes_param = 7):\n if parent:\n parent.title = \" \"\n\n self.NUM_POINTS = num_points_param\n self.NUM_CLASSES = num_classes_param\n self.NUM_FEATURES = 3 + self.NUM_CLASSES + 4 +19 # Normals + NUM_CLASSES + curvatures\n\n\n #\n # Function get_folder_classes_list(datasetPath)\n # \tFor a given folder, return the list of subfolders\n #\n def get_folder_classes_list(self, datasetPath):\n dataset_folders = [os.path.join(datasetPath, d) for d in sorted(os.listdir(datasetPath)) if os.path.isdir(os.path.join(datasetPath, d))]\n\n # Delete .DS_Store file if there is one\n if dataset_folders.count(str(datasetPath) + \".DS_Store\"):\n dataset_folders.remove(str(datasetPath) + \".DS_Store\")\n\n return dataset_folders\n\n def get_vtklist(self, classFolders):\n\n vtk_list_classes = OrderedDict()\n \n for classfolder in classFolders:\n vtk_list_classes[classfolder] = glob.glob(os.path.join(classfolder, \"*.vtk\"))\n\n \n return vtk_list_classes\n #\n # Gets all the scalar array names in the polydata for the points\n #\n def get_points_array_names(self, geometry):\n arraynames = []\n pointdata = geometry.GetPointData()\n for i in range(pointdata.GetNumberOfArrays()):\n arraynames.append(pointdata.GetArrayName(i))\n return np.array(arraynames)\n\n #\n # Gets all the scalar array names in the polydata for polys\n #\n def get_polys_array_names(self, geometry):\n arraynames = []\n celldata = geometry.GetCellData()\n for i in range(celldata.GetNumberOfArrays()):\n arraynames.append(celldata.GetArrayName(i))\n return np.array(arraynames)\n\n #\n # Function load_features(file)\n # Load the shape stored in the filename \"shape\" and extract features (normals + mean distances + curvatures), stored in a 2D array (currentData)\n # Features are normalized (normals are already done, in previous program SurfaceFeaturesExtractor with vtkPolyDataNormals)\n #\n def load_features(self, shape, feature_points = None, feature_polys = None):\n\n try:\n extracted_feature_info=dict()\n\n print(\"Reading:\", shape)\n reader_poly = vtk.vtkPolyDataReader()\n reader_poly.SetFileName(shape)\n # print \"shape : \" + shape\n\n reader_poly.Update()\n geometry = reader_poly.GetOutput()\n if not geometry.GetNumberOfPoints() == self.NUM_POINTS:\n raise Exception('Unexpected number of points in the shape: ' + str(geometry.GetNumberOfPoints()) + ' vs. ' + str(self.NUM_POINTS))\n\n if feature_points and feature_polys:\n print(\"WARNING!!! 
You have set both feature_points and feature_polys, extracting feature_points only!!!\")\n \n extracted_feature_points = []\n features = []\n\n if feature_points:\n\n #Initialize an array with the same number of points\n for i in range(geometry.GetNumberOfPoints()):\n features.append([])\n\n #Get all array names\n arraynames = self.get_points_array_names(geometry)\n\n # --------------------------------- #\n # ----- GET ARRAY OF FEATURES ----- #\n # --------------------------------- #\n\n #Iterate over the featues we want to extract from the polydata\n for feature_name in feature_points:\n\n if feature_name == \"Points\":\n extracted_feature_points.append(\"Points\")\n\n points=[]\n for i in range(geometry.GetNumberOfPoints()):\n scalartup = geometry.GetPoint(i)\n features[i].extend(scalartup)\n\n extracted_feature_info['Points']=dict()\n extracted_feature_info['Points']['length']=len(scalartup)\n \n # Get the 'real names' of the scalar arrays by matching the features_name to the real array name in arraynames'\n else:\n reallarraynames = [name for name in arraynames if feature_name in name]\n\n #When the real scalar array name is extracted, we iterate over the real ones, i.e., if the extract_features_name is 'distanceGroup',\n #the array has ['distanceGroup0', 'distanceGroup1', ...]\n for arrayname in reallarraynames:\n scalararray = geometry.GetPointData().GetScalars(arrayname)\n if scalararray:\n extracted_feature_points.append(arrayname)\n for i in range(0, scalararray.GetNumberOfTuples()):\n scalartup = scalararray.GetTuple(i)\n features[i].extend(scalartup)\n extracted_feature_info[arrayname]=dict()\n extracted_feature_info[arrayname]['length']=len(scalartup)\n\n extracted_feature_info['number_of_points']=geometry.GetNumberOfPoints()\n extracted_feature_info['extraction_order']=extracted_feature_points\n features = np.array(features)\n features=features.reshape(-1)\n print('\\tThe following features were extracted', extracted_feature_points)\n print('\\tfeatures shape',np.shape(features))\n\n return features , extracted_feature_info\n\n elif feature_polys:\n\n polys = geometry.GetPolys()\n\n pointidlist = vtk.vtkIdList()\n #Initialize an array with the same number of points\n for i in range(geometry.GetNumberOfCells()):\n features.append([])\n\n #Get all array names\n arraynames = self.get_polys_array_names(geometry)\n\n for feature_name in feature_polys:\n \n if feature_name == \"Points\":\n extracted_feature_points.append(\"Points\")\n\n for ci in range(geometry.GetNumberOfPolys()):\n geometry.GetCellPoints(ci, pointidlist)\n\n scalartup=[]\n for pid in range(pointidlist.GetNumberOfIds()):\n point = geometry.GetPoint(pointidlist.GetId(pid))\n scalartup.extend(point)\n\n features[ci].extend(scalartup)\n\n\n extracted_feature_info['Points']=dict()\n extracted_feature_info['Points']['length']=len(scalartup)\n \n # Get the 'real names' of the scalar arrays by matching the features_name to the real array name in arraynames'\n else:\n reallarraynames = [name for name in arraynames if feature_name in name]\n\n #When the real scalar array name is extracted, we iterate over the real ones, i.e., if the extract_features_name is 'distanceGroup',\n #the array has ['distanceGroup0', 'distanceGroup1', ...]\n for arrayname in reallarraynames:\n scalararray = geometry.GetCellData().GetScalars(arrayname)\n if scalararray:\n extracted_feature_points.append(arrayname)\n for ci in range(geometry.GetNumberOfPolys()):\n\n scalartup = scalararray.GetTuple(ci)\n features[ci].extend(scalartup)\n 
extracted_feature_info[arrayname]=dict()\n extracted_feature_info[arrayname]['length']=len(scalartup)\n\n extracted_feature_info['number_of_cells']=geometry.GetNumberOfCells()\n extracted_feature_info['extraction_order']=extracted_feature_points\n features = np.array(features)\n features=features.reshape(-1)\n print('\\tThe following features were extracted', extracted_feature_points)\n print('\\tfeatures shape',np.shape(features))\n return features ,extracted_feature_info\n\n else:\n raise Exception('You must set one of feature_polys or feature_polys to extract data from the shape')\n\n except IOError as e:\n print('Could not read:', shape, ':', e, '- it\\'s ok, skipping.')\n\n\n\n def load_features_class(self, vtklist, min_num_shapes=1, feature_points = None, feature_polys = None):\n\n vtk_filenames = vtklist\n dataset = []\n\n for shape in vtk_filenames:\n # Prepare data\n features ,extracted_feature_info = self.load_features(shape, feature_points=feature_points, feature_polys = feature_polys)\n if features is not None:\n dataset.append(features)\n \n dataset = np.array(dataset)\n\n if np.shape(dataset)[0] < min_num_shapes:\n raise Exception('Fewer samples than expected: %d < %d' % (np.shape(dataset)[0], min_num_shapes))\n \n return dataset, extracted_feature_info\n\n #\n # Function maybe_pickle(data_folders, min_num_shapes_per_class, force=False)\n # Pickle features array sorted by class\n #\n def maybe_pickle(self, classFolders, min_num_shapes_per_class, force=False, feature_points = None, feature_polys = False):\n \n dataset_names = []\n vtkdict = self.get_vtklist(classFolders)\n\n for classfolder, vtklist in vtkdict.items():\n set_filename = classfolder + '.pickle'\n dataset_names.append(set_filename)\n\n description_path=os.path.join(os.path.dirname(set_filename),'extraction_description.json')\n\n if os.path.exists(set_filename) and not force:\n # You may override by setting force=True.\n print('%s already present - Skipping pickling.' % set_filename)\n else:\n\n print('Pickling %s.' 
% set_filename)\n dataset,extracted_feature_info = self.load_features_class(vtklist, min_num_shapes_per_class, feature_points=feature_points, feature_polys=feature_polys)\n\n try:\n with open(set_filename, 'wb') as f:\n pickle.dump(dataset, f, pickle.HIGHEST_PROTOCOL)\n except Exception as e:\n print('Unable to save data to', set_filename, ':', e)\n\n\n \n try:\n with open(description_path, 'w') as f:\n json.dump(extracted_feature_info, f,indent = 4)\n except Exception as e:\n print('Unable to save extraction description to', description_path, ':', e)\n \n \n return dataset_names , description_path\n\n #\n # Function randomize(dataset, labels)\n # Randomize the data and their labels\n #\n def randomize(self,dataset, labels):\n permutation = np.random.permutation(labels.shape[0])\n shuffled_dataset = dataset[permutation]\n\n shuffled_labels = labels[permutation]\n return shuffled_dataset, shuffled_labels\n\n def writeTFRecord(self,feature_data,path):\n #write a TFRecord at the defined path containing the features\n\n #convert features in the correct format\n for feature_name , value in feature_data.items():\n if value.dtype == 'float64':\n feature_data[feature_name]=self._float_feature(value.tolist())\n elif value.dtype == 'int64':\n feature_data[feature_name]=self._int64_feature(value.tolist())\n else :\n feature_data[feature_name]=self._bytes_feature(value.tolist())\n\n #Create a tensorflow global feature containing all the features\n features_data = tf.train.Features(feature=feature_data)\n\n #create the tensorflow example\n features_example=tf.train.Example(features=features_data)\n\n #write the example in a tfRecord\n try:\n os.mkdir(os.path.dirname(path))\n except:\n pass\n with tf.python_io.TFRecordWriter(path) as writer:\n writer.write(features_example.SerializeToString())\n print('Saved record: '+path)\n \n def writeRecords(self,record_dir,input_features,target_features,start_id=0,file_name_prefix='TFR_'):\n #write all TFRecords in the dataset_path in a folder named dir_name\n #each TFRecord contain a row of input_features and his associated row in target_features\n\n #create folder\n try:\n os.mkdir(os.path.dirname(record_dir))\n except:\n pass\n\n record_list=[]\n\n for i in range(input_features.shape[0]):\n #extract the features\n feature_dict=dict()\n feature_dict['input']=input_features[i,:]\n\n if len(target_features.shape)==1:\n feature_dict['output']=target_features[i]\n if len(target_features.shape)==2:\n feature_dict['output']=target_features[i,:]\n\n #generate file name\n record_path=os.path.join(record_dir,file_name_prefix+str(start_id+i)+'.tfrecord')\n self.writeTFRecord(feature_dict,record_path)\n\n record_list.append(record_path)\n\n return record_list\n\n #numpy array to tfrecord conversion\n def _int64_feature(self,value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(int64_list=tf.train.Int64List(value=value))\n def _float_feature(self,value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(float_list=tf.train.FloatList(value=value))\n def _bytes_feature(self,value):\n if not isinstance(value, list):\n value = [value]\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=value))\n\n def TFRecord_dataset(self, csvFile,records_path, min_num_shapes_per_class=5, force=False, feature_points = None, feature_polys = None,feature_points_output = None,feature_polys_output=None):\n #read CSV file, determine if classification or generation dataset\n with open(csvFile, mode='r') as csv_file:\n csv_reader = 
csv.DictReader(csv_file)\n\n column_names = []\n \n dataset_type=None\n\n for row in csv_reader:\n #get columns names and #identify dataset type\n if len(column_names)==0:\n for name,v in row.items():\n column_names.append(name) \n if 'VTK Files' in column_names and 'Group' in column_names:\n dataset_type='classification'\n print('dataset type : '+ dataset_type)\n class_names=[]\n dict_vtkfiles=dict()\n elif 'Input VTK Files' in column_names and 'Output VTK Files' in column_names:\n dataset_type='generation'\n print('dataset type : '+ dataset_type)\n vtkfiles_in=[]\n vtkfiles_out=[]\n else:\n print('Impossible to determine the dataset type!')\n print(\"For classification type, the csv file should contain a row named 'VTK Files' and 'Group'.\")\n print(\"For generation type, the csv file should contain a row named 'input VTK File' and 'output VTK File'.\")\n raise Exception('Unknown dataset type')\n return \n\n if dataset_type=='classification':\n group=row['Group']\n vtkfile=row['VTK Files']\n if group not in class_names:\n class_names.append(group)\n dict_vtkfiles[group]=[]\n dict_vtkfiles[group].append(vtkfile)\n\n\n elif dataset_type=='generation':\n vtkfiles_in.append(row['Input VTK Files'])\n vtkfiles_out.append(row['Output VTK Files'])\n \n\n\n \n if dataset_type=='classification':\n #associating numbers to class_names\n #class correspondance\n class_corres=dict()\n for label, name in enumerate(class_names):\n class_corres[name]=label\n\n #extract features for each class \n ##check for existing features\n if (feature_points == None and feature_polys == None):\n raise Exception('No Features specified')\n return\n\n ##extract feature points\n if (feature_points != None):\n data_feature_points=dict()\n for name in class_names:\n vtklist=dict_vtkfiles[name]\n dataset, extracted_feature_points_info=self.load_features_class(vtklist, min_num_shapes=min_num_shapes_per_class, feature_points = feature_points)\n data_feature_points[name]=dataset \n\n ##extract feature polys\n if (feature_polys != None):\n data_feature_polys=dict()\n for name in class_names:\n vtklist=dict_vtkfiles[name]\n dataset, extracted_feature_polys_info=self.load_features_class(vtklist, min_num_shapes=min_num_shapes_per_class, feature_polys = feature_polys)\n data_feature_polys[name]=dataset\n\n ##if necessary, concatenate data\n if (feature_polys != None and feature_points != None):\n data_feature=dict()\n for name in class_names:\n data_feature[name]=np.concatenate((data_feature_points[name],data_feature_polys[name]),axis=1)\n elif (feature_polys != None):\n data_feature=data_feature_polys\n elif (feature_points != None):\n data_feature=data_feature_points\n else:\n raise Exception(\"Unexpected error\")\n return\n\n\n #generate labels for each class\n data_labels=dict()\n for name in class_names:\n label = class_corres[name]\n shape_number=data_feature[name].shape[0]\n\n label_array=np.zeros((shape_number,))\n label_array+=label\n data_labels[name]=label_array\n\n #write TFRecords\n start_id=0\n dict_tfr=dict()\n for name in class_names:\n \n dict_tfr[name]=self.writeRecords(records_path,data_feature[name],data_labels[name],start_id=start_id)\n start_id += len(dict_tfr[name])\n\n #add tfrecord path to the input csv file, save it in records_path\n output_csv=os.path.join(records_path,os.path.basename(csvFile))\n with open(csvFile,'r') as csvinput:\n with open(output_csv, 'w') as csvoutput:\n writer = csv.writer(csvoutput, lineterminator='\\n')\n reader = csv.reader(csvinput)\n\n all = []\n row = next(reader)\n 
row.append('TFRecord Files')\n all.append(row)\n\n vtk_file_index=row.index('VTK Files')\n class_index=row.index('Group')\n for row in reader:\n vtk_file=row[vtk_file_index]\n group=row[class_index]\n tfrecord_index=dict_vtkfiles[group].index(vtk_file)\n row.append(dict_tfr[group][tfrecord_index])\n all.append(row)\n\n writer.writerows(all)\n\n #create dataset description\n totalnumber=0\n dict_example_number=dict()\n for name in class_names:\n dict_example_number[name]=len(dict_vtkfiles[name])\n totalnumber+=len(dict_vtkfiles[name])\n\n dict_dataset_description=dict()\n dict_dataset_description['dataset_type']=dataset_type\n dict_dataset_description['files_description']=output_csv\n dict_dataset_description['original_files_description']=csvFile\n dict_dataset_description['class_names']=class_names\n dict_dataset_description['class_correspondence']=class_corres\n dict_dataset_description['examples_number']=totalnumber\n dict_dataset_description['examples_per_class']=dict_example_number\n try:\n dict_dataset_description['extracted_feature_points_info']=extracted_feature_points_info\n except:\n dict_dataset_description['extracted_feature_points_info']= None\n try:\n dict_dataset_description['extracted_feature_polys_info']=extracted_feature_polys_info\n except:\n dict_dataset_description['extracted_feature_polys_info']=None\n\n description_path=os.path.join(records_path,'dataset_description.json')\n try:\n with open(description_path, 'w') as f:\n json.dump(dict_dataset_description, f,indent = 4)\n except Exception as e:\n print('Unable to save extraction description to', description_path, ':', e)\n\n return description_path\n\n\n\n elif dataset_type=='generation':\n #extract features for each shape\n ##check for existing features\n if (feature_points == None and feature_polys == None):\n raise Exception('No features to extract from input shape')\n return\n\n if (feature_points_output == None and feature_polys_output == None):\n raise Exception('No features to extract from output shape')\n return\n\n # dict_vtkfiles_in\n # dict_vtkfiles_out\n\n ##extract input feature points\n if (feature_points != None):\n input_data_feature_points=[]\n for file_name in vtkfiles_in:\n feature, extracted_input_feature_points_info=self.load_features(file_name, feature_points = feature_points)\n input_data_feature_points.append(feature) \n\n ##extract input feature polys\n if (feature_polys != None):\n input_data_feature_polys=[]\n for file_name in vtkfiles_in:\n feature, extracted_input_feature_polys_info=self.load_features(file_name, feature_polys = feature_polys)\n input_data_feature_polys.append(feature)\n\n ##extract output feature points\n if (feature_points_output != None):\n output_data_feature_points=[]\n for file_name in vtkfiles_out:\n feature, extracted_output_feature_points_info=self.load_features(file_name, feature_points = feature_points_output)\n output_data_feature_points.append(feature) \n\n ##extract output feature polys\n if (feature_polys_output != None):\n output_data_feature_polys=[]\n for file_name in vtkfiles_out:\n feature, extracted_output_feature_polys_info=self.load_features(file_name, feature_polys = feature_polys_output)\n output_data_feature_polys.append(feature)\n\n ##if necessary, concatenate data\n ##input\n if (feature_polys != None and feature_points != None):\n input_data_feature=[]\n for i in range(len(input_data_feature_polys)):\n input_data_feature.append(np.concatenate((input_data_feature_points[i],input_data_feature_polys[i]),axis=0))\n elif (feature_polys != None):\n 
input_data_feature=input_data_feature_polys\n elif (feature_points != None):\n input_data_feature=input_data_feature_points\n else:\n raise Exception(\"Unexpected error\")\n return\n ##output\n if (feature_polys_output != None and feature_points_output != None):\n output_data_feature=[]\n for i in range(len(input_data_feature_polys)):\n output_data_feature.append(np.concatenate((output_data_feature_points[i],output_data_feature_polys[i]),axis=0))\n elif (feature_polys_output != None):\n output_data_feature=output_data_feature_polys\n elif (feature_points_output != None):\n output_data_feature=output_data_feature_points\n else:\n raise Exception(\"Unexpected error\")\n return\n\n\n input_data_feature=np.array(input_data_feature)\n output_data_feature=np.array(output_data_feature)\n\n #write TFRecords\n\n \n tfr_list=self.writeRecords(records_path,input_data_feature,output_data_feature,start_id=0)\n\n #add tfrecord path to the input csv file, save it in records_path\n output_csv=os.path.join(records_path,os.path.basename(csvFile))\n with open(csvFile,'r') as csvinput:\n with open(output_csv, 'w') as csvoutput:\n writer = csv.writer(csvoutput, lineterminator='\\n')\n reader = csv.reader(csvinput)\n\n all = []\n row = next(reader)\n row.append('TFRecord Files')\n all.append(row)\n\n input_file=row.index('Input VTK Files')\n for row in reader:\n vtk_file=row[input_file]\n tfrecord_index=vtkfiles_in.index(vtk_file)\n row.append(tfr_list[tfrecord_index])\n all.append(row)\n\n writer.writerows(all)\n\n #create dataset description\n\n dict_dataset_description=dict()\n dict_dataset_description['dataset_type']=dataset_type\n dict_dataset_description['files_description']=output_csv\n dict_dataset_description['original_files_description']=csvFile\n dict_dataset_description['examples_number']=len(vtkfiles_in)\n try:\n dict_dataset_description['extracted_input_feature_points_info']=extracted_input_feature_points_info\n except:\n dict_dataset_description['extracted_input_feature_points_info']= None\n try:\n dict_dataset_description['extracted_input_feature_polys_info']=extracted_input_feature_polys_info\n except:\n dict_dataset_description['extracted_input_feature_polys_info']=None\n try:\n dict_dataset_description['extracted_output_feature_points_info']=extracted_output_feature_points_info\n except:\n dict_dataset_description['extracted_output_feature_points_info']= None\n try:\n dict_dataset_description['extracted_output_feature_polys_info']=extracted_output_feature_polys_info\n except:\n dict_dataset_description['extracted_output_feature_polys_info']=None\n\n\n\n description_path=os.path.join(records_path,'dataset_description.json')\n try:\n with open(description_path, 'w') as f:\n json.dump(dict_dataset_description, f,indent = 4)\n except Exception as e:\n print('Unable to save extraction description to', description_path, ':', e)\n\n return description_path\n\n\n\n\n\n return 'humm la bonne description'\n\n","sub_path":"src/py/generatelib/inputData.py","file_name":"inputData.py","file_ext":"py","file_size_in_byte":28273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"550276263","text":"import os\nimport subprocess\nimport re\nimport shutil\nimport sys\nimport socket\nimport time\nimport itertools\nimport bisect\n\nAPPLICATIONS = {\n 'H265': ['RVC', 'org.sc29.wg11.mpegh.part2.Top_mpegh_part2_main_no_md5'],\n 'H265-OPT': ['Research', 'org.ietr.mpegh.part2.Top_mpegh_part2_main_no_md5'],\n 'H264-CBP': ['RVC', 
'org.sc29.wg11.mpeg4.part10.cbp.Top_mpeg4_part10_CBP_decoder'],\n 'H264-PHP': ['RVC', 'org.sc29.wg11.mpeg4.part10.php.Top_mpeg4_part10_PHP_decoder'],\n 'MPEG4-SP': ['RVC', 'org.sc29.wg11.mpeg4.part2.sp.Top_mpeg4_part2_SP_decoder'],\n 'MPEG4-SP-ALF': ['RVC', 'org.sc29.wg11.mpeg4.part2.sp.Top_mpeg4_part2_SP_decoder'],\n 'STEREO' : ['Stereo', 'stereo.Top_stereo_v2']\n}\n\n# Magic variable\nRESULTS_FOLDER_PREFIX = 'results-'\nRESULTS_FOLDER = RESULTS_FOLDER_PREFIX + socket.gethostname() + '-' + time.strftime(\"%Y%m%d-%H%M%S\")\n\nFIFO_SIZES = [8192, 65536, 262144]\nMAPPING_STRATEGIES = ['RR', 'WLB'] # KLR segfaults in the do_mapping function\nSCHEDULING_STRATEGIES = ['RR', 'DD']\n\nC_APPS = os.path.join('.' , 'applications/c-generated')\nORCC_APPS = 'orc-apps'\n\ndef get_all_apps():\n return get_apps_from_names(APPLICATIONS.keys(), FIFO_SIZES)\n \ndef get_apps_from_names(appNames, fifoSizes):\n apps = []\n for appName in appNames:\n appPath = os.path.join(C_APPS, appName)\n apps.append(App(appName,\n APPLICATIONS[appName][0],\n APPLICATIONS[appName][1],\n fifoSizes))\n return apps\n\nclass CommonEqualityMixin(object):\n\n def __eq__(self, other):\n return (isinstance(other, self.__class__)\n and self.__dict__ == other.__dict__)\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n def __hash__(self):\n \"\"\"Override the default hash behavior (that returns the id or the object)\"\"\"\n return hash(tuple(sorted(self.__dict__.items())))\n\nclass App:\n \n def __init__(self, name, srcProjectName, network, fifoSizes):\n self.name = name\n self.fifoSizes = fifoSizes\n dirName = os.path.dirname(os.path.realpath(__file__))\n self.srcProjectName = srcProjectName\n self.srcProjectPath = os.path.join(dirName, 'applications/' + ORCC_APPS + '/' + srcProjectName)\n self.cGeneratedPath = os.path.join(dirName, 'applications/c-generated/' + self.name)\n self.network = network\n self.inputs = []\n self.symTab = {}\n self.addrToSym = {}\n self.symTabAddrs = {}\n\n # Parse txt file containing input files to be used\n inputsFile = open(os.path.join(self.cGeneratedPath, '.inputs'), 'r')\n cpt = 1\n for input in inputsFile:\n inputStripped = input.strip()\n i = os.path.join('.', 'inputs')\n for p in inputStripped.split('::'):\n i = os.path.join(i, p)\n self.inputs.append(os.path.abspath(i))\n cpt = cpt + 1\n inputsFile.close()\n\n # Build the symbol table using objdump\n ADDR_IDX = 0\n SEC_IDX = 3\n SIZE_IDX = 4\n NAME_IDX = 5\n for fifoSize in self.fifoSizes:\n if not self.__getBin(fifoSize):\n continue\n self.symTab[fifoSize] = []\n cmd = ['objdump', '-t', self.__getBin(fifoSize)]\n objdump = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out = objdump.communicate()\n if objdump.returncode != 0:\n raise NameError('Error while building symbol table')\n stderr = out[1]\n stdout = out[0]\n stdout = stdout.decode('utf-8') # convert bytes stream returned by communicate to standard string\n for symb in stdout.split('\\n'):\n splited = ' '.join(symb.split()).split(' ')\n if len(splited) >= 6:\n self.symTab[fifoSize].append(Symbol(int(splited[ADDR_IDX], 16), splited[SEC_IDX], int(splited[SIZE_IDX], 16), splited[NAME_IDX]))\n self.addrToSym[fifoSize] = {sym.addr: sym for sym in self.symTab[fifoSize]}\n self.symTabAddrs[fifoSize] = sorted(sym.addr for sym in self.symTab[fifoSize])\n\n def __str__(self):\n msg = 'App: '\n msg = msg + self.name\n msg = msg + ', fs=' + str(self.fifoSizes)\n return msg\n\n def __getFifoFolder(self, fifoSize):\n fifoFolder = os.path.join(self.cGeneratedPath, 
str(fifoSize))\n return fifoFolder\n\n def __getBinFolder(self, fifoSize):\n fifoFolder = self.__getFifoFolder(fifoSize)\n return os.path.join(fifoFolder, 'bin')\n\n def __getBuildFolder(self, fifoSize):\n fifoFolder = self.__getFifoFolder(fifoSize)\n return os.path.join(fifoFolder, 'build')\n\n def __getBin(self, fifoSize):\n appBin = None\n binFolder = self.__getBinFolder(fifoSize)\n if not os.path.isdir(binFolder):\n return None\n for file_object in os.listdir(binFolder):\n if not file_object.startswith('.'):\n appBin = os.path.join(binFolder, file_object)\n appBin = os.path.abspath(appBin)\n break\n return appBin\n\n @staticmethod\n def __getWorkMessage(fifoSize, nbCores, ms):\n msg = 'cores : {0:2} mapping : {1:3} fifo size : {2:6} ...'.format(str(nbCores), ms, str(fifoSize))\n return msg\n\n @staticmethod\n def __getMappingFileName(ms, fifoSize, nbCores):\n return '.' + ms + '-' + str(nbCores) + '-' + str(fifoSize) + '.xml'\n\n def __getMappingFile(self, ms, fifoSize, nbCores):\n return os.path.join(self.cGeneratedPath, App.__getMappingFileName(ms, fifoSize, nbCores))\n\n @staticmethod\n def getAppByName(apps, name):\n for app in apps:\n if app.name == name:\n return app\n return None\n\n def buildBin(self, sse, silent=False):\n print('Building cmake and binaries file for ' + self.name + ':')\n for fifoSize in self.fifoSizes:\n build = self.__getBuildFolder(fifoSize)\n if not os.path.exists(build):\n print('Nothing to build for ' + self.name + ' (generate C files first with -gC)')\n return\n print(' fifo size : ' + str(fifoSize) + ' ...'),\n sys.stdout.flush()\n cmd = ['cmake', '..', '-DCMAKE_BUILD_TYPE=Release', '-DMEMORY_SAMPLING_ENABLE=True']\n if sse or self.name == 'H265-OPT':\n cmd.append('-DUSE_SSE=ON')\n if silent:\n try:\n subprocess.check_output(cmd, cwd=build, stderr=subprocess.STDOUT)\n except CalledProcessError as e:\n assert False, 'cmake return code is ' + e.returncode + e.output\n try:\n subprocess.check_output(['make', '-j', str(os.sysconf(\"SC_NPROCESSORS_ONLN\"))], cwd=build, stderr=subprocess.STDOUT)\n except CalledProcessError as e:\n assert False, 'make return code is ' + str(e.returncode) + ':\\n ' + e.output\n else:\n print(cmd)\n cmake = subprocess.Popen(cmd, cwd=build)\n assert cmake.wait() == 0\n make = subprocess.Popen(['make'], cwd=build)\n assert make.wait() == 0\n print('done')\n\n def cleanBin(self):\n print('Cleaning cmake and binaries file for ' + self.name + ' ...'),\n for fifoSize in self.fifoSizes:\n bin = self.__getBinFolder(fifoSize)\n build = self.__getBuildFolder(fifoSize)\n if not os.path.exists(bin):\n print('Nothing to clean for ' + self.name)\n return\n for file_object in os.listdir(bin):\n if not file_object.startswith('.'):\n file_object_path = os.path.join(bin, file_object)\n if os.path.isfile(file_object_path):\n os.unlink(file_object_path)\n else:\n shutil.rmtree(file_object_path)\n for file_object in os.listdir(build):\n if not file_object.startswith('.'):\n file_object_path = os.path.join(build, file_object)\n if os.path.isfile(file_object_path):\n os.unlink(file_object_path)\n else:\n shutil.rmtree(file_object_path)\n print('done')\n\n\n def cleanC(self):\n print('Cleaning all c files (generated by Eclipse) for ' + self.name + ' ...'),\n for fifoSize in self.fifoSizes:\n fifoFolder = self.__getFifoFolder(fifoSize)\n if os.path.isdir(fifoFolder):\n for file_object in os.listdir(fifoFolder):\n if not file_object.startswith('.'):\n file_object_path = os.path.join(fifoFolder, file_object)\n if os.path.isfile(file_object_path):\n 
os.unlink(file_object_path)\n else:\n shutil.rmtree(file_object_path)\n print('done')\n\n def cleanMappingFiles(self):\n print('Cleaning all mapping files for ' + self.name + ' ...'),\n for file_object in os.listdir(self.cGeneratedPath):\n if file_object.startswith('.RR-') or file_object.startswith('.WLB-'):\n file_object_path = os.path.join(self.cGeneratedPath, file_object)\n if os.path.isfile(file_object_path):\n os.unlink(file_object_path)\n else:\n shutil.rmtree(file_object_path)\n print('done')\n\n def genC(self, ci, inline, actionsProf, alignFifos, silent=False):\n print('Generating c files from RVC-Cal source code for ' + self.name + ':')\n work_dir = ci + '/headless_build/work_dir'\n for fifoSize in self.fifoSizes:\n fifoFolder = self.__getFifoFolder(fifoSize)\n if os.path.exists(fifoFolder):\n shutil.rmtree(fifoFolder)\n os.makedirs(fifoFolder)\n additionalOptions = '--fifo-size ' + str(fifoSize) + ' --mem-sampling'\n # + ' --prof'\n if inline:\n additionalOptions = additionalOptions + ' --inline-actor-only'\n if actionsProf:\n additionalOptions = additionalOptions + ' --prof'\n if alignFifos or self.name.endswith('-ALF'):\n additionalOptions = additionalOptions + ' --align-fifo'\n cmd = [ci + '/headless_build/orcc_run_compilation', work_dir, os.path.dirname(self.srcProjectPath), 'c', self.srcProjectName, self.network, fifoFolder, additionalOptions]\n\n print(' fifo size : ' + str(fifoSize) + ' ...'),\n sys.stdout.flush()\n if silent:\n app = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n else:\n print(cmd)\n app = subprocess.Popen(cmd)\n app.wait()\n print('done')\n\n def genMappingFiles(self, nbCores, nbFrames, mappings, silent=True):\n print('Generating mapping files for ' + self.name + ':')\n for core in range(2, max(nbCores) + 1):\n for ms in mappings:\n for fifoSize in self.fifoSizes:\n appBin = self.__getBin(fifoSize)\n path = 'tmp' + self.name\n if os.path.exists(path):\n shutil.rmtree(path)\n os.makedirs(path)\n msFileName = App.__getMappingFileName(ms, fifoSize, core)\n cmd = [appBin, '-i', self.inputs[0], '-n', '-f', str(nbFrames), '-c', str(core), '-q', msFileName, '-s', ms]\n print(' ' + App.__getWorkMessage(fifoSize, core, ms)),\n sys.stdout.flush()\n if silent:\n app = subprocess.Popen(cmd, cwd=path, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n else:\n print(cmd)\n app = subprocess.Popen(cmd, cwd=path)\n app.wait()\n shutil.copyfile(os.path.join(path, msFileName), self.__getMappingFile(ms, fifoSize, core))\n print('done')\n if os.path.exists(path):\n shutil.rmtree(path)\n\n def run(self, appInstance, fifoProf, time, silent):\n\n fifoSize = appInstance.fifoSize\n input = self.inputs[appInstance.input - 1]\n memSampling = appInstance.memLoadsSamplingRate\n memBdwSampling = appInstance.memBdwSamplingRate\n nbFrames = appInstance.nbFrames\n nbCores = appInstance.nbCores\n mapStrategy = appInstance.map\n dataDriven = True if appInstance.scheduling == 'DD' else False,\n appBin = self.__getBin(fifoSize)\n\n path = './' + RESULTS_FOLDER + '/' + self.name + '/' + str(appInstance)\n if os.path.exists(path):\n shutil.rmtree(path)\n os.makedirs(path)\n cmd = [appBin, '-i', input, '-n', '-f', str(nbFrames)]\n if nbCores > 1:\n cmd = cmd + ['-c', str(nbCores)]\n if dataDriven:\n cmd = cmd + ['-e']\n if mapStrategy != 'no' and nbCores > 1:\n mapFile = self.__getMappingFile(mapStrategy, fifoSize, nbCores)\n assert os.path.isfile(mapFile), mapFile + ' doesn\\'t exist'\n cmd = cmd + ['-m', mapFile]\n cmd = cmd + ['-p', 'prof-results']\n if 
memBdwSampling:\n cmd = cmd + ['-g', str(memBdwSampling)]\n if memSampling:\n cmd = cmd + ['-j', 'mem-sampling-results']\n cmd = cmd + ['-k', str(memSampling)]\n if fifoProf:\n cmd = cmd + ['-b', str(1.0E6 / fifoProf)]\n if time:\n cmd = ['time', '-f', 'RES==%E'] + cmd\n if not silent:\n print(cmd)\n stdoutF = open(os.path.join(path, 'stdout'), 'w+')\n stderrF = open(os.path.join(path, 'stderr'), 'w+')\n app = subprocess.Popen(cmd, cwd=path, stdout=stdoutF, stderr=stderrF)\n stdoutF.flush()\n stderrF.flush()\n stdoutF.close()\n stderrF.close()\n if app.wait() != 0:\n stdoutF = open(os.path.join(path, 'stdout'), 'r')\n stderrF = open(os.path.join(path, 'stderr'), 'r')\n print('stdout :\\n' + stdoutF.read()),\n print('stderr :\\n' + stderrF.read())\n stdoutF.close()\n stderrF.close()\n sys.exit(0)\n if not silent:\n stdoutF = open(os.path.join(path, 'stdout'), 'r')\n stderrF = open(os.path.join(path, 'stderr'), 'r')\n print('stdout :\\n' + stdoutF.read()),\n print('stderr :\\n' + stderrF.read())\n stdoutF.close()\n stderrF.close()\n\n return path\n\nclass AppInstance(CommonEqualityMixin):\n\n discriminantFields = ['appName', 'map', 'fifoSize', 'scheduling', 'input', 'nbFrames', 'memLoadsSamplingRate', 'memBdwSamplingRate']\n\n def __init__(self,\n app,\n mapping,\n fifoSize,\n nbCores,\n scheduling,\n input, nbFrames,\n memLoadsSamplingRate,\n memBdwSamplingRate,\n appInstanceStr\n ):\n self.app = app\n\n if appInstanceStr == None:\n self.appName = app.name\n self.map = mapping\n self.scheduling = scheduling\n self.fifoSize = fifoSize\n self.input = input\n self.nbFrames = nbFrames\n self.nbCores = nbCores\n self.memLoadsSamplingRate = memLoadsSamplingRate\n self.memBdwSamplingRate = memBdwSamplingRate\n else:\n for attribute in appInstanceStr.split('_'):\n attributeSplit = attribute.split('=')\n attributeName = attributeSplit[0]\n attributeValue = attributeSplit[1]\n if attributeName == 'APP':\n self.appName = attributeValue\n if attributeName == 'M':\n self.map = attributeValue\n if attributeName == 'S':\n self.scheduling = attributeValue\n if attributeName == 'FS':\n self.fifoSize = int(attributeValue)\n if attributeName == 'I':\n self.input = int(attributeValue)\n if attributeName == 'FR':\n self.nbFrames = int(attributeValue)\n if attributeName == 'C':\n self.nbCores = int(attributeValue)\n if attributeName == 'MLS':\n self.memLoadsSamplingRate = int(attributeValue) if attributeValue != 'None' else None\n if attributeName == 'MBDWS':\n self.memBdwSamplingRate = int(attributeValue) if attributeValue != 'None' else None\n\n def __str__(self):\n msg = 'APP='\n msg = msg + self.appName + '_M=' + self.map + '_S=' + self.scheduling + '_FS=' + str(self.fifoSize)\n msg = msg + '_C=' + str(self.nbCores) + '_I=' + str(self.input)\n msg = msg + '_FR=' + str(self.nbFrames) + '_MBDWS=' + str(self.memBdwSamplingRate) + '_MLS=' + str(self.memLoadsSamplingRate)\n return msg\n \nclass Symbol:\n\n def __init__(self, addr, section, size, name):\n self.addr = addr\n self.section = section\n self.size = size\n self.name = name\n self.regexFifo = re.compile('fifo_(\\d+)')\n self.regexArray = re.compile('array_(\\d+)')\n self.regexReadind = re.compile('read_inds_(\\d+)')\n self.regexIndex = re.compile('_(\\d+)_(I|O)')\n \n def __str__(self):\n msg = self.name + ':'\n msg = msg + ' addr=' + str(self.addr) + ' section=' + str(self.section) + ' size=' + str(self.size)\n return msg\n \n def getFifo(self):\n m = self.regexFifo.search(self.name)\n if m:\n return int(m.groups()[0])\n m = 
self.regexArray.search(self.name)\n        if m:\n            return int(m.groups()[0])\n        m = self.regexReadind.search(self.name)\n        if m:\n            return int(m.groups()[0])\n        m = self.regexIndex.search(self.name)\n        if m:\n            return int(m.groups()[0])\n    \n    @staticmethod\n    def getSym(addr, symTabAddrs, addrToSym):\n        i = bisect.bisect_right(symTabAddrs, addr)\n        if i:\n            closest = symTabAddrs[i-1]\n            sym = addrToSym[closest]\n            if closest <= addr < closest + sym.size:\n                return sym\n        return None","sub_path":"bench/commons.py","file_name":"commons.py","file_ext":"py","file_size_in_byte":18648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"592686146","text":"from bokeh.io import show, curdoc,output_file\nfrom bokeh.plotting import figure\nfrom bokeh.layouts import widgetbox as wb, layout\nfrom bokeh.models import widgets as wd, ColumnDataSource\nfrom bokeh.core.properties import value\nimport string\nimport pymssql\n\nparagraph = wd.Paragraph(text=\"option\")\noptionGroup = wd.RadioGroup(labels=[\"and\", \"or\"], active=0, width=100, inline=True)\nbtnGroupLetters = wd.RadioButtonGroup(labels=list(string.ascii_uppercase), active=-1)\ntitle_input = wd.TextInput(value=\"\", title=\"Title:\", placeholder=\"contains....\")\ndept_input = wd.TextInput(value=\"\",title=\"Department\", placeholder=\"contains....\")\nbtnGroupTitle = wd.RadioButtonGroup(name='title',\n                    labels=[\"begin with...\", \"...contains...\", \"...ends with\"], active=1)\nrefresh = wd.Button(label=\"Refresh\")\nbtnGroupDept = wd.RadioButtonGroup(name='dept',\n                    labels=[\"begin with...\", \"...contains...\", \"...ends with\"], active=1)\n\ndef connectSQLServer():\n    attr = dict(\n        server = '10.20.213.10',\n        database = 'csc1002',\n        user = 'csc1002',\n        password = 'csc1002',\n        port = 1433,\n        as_dict = True\n    )\n    try:\n        return pymssql.connect(**attr)\n    except Exception as e:\n        print(e)\n        quit()\n\ntsql = \"SELECT dept_name FROM lgu.department\"\nsqlConn = connectSQLServer()\nwith sqlConn.cursor(as_dict=True) as cursor:\n    cursor.execute(tsql)\n    departments = cursor.fetchall()\ndeptlist = ['All'] \nfor dept in departments:\n    deptlist.append(dept['dept_name'])\ndeptSelect = wd.Select(title='Department', value='All', options= deptlist)\n\nglobal idx_a, idx_b, idx_c, event_a, event_b\nevent_a = event_b = \"\"\nidx_a = idx_b = 1\nidx_c = 0\n\ndef titleChoice(idx=-1):\n    global idx_a\n    idx_a = idx\n\ndef deptChoice(idx=-1):\n    global idx_b\n    idx_b = idx\n\ndef titleChange(attr, old, new):\n    global event_a\n    event_a = new.strip()\n\ndef deptChange(attr, old, new):\n    global event_b\n    event_b = new.strip()\n\ndef choose(idx):\n    global idx_c\n    idx_c = idx\n\n\nbtnGroupTitle.on_click(titleChoice)\nbtnGroupDept.on_click(deptChoice)\noptionGroup.on_click(choose)\n\ntitle_input.on_change(\"value\", titleChange)\ndept_input.on_change(\"value\", deptChange)\n\ncolumns = [\n    wd.TableColumn(field=\"id\", title=\"Course ID\"),\n    wd.TableColumn(field=\"title\", title=\"Title\"),\n    wd.TableColumn(field=\"dept\", title=\"department\"),\n    wd.TableColumn(field=\"credit\", title=\"Credit\"),\n    wd.TableColumn(field=\"instructor\", title=\"Instructor\"),\n]\ntable = wd.DataTable(source= ColumnDataSource(),\n                     columns=columns, width=800)\n\ndef dataShow(idx):\n    letter = btnGroupLetters.labels[idx]\n    tsql = \"SELECT * FROM lgu.course where title like '{}%'\".format(letter)\n    sqlConn = connectSQLServer()\n    with sqlConn.cursor(as_dict=True) as cursor:\n        cursor.execute(tsql)\n        rows = cursor.fetchall()\n        data={}\n        data['id'] = [row['course_id'] for 
row in rows]\n        data['title'] = [row['title'] for row in rows]\n        data['instructor'] = [row['instructor'] for row in rows]\n        data['credit'] = [row['credits'] for row in rows]\n        data['dept'] = [row['dept_name'] for row in rows]\n    table.source.data = data\nbtnGroupLetters.on_click(dataShow)\n\ndef checkZero(grade,count):\n    grades = ['A','A+','B','B+','C','C+','D','D+','F']\n    for i in range(9):\n        if grades[i] not in grade :\n            count.insert(i,0)\n    return count\n\ndef selectOnChange(attr,old,new):\n    tsql2015 = \"select gpa, count(*) as nums from lgu.student where year='2015' \"\n    tsql2016 = \"select gpa, count(*) as nums from lgu.student where year='2016' \"\n    tsql2017 = \"select gpa, count(*) as nums from lgu.student where year='2017' \"\n\n    if new != 'All':\n        tsql2015 += \"and dept_name = '{}' \".format(new)\n        tsql2016 += \"and dept_name = '{}' \".format(new)\n        tsql2017 += \"and dept_name = '{}' \".format(new)\n    \n    tsql2015 += 'group by gpa'\n    tsql2016 += 'group by gpa'\n    tsql2017 += 'group by gpa'\n\n    sqlConn = connectSQLServer()\n    with sqlConn.cursor(as_dict=True) as cursor:\n        cursor.execute(tsql2015)\n        gpa2015 = cursor.fetchall()\n        cursor.execute(tsql2016)\n        gpa2016 = cursor.fetchall()\n        cursor.execute(tsql2017)\n        gpa2017 = cursor.fetchall()\n    \n    grade2015 = list()\n    grade2016 = list()\n    grade2017 = list()\n    count2015 = list()\n    count2016 = list()\n    count2017 = list()\n\n    for row in gpa2015:\n        grade2015.append(row['gpa'])\n        count2015.append(row['nums'])\n    for row in gpa2016:\n        grade2016.append(row['gpa'])\n        count2016.append(row['nums'])\n    for row in gpa2017:\n        grade2017.append(row['gpa'])\n        count2017.append(row['nums'])\n    \n    data = {}\n    data['gpa'] = ['A','A+','B','B+','C','C+','D','D+','F']\n    data['2015'] = checkZero(grade2015,count2015)\n    data['2016'] = checkZero(grade2016,count2016)\n    data['2017'] = checkZero(grade2017,count2017)\n    \n    source.data = data\n\ndeptSelect.on_change(\"value\",selectOnChange)\n\n\n\n\ngpa = ['A+','A','B+','B','C+','C','D+','D','F']\nyears = ['2015','2016','2017'] \ncolors = [\"#c9d9d3\",\"#718dbf\",\"#e84d60\"] \n\ndata = {}\ndata['gpa'] = []\nfor yr in years:\n    data[yr] = []\n\nsource = ColumnDataSource(data=data)\n\np = figure(x_range=gpa, plot_height=500, plot_width=800, title='GPA count by year',\n           toolbar_location=None, tools=\"\")\n\np.vbar_stack(years,x='gpa', width=0.9, color=colors, source=source,\n             legend=[value(x) for x in years])\n\np.y_range.start = 0\np.legend.location = \"top_center\"\np.legend.orientation = \"vertical\"\n\nlayout_query = layout(\n    [\n        [wb(btnGroupLetters, width=1000)],\n        [wb(btnGroupTitle), wb(btnGroupDept)],\n        [wb(title_input), wb(paragraph, optionGroup, width=100), wb(dept_input)],\n        [wb(refresh, width=100)],\n        [wb(table)],\n    ]\n)\n\nlayout_chart = layout(\n    [\n        [wb(deptSelect)]\n    ]\n)\n\ntab1 = wd.Panel(child=layout_query, title=\"Course Info\")\ntab2 = wd.Panel(child=layout_chart, title=\"Statistics\")\ntabs = wd.Tabs( tabs = [tab1,tab2] )\n\ncurdoc().add_root(tabs)","sub_path":"csc1002 A2.py","file_name":"csc1002 A2.py","file_ext":"py","file_size_in_byte":6057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"445831781","text":"\nfrom datetime import datetime\n\nfrom airflow import DAG\nimport airflow.operators.dummy_operator as dummy_op\nimport airflow.operators.python_operator as py_op\n\n#from operators import start_state_printer\n\n# Short task description:\n#\n# Modify code to create 3 DAGs that run 3 tasks, and depend on each other as 1>2>3 (1 - start first) :\n#\n# Print 
into log information “{dag_id} start processing tables in database: {database}” (use for that PythonOperator)\n# Dummy task (with DummyOperator) that mock a task for Insert new row in DB. It can be called something like “insert_new_row”\n# Dummy task (with DummyOperator) that mock a task for query table and get the result, it could be called like “query_the_table”\n\n\n#\ndef show_current_state(**kwargs):\n return \"DAG: {0}, DB: {1}, TABLE: {2}\".format(kwargs[\"dag_id\"], kwargs[\"target_database\"], kwargs[\"target_table\"])\n\n\n# config with dags parameters\ntoday = datetime.today().date()\ntoday_date = datetime(today.year, today.month, today.day)\n\nconfig = {\n \"dag_sql_print\": {\n \"schedule_interval\": \"@once\",\n \"start_date\": today_date,\n \"table_name\": \"some_table_1\"\n },\n \"dag_sql_insert\": {\n \"schedule_interval\": \"@once\",\n \"start_date\": today_date,\n \"table_name\": \"some_table_2\"\n },\n \"dag_sql_update\": {\n \"schedule_interval\": \"@once\",\n \"start_date\": today_date,\n \"table_name\": \"some_table_3\"\n }\n}\n\n# iterate through the dag options\nfor entry in config.items():\n # create dag from the entry parameters\n dag = DAG(\n dag_id=entry[0],\n start_date=entry[1][\"start_date\"],\n schedule_interval=entry[1][\"schedule_interval\"]\n )\n\n with dag:\n # python task\n state_printer_task = py_op.PythonOperator(\n task_id='on-start-state-printer',\n python_callable=show_current_state,\n op_kwargs={\n \"dag_id\": dag.dag_id,\n \"target_database\": \"SQLite\",\n \"target_table\": entry[1][\"table_name\"]\n }\n )\n # insert_new_row dummy task\n insert_new_row_task = dummy_op.DummyOperator(\n task_id='insert_new_row'\n )\n # query_the_table dummy task\n query_the_table_task = dummy_op.DummyOperator(\n task_id='query_the_table'\n )\n\n # order:\n # state_printer_task << insert_new_row_task << query_the_table_task\n insert_new_row_task.set_upstream(state_printer_task)\n query_the_table_task.set_upstream(insert_new_row_task)\n\n\n # set initiated dag as a global instance\n globals()[dag.dag_id] = dag","sub_path":"practice/jobs_dag_second.py","file_name":"jobs_dag_second.py","file_ext":"py","file_size_in_byte":2640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"637494753","text":"import collections\nimport os\nimport tempfile\n\nimport pytest\n\nfrom galaxy.tools.verify import (\n files_contains,\n files_diff,\n files_re_match,\n files_re_match_multiline,\n)\n\n\nF1 = b\"A\\nB\\nC\"\nF2 = b\"A\\nB\\nD\\nE\" * 61\nF3 = b\"A\\nB\\n\\xfc\"\nF4 = b\"A\\r\\nB\\nC\"\nMULTILINE_MATCH = b\".*\"\nTestFile = collections.namedtuple('TestFile', 'value path')\n\n\ndef generate_tests(multiline=False):\n files = []\n for b, ext in [(F1, '.txt'), (F2, '.txt'), (F3, '.pdf'), (F4, '.txt'), (MULTILINE_MATCH, '.txt')]:\n fd, path = tempfile.mkstemp(suffix=ext)\n with os.fdopen(fd, 'wb') as out:\n out.write(b)\n files.append(TestFile(b, path))\n f1, f2, f3, f4, multiline_match = files\n if multiline:\n tests = [(multiline_match, f1, {'lines_diff': 0, 'sort': True}, None)]\n else:\n tests = [(f1, f1, {'lines_diff': 0, 'sort': True}, None)]\n tests.extend([\n (f1, f2, {'lines_diff': 0, 'sort': True}, AssertionError),\n (f1, f3, None, AssertionError),\n (f1, f4, None, None),\n ])\n return tests\n\n\n@pytest.mark.parametrize('file1,file2,attributes,expect', generate_tests())\ndef test_files_contains(file1, file2, attributes, expect):\n if expect is not None:\n with pytest.raises(expect):\n files_contains(file1.path, 
file2.path, attributes)\n    else:\n        files_contains(file1.path, file2.path, attributes)\n\n\n@pytest.mark.parametrize('file1,file2,attributes,expect', generate_tests())\ndef test_files_diff(file1, file2, attributes, expect):\n    if expect is not None:\n        with pytest.raises(expect):\n            files_diff(file1.path, file2.path, attributes)\n    else:\n        files_diff(file1.path, file2.path, attributes)\n\n\n@pytest.mark.parametrize('file1,file2,attributes,expect', generate_tests())\ndef test_files_re_match(file1, file2, attributes, expect):\n    if expect is not None:\n        with pytest.raises(expect):\n            files_re_match(file1.path, file2.path, attributes)\n    else:\n        files_re_match(file1.path, file2.path, attributes)\n\n\n@pytest.mark.parametrize('file1,file2,attributes,expect', generate_tests(multiline=True))\ndef test_files_re_match_multiline(file1, file2, attributes, expect):\n    if expect is not None:\n        with pytest.raises(expect):\n            files_re_match_multiline(file1.path, file2.path, attributes)\n    else:\n        files_re_match_multiline(file1.path, file2.path, attributes)\n","sub_path":"test/unit/test_verify.py","file_name":"test_verify.py","file_ext":"py","file_size_in_byte":2414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"204780324","text":"#!/usr/bin/env python\n\n# package: engine\n\n# Future place of the A-star path finding algorithm\nclass Pathfinding(object):\n    \n    # The lists used for finding things.\n    class PathNode(object): \n        cost = float()\n        loc = []\n\n        def __init__(self, x, y, cost): \n            self.loc = [x, y]\n            self.cost = cost\n    \n    def __init__(self,Game):\n        self.Game=Game\n        self.openlist = []\n        self.closedlist = []\n\n        # Used for tracking the currently selected node and a shortcut to the currently selected unit.\n        #self.unit = units.Base()\n        #self.current = self.PathNode()\n        self.distance = int()\n\n        # Output information\n        self.mapGame = []\n        self.maphits = []\n\n        # View Tile Cost/Hits and when the last time something was updated.\n        self.ShowCost = bool()\n        self.ShowHits = bool()\n        self.LastChanged = 1\n    \n    def InOpen(self, x, y):\n\n        for node in self.openlist:\n            if node.loc[0] == x and node.loc[1] == y:\n                self.SwitchParent(node)\n                return True\n        return False\n\n    def InClosed(self, x, y):\n        for node in self.closedlist:\n            if node.loc[0] == x and node.loc[1] == y:\n                self.SwitchParentClosed(node)\n                return True\n        return False\n\n    def AddNode(self, x, y):\n        if not self.InClosed(x, y):\n            if not self.InOpen(x, y):\n                cost = int(self.FindCost(x,y,self.current)*100.0) / 100.0\n                if cost <= self.distance:\n                    self.openlist.append(self.PathNode(x, y, cost))\n\n    
def SwitchParent(self, node):\n        cost = self.Game.gameMap.mapGame[node.loc[1]][node.loc[0]].speed() + self.current.cost\n        if cost < node.cost:\n            node.cost = int(cost * 100.0) / 100.0\n\n    def SwitchParentClosed(self, node):\n        cost = self.Game.gameMap.mapGame[node.loc[1]][node.loc[0]].speed() + self.current.cost\n        if cost < node.cost:\n            node.cost = int(cost * 100.0) / 100.0\n            self.closedlist.remove(node)\n            self.openlist.append(node)\n\n    def FindCost(self, x, y, parent):\n        cost = self.Game.gameMap.mapGame[y][x].speed()\n        if parent != None:\n            cost += parent.cost\n        return int(cost * 100.0) / 100.0\n\n    def RemoveOccupied(self):\n        if len(self.map) <= 1:\n            return\n        i = 1\n        while i < len(self.map):\n            if self.Occupied(self.map[i][0], self.map[i][1]):\n                del self.map[i]\n            else:\n                i += 1\n\n    def Occupied(self, x, y):\n        for unit in self.Game.units:\n            if unit.x == x and unit.y == y:\n                return True\n        return False\n\n    def LowestCostOpen(self):\n        if len(self.openlist)==0:\n            return None\n        lowest = self.openlist[0]\n        for node in self.openlist:\n            if node.cost < lowest.cost:\n                lowest = node\n        return lowest\n\n    def FindNodes(self):\n        if self.current.loc[0] - 1 >= 0 and self.unit.PathCheck(self.current.loc[0] - 1, self.current.loc[1]):\n            self.AddNode(self.current.loc[0] - 1, self.current.loc[1])\n        if self.current.loc[1] - 1 >= 0 and self.unit.PathCheck(self.current.loc[0], self.current.loc[1] - 1):\n            self.AddNode(self.current.loc[0], self.current.loc[1] - 1)\n        if self.current.loc[0] + 1 < self.Game.gameMap.width and self.unit.PathCheck(self.current.loc[0] + 1, self.current.loc[1]):\n            self.AddNode(self.current.loc[0] + 1, self.current.loc[1])\n        if self.current.loc[1] + 1 < self.Game.gameMap.height:\n            if self.unit.PathCheck(self.current.loc[0], self.current.loc[1] + 1):\n                self.AddNode(self.current.loc[0], self.current.loc[1] + 1)\n    \n    def FindPath(self, unit, _range):\n        self.maphits = [ [0 for y in range(self.Game.gameMap.width)] for x in range(self.Game.gameMap.height)]\n        self.openlist = []\n        self.closedlist = []\n        self.map = []\n        self.distance = _range\n        self.unit = unit\n        self.openlist.append(self.PathNode(unit.x, unit.y, 0))\n        print(\"self.openlist %s\" %self.openlist[0].loc)\n        self.current = self.openlist[0]\n        #print(\"%s\" %self.current)\n        while True:\n            if len(self.openlist)==0:\n                break\n            self.FindNodes()\n            self.closedlist.append(self.current)\n            self.openlist.remove(self.current)\n            \n            self.maphits[self.current.loc[1]][self.current.loc[0]] += 1\n            self.current = self.LowestCostOpen()\n        for node in self.closedlist:\n            self.map.append((node.loc[0], node.loc[1]))\n        #print(\"%s\" % self.map)\n        #unit.LastPathed = System.currentTimeMillis()\n        self.RemoveOccupied()\n        return self.map\n","sub_path":"src/engine/Pathfinding.py","file_name":"Pathfinding.py","file_ext":"py","file_size_in_byte":6008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"43058513","text":"#The following Python script puts global-settings info for the BigIP using\n#the iControl REST API. This also uses the Requests library.\n#It issues a warning for using an unverified HTTPS request. 
\nimport requests\ng=open('test.json','r', encoding=\"utf-8\")\ndata=g.read()\nr=requests.put('https://10.145.193.169/mgmt/tm/sys/global-settings', auth=('admin', 'admin'), verify=False, data=data)\nf=open('doodle.json', 'w', encoding=\"utf-8\")\nf.write(r.text)\n\n","sub_path":"python/Python_put/indexP.py","file_name":"indexP.py","file_ext":"py","file_size_in_byte":442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"619563297","text":"from styx_msgs.msg import TrafficLight\nfrom keras.models import Model\nfrom keras.layers import Dense, GlobalAveragePooling2D\nfrom keras.applications.resnet50 import ResNet50, preprocess_input\nimport os\nimport tensorflow as tf\nimport numpy as np\nimport cv2\nimport rospy\n\nSPARSE_TO_IDX = {0:0, 1:1, 2:2, 3:4}\nMODEL_PICTURE_SIZE = (224, 224)\n\nclass TLClassifier(object):\n    def __init__(self, model_weights_file):\n        rospy.loginfo(model_weights_file)\n        base_path = os.path.dirname(os.path.abspath(__file__))\n        model_weights = os.path.join(base_path, model_weights_file)\n        self.model = self.create_model()\n        self.model.load_weights(model_weights)\n        self.graph = tf.get_default_graph()\n\n    def create_model(self):\n        base_model = ResNet50(weights=None, include_top=False)\n        x = base_model.output\n        x = GlobalAveragePooling2D()(x)\n        predictions = Dense(4, activation='softmax')(x)\n        model = Model(inputs=base_model.input, outputs=predictions)\n        return model\n\n    def get_classification(self, image):\n        x = cv2.resize(image, MODEL_PICTURE_SIZE) \n        x = np.expand_dims(x, axis=0)\n        x = np.float64(x)\n        x = preprocess_input(x)\n        with self.graph.as_default():\n            logits = self.model.predict(x)\n        maxindex = np.argmax(logits)\n        color = SPARSE_TO_IDX[maxindex]\n        rospy.loginfo('COLOR {}'.format(color))\n        tl = TrafficLight()\n        tl.state = color\n        return tl\n","sub_path":"ros/src/tl_detector/light_classification/tl_classifier.py","file_name":"tl_classifier.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"628759945","text":"# encoding: utf-8\n'''\nCreated on 30 Mar 
2017\nWatcher for a system process\n@author: av.Kustov\n'''\nfrom shell.processors.AbstractProcessor import AbstractProcessor\nfrom shell.utils.Observer import Publisher\nfrom shell.event.MailProcessEvent import MailProcessEvent\nfrom shell.event.KillProcessEvent import KillProcessEvent\nfrom shell.event.SystemEvent import SystemEvent\nfrom shell.processors.RealProces import RealProces\n\nimport psutil\nfrom psutil import NoSuchProcess\nimport logging\n\nlogger = logging.getLogger(__name__)\n\nclass SystemJobProcessor(AbstractProcessor):\n    '''\n    Watcher for a system process\n    '''\n    # flagged processes\n    bad_process = []\n    # event publisher\n    pub = None\n    \n    def __init__(self, config):\n        self.config = config\n        super(AbstractProcessor,self).__init__()\n        # register the subscribers\n        self.pub = Publisher()\n        self.pub.register(KillProcessEvent(self.config))\n        self.pub.register(SystemEvent(self.config))\n        self.pub.register(MailProcessEvent(self.config))\n    \n    \n    def findProcess(self, name):\n        '''\n        select processes by name\n        name - the process name\n        '''\n        list_process = []\n        for proces in psutil.process_iter():\n            if proces.name() == name:\n                list_process.append(proces)\n        return list_process\n    \n    def chekprocess(self, proces):\n        '''\n        flag processes that do not fit within the maxcpuprocent, maxmemoryprocent metrics\n        proces - the process\n        '''\n        maxcpuprocent = int(self.config.maxcpuprocent)\n        maxmemoryprocent = int(self.config.maxmemoryprocent)\n        real = RealProces(proces)\n        \n        logger.debug('Checking process %s CPU %s MEM %s' % (proces.__str__(), real.old_cpu_percent.__str__(), real.old_memory_percent.__str__(),))\n        if (real.old_cpu_percent>maxcpuprocent) or (real.old_memory_percent>maxmemoryprocent):\n            message = 'Warning: system event, metrics exceeded for process %s: CPU load %s , memory load %s ' % (proces.__str__(), real.old_cpu_percent.__str__(), real.old_memory_percent.__str__())\n            logger.error(message)\n            self.bad_process.append(real)\n        \n        #proces.username()\n        #proces.memory_full_info()\n    \n    def execute(self):\n        logger.debug(\"Processing system events\")\n        self.bad_process = []\n        try:\n            #str1=\" \".join(str(x) for x in plist)\n            process = self.findProcess(self.config.process_name)\n            if len(process) == 0 : raise NoSuchProcess(0, self.config.process_name ,'Process %s not found' % self.config.process_name)\n            for proces in process:\n                self.chekprocess(proces)\n            \n            if len(self.bad_process) !=0 :\n                # publish the event to the handlers\n                self.pub.dispatch(self.bad_process)\n            \n        except Exception as error:\n            logger.error(error.__str__()) ","sub_path":"shell/processors/SystemJobProcessor.py","file_name":"SystemJobProcessor.py","file_ext":"py","file_size_in_byte":3522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"384152727","text":"n=int(input(\"\"))\nif(n<100000):\n    a=list(map(int,str(n)))\n    b=list(map(lambda x:x**3,a))\n    if(sum(b)==n):\n        print(\"yes\")\n    else:\n        print(\"no\")\nelse:\n    print(\"invalid number\")\n","sub_path":"beginnerlevel/armstrongornot.py","file_name":"armstrongornot.py","file_ext":"py","file_size_in_byte":194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"633366754","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom application import app, request, redirect, escape, session, url_for, db, bcrypt, render_template, g, flash\nfrom application.database.database import User\nfrom application.views.user import *\nfrom 
application.views.upload import *\nfrom application.views.decorators.decorators import *\nfrom application.functions.functions import *\nfrom functools import wraps\n\n\n@app.before_request\ndef before_request():\n\tg.year = datetime.now().year\n\tg.siteName = 'Johannes\\' Flask Boilerplate'\n\tg.baseUrl = url_for('index')\n\n\tif 'username' in session:\n\t\tuser = session['username']\n\telse:\n\t\tuser = None\n\n\tif 'LoggedIn' in session:\n\t\tg.userIsloggedIn = True\n\telse:\n\t\tg.userIsloggedIn = False\n\n\tg.user = user\n\n\n@app.errorhandler(404)\ndef page_not_found(error):\n\treturn render_template('index.html', input_var=error)\n\n\n@app.route('/')\ndef index():\n\t#cookie login remember me - \n\tif g.userIsloggedIn:\n\t\treturn render_template('dashboard.html')\n\telse:\n\t\treturn render_template('index.html')\n\n\n@app.route('/cookies')\ndef cookies():\n\t#cookie login remember me - \n\treturn render_template('cookies.html')\n\n\n@app.route('/secret')\n@login_required\ndef denHemmligeSide():\n\treturn render_template('secret.html', input_var=session)","sub_path":"application/views/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"560060097","text":"import numpy as np\n\nnp.set_printoptions(threshold=np.inf)\n\n\n# Used as a matrix transformation library\ndef grid_points(shape, dtype=np.float32):\n    \"\"\"Return the grid points of a given shape with distance `1`.\"\"\"\n\n    \"\"\"\n    x, y = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]))\n    y = y.flatten()\n    x = x.flatten()\n\n    z = np.empty((shape[0] * shape[1], 2), dtype)\n    z[:, 0] = y\n    z[:, 1] = x\n    return z\n    \"\"\"\n    z = []\n    for i in range(shape[0]):\n        for j in range(shape[1] - 1):\n            z.append((i * 32 + j, i * 32 + j + 1))\n    for i in range(shape[0] - 1):\n        for j in range(shape[1]):\n            z.append((i * 32 + j, (i + 1) * 32 + j))\n    return z\n\n# grid = grid_points((32, 32))\n# print(grid)\n","sub_path":"adjacency.py","file_name":"adjacency.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"190859701","text":"# Programmed by IL WON SON\n# CECS 424 Assignment 2 - Python\n# Professor Anthony Giacalone\n# Part 1: Coin Arranger Game\n\nresponse = True\nwhile response:\n    move = 0\n    coins = \"HHHHHTTTTT\"\n\n    def split(string): # Convert String to Array.\n        return list(string)\n\n    def join(list): # Convert Array to String.\n        return ''.join(list)\n\n    print(\"\\n---Coin Arrange Game---\\n\")\n\n    # First move starts here.\n    # Display number of moves the user has taken.\n    print(\"Move #\", move + 1)\n\n    # User selecting left of two coins.\n    ## There is no two-coin gap initially, can only move coins to the end of the rows after you pick them up.\n    \n    # Check if selected two coins are touching.\n    while True:\n        \n        # Display coins and get user input.\n        # Store the index of left of two coins user selected.\n        getHere = len(input(coins + '\\n'))\n        \n        # Copy index for later, to fill-in the two-coin gap.\n        twoCoinGap = getHere\n\n        # Check if selection is out of bound.\n        if getHere > 10:\n            print(\"You can't select there! 
That's out of bound.\\nPlease try again.\")\n \n # User cannot take coins on the edges at the initial first move, for it is not touching two coins.\n ## If user enters zero spaces, it implies to take '_H' which holds only one coin.\n ## If user enters ten spaces, it implies to take 'T_' which is not two touching coins.\n elif getHere < 1 or getHere > 9:\n # Display error message.\n print(\"Must select left of two touching coins!\\nPlease try again.\")\n\n # Otherwise, all other user selected coins are valid.\n else:\n break # Out of while-loop.\n\n coins = split(coins)\n\n # Copy user selected two coins to twoCoins. coins[indexinclusive:indexexclusive]\n twoCoins = coins[getHere - 1 : getHere + 1]\n # Put placeholder '_' on each copied coins to indicate 'two-coin gap'.\n coins[getHere - 1], coins[getHere] = '_', '_'\n\n coins = join(coins)\n\n\n # User selecting the position to where the two coins should be moved.\n ## Check for invalid user placement position of two coins when there is no two-coin gap.\n while True:\n \n # Display coins and get user input.\n # Store the index where the left position to where two coins should be moved.\n putHere = len(input(coins + '\\n'))\n\n # Enter here only if user selects one end of the row.\n if putHere == 0 or putHere == 11:\n\n # If user chooses to place at the left edge, initial two-coin gap placeholder will shift by 2.\n if putHere == 0:\n twoCoinGap = twoCoinGap + 2\n \n coins = split(coins)\n \n # Inserts left of two coins and right of two coins.\n coins.insert(putHere, twoCoins[0])\n coins.insert(putHere+1, twoCoins[1])\n \n coins = join(coins)\n break # Out of while-loop.\n\n # Display error message if user wants to place it on the gap.\n elif coins[putHere-1] == '_':\n print(\"You can't fill there, that's the placeholder!\\nPlease try again.\")\n\n # Display error message.\n else:\n print(\"There is no two-coin gap!\\nPlease try again.\")\n\n # Increment move.\n move = move + 1\n # End of Move 1.\n\n\n\n # Moves 2 to 5 while-loops from here.\n while(move < 5):\n print(\"Move #\", move + 1)\n\n # User selecting left of two coins.\n ## There is two-coin gap, can only move the two coins into the gap.\n while True:\n \n # Display coins and get user input.\n # Store the index of left of two coins user selected.\n getHere = len(input(coins + '\\n'))\n\n # Check if selection is out of bound.\n if getHere > 12:\n print(\"You can't select there! That's out of bound.\\nPlease try again.\")\n \n # Check for selection case '_L' whre L is a letter.\n elif coins[getHere-1] == '_' and coins[getHere] != '_':\n print(\"Must select left of two touching coins!\\nPlease try again.\")\n\n # Check for selection case '__' two-coin gap placeholder.\n elif coins[getHere-1] == '_':\n print(\"You can't take that, it's placeholder!\\nPlease try again.\")\n\n # Check for selection case compose of 'left edged coin' or 'right edged coin'\n elif getHere < 1 or getHere > 11:\n print(\"Must select left of two touching coins!\\nPlease try again.\")\n \n # Check for selection case 'L_' where L is a letter.\n elif coins[getHere] == '_':\n print(\"Must select left of two touching coins!\\nPlease try again.\")\n \n else:\n break # Out of while-loop.\n \n # Mechanism to oscilate tow-coin gaps as the move progresses.\n if move%2 == 1:\n twoCoinGap2 = getHere\n \n if move%2 == 0:\n twoCoinGap = getHere\n \n coins = split(coins)\n # Copy two coins to twoCoins list. 
coins[indexinclusive:indexexclusive]\n twoCoins = coins[getHere - 1 : getHere + 1]\n # Put placeholder '_' on each coins to be moved.\n coins[getHere - 1], coins[getHere] = '_', '_'\n coins = join(coins)\n \n # User selecting the position to where the two coins should be moved.\n ## Check until user selects the two-coin gap to place previously selected two coins.\n while True:\n\n # Display coins and get user input.\n # Store the index of left of two coins user selected.\n putHere = len(input(coins + '\\n'))\n\n # User can only place it into the two-coin gap.\n # As the move progress, previous two coin gap gets filled and new two coin gap gets created.\n if putHere == twoCoinGap and move%2 == 1:\n coins = split(coins)\n coins[twoCoinGap - 1], coins[twoCoinGap] = twoCoins[0], twoCoins[1]\n coins = join(coins)\n break\n \n elif putHere == twoCoinGap2 and move%2 == 0:\n coins = split(coins)\n coins[twoCoinGap2 - 1], coins[twoCoinGap2] = twoCoins[0], twoCoins[1]\n coins = join(coins)\n break\n \n else:\n print(\"You can't fill there!\\nYou must place two coins into the two-coin gap!!!\\nPlease try again.\")\n \n # Increment move.\n move = move + 1\n # End of while-loop 4 other moves.\n\n print(\"Final string:\", coins)\n\n # User selection to play again or not.\n while True:\n r = input(\"Play again? (y/n): \")\n if r == 'y' or r == 'Y':\n response = True\n break\n elif r == 'n' or r == 'N':\n response = False\n break\n else:\n print(\"'y' or 'n' only\")\n","sub_path":"Part One/CoinArrangerGame.py","file_name":"CoinArrangerGame.py","file_ext":"py","file_size_in_byte":6975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"478428053","text":"# Metatdata.py class, to store sample related metadata\n# Sam Gorman , 2013\n# Sam Hile 2013\n# Charley Peng 2013\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA\n\nfrom instrument import Instrument\nimport types\nimport logging\nfrom qt import msleep\n\nclass x_Panel(Instrument):\n '''\n This is the python driver for the variable panel faux-instrument\n\n Usage:\n Initialize with\n = instruments.create('name', 'x_Panel', lab, safety_func)\n \n Autogenerate GUI for lab variables \n '''\n\n def __init__(self, name, lab, safety=None, reset=False):\n '''\n Initializes the panel thingy. 
Add this at the end of your setup phase, \n as variables need to have been defined before initialising this\n\n Input:\n name (string) : name of the instance\n lab (Laboritory) : lab object containing established variables\n safety_func (func_ptr) : a function to make things safe - ie set gates to 0V etc\n \n Output:\n None\n '''\n logging.info(__name__ + ' : Initializing panel')\n Instrument.__init__(self, name, tags=['virtual'])\n \n self.list = lab.get_variables()\n self.safety = safety\n \n for i in self.list:\n #link in the do_set/get functions\n setattr(self, 'do_get_%s'%i.get_name(),i.get_value)\n setattr(self, 'do_set_%s'%i.get_name(),i.set_value)\n \n #generate a parameter for each variable\n self.add_parameter(i.get_name(), type=types.FloatType,\n flags=Instrument.FLAG_GETSET | Instrument.FLAG_GET_AFTER_SET)\n #and some conveniences\n if self.safety is not None:\n self.add_function('make_safe')\n \n if reset:\n self.reset()\n self.get_all()\n else:\n self.get_all()\n\n def get_all(self):\n '''\n Reads out all variables.\n\n Input:\n None\n\n Output:\n None\n '''\n logging.info('reading all variables from the lab') \n for i in self.list:\n msleep(0.1)\n logging.info('getting %s...'%i.get_name())\n getattr(self, 'get_%s'%i.get_name())()\n \n def make_safe(self):\n '''\n Safety function. Typically sets voltages to zero, or turns outputs off.\n\n Input:\n None\n\n Output:\n None\n '''\n logging.info('making safe') \n if self.safety is not None:\n self.safety()\n self.get_all()\n\n","sub_path":"bilby/instrument_plugins/x_Panel.py","file_name":"x_Panel.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"317008383","text":"import os\n\nTRAIN_DIR = \"../data/train\"\nVALID_DIR = \"../data/valid\"\n\nclasses = os.listdir(TRAIN_DIR)\n\n\ndef percentage(percent, whole):\n return (percent * whole) / 100.0\n\n\nif not os.path.exists(VALID_DIR):\n os.mkdir(VALID_DIR)\n\nfor c in classes:\n image_dir = \"{}/{}/\".format(TRAIN_DIR, c)\n images = os.listdir(image_dir)\n num_of_images_to_move = int(percentage(20, len(images)))\n\n valid_class_dir = VALID_DIR + \"/\" + c\n train_class_dir = TRAIN_DIR + \"/\" + c\n\n if not os.path.exists(valid_class_dir):\n os.mkdir(valid_class_dir)\n\n for image in images[-num_of_images_to_move:]:\n new_file_name = valid_class_dir + \"/\" + image\n old_file_name = train_class_dir + \"/\" + image\n print('moving', old_file_name, 'to', new_file_name)\n os.rename(old_file_name, new_file_name)\n","sub_path":"scripts/create-validation-data.py","file_name":"create-validation-data.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"379853195","text":"import os\nimport random\nimport requests\nfrom decimal import Decimal\nfrom ..utils.time import (\n local_string_to_utc_string,\n format_standard,\n local_string_to_weathercom_string,\n parse_weathercom,\n)\nfrom ..utils.conversions import farenheit_to_celcius, celcius_to_farenheit\nfrom ..utils.api_keys import find_key\nfrom ..utils.browser_profiles import Browser\nfrom ..exceptions import HttpError, BadResponse, OutOfRange, UnexpectedFormat\n\n\nENDPOINT = \"https://api.weather.com/v2/turbo/vt1hourlyForecast?apiKey={api_key}&format=json&geocode={latitude}%2C{longitude}&language=en-US&units={units}\"\n\nDECIMAL_PLACES = int(os.getenv(\"DECIMAL_PLACES\", 2))\nSERVICE_NAME = \"Weather.com\"\n\n\ndef fetch(location_object, 
units=\"m\"):\n \"\"\"\n Input:\n units\n \"m\" (metric, celcius)\n \"e\" (imperial, farenheit)\n \"\"\"\n if os.getenv(\"WEATHERCOM_API_KEY_ALT\"):\n api_keys = [\"WEATHERCOM_API_KEY\", \"WEATHERCOM_API_KEY_ALT\"]\n api_key_name = random.choice(api_keys)\n api_key = find_key(api_key_name)\n else:\n api_key = find_key(\"WEATHERCOM_API_KEY\")\n latitude, longitude = location_object[\"coordinates\"]\n browser_profile = Browser()\n headers = browser_profile.headers\n r = requests.get(\n ENDPOINT.format(\n latitude=latitude, longitude=longitude, api_key=api_key, units=units\n ),\n headers=headers,\n )\n if r.ok:\n response = r.json()\n if not response:\n response_excerpt = r.text[:100]\n raise BadResponse(\n {\n \"service\": SERVICE_NAME,\n \"message\": \"Empty page with code {r._status_code}. Full response: {response_excerpt}...\",\n }\n )\n else:\n return response\n else:\n raise HttpError({\"service\": SERVICE_NAME, \"response\": r.status_code})\n\n\ndef retrieve_document(location_object):\n \"\"\"Calls the API to fetch the document once only\n May perform additional transformation depending on the service\n\n For Weather.com, the retrieve document call with fetch twice\n for both celcius and farenheit\n\n It'll return temperatures that are an average of both:\n 1) Fetch Celcius\n Convert to Farenheit\n 2) Fetch Farenheit\n 3) Average\n 4) Convert to celcius\n \"\"\"\n doc_farenheit = fetch(location_object, units=\"e\")\n doc_celcius = fetch(location_object, units=\"m\")\n\n temperatures = {}\n temperatures[\"farenheit\"] = doc_farenheit.get(\"vt1hourlyForecast\", {}).get(\n \"temperature\"\n )\n temperatures[\"celcius\"] = doc_celcius.get(\"vt1hourlyForecast\", {}).get(\n \"temperature\"\n )\n if not temperatures[\"farenheit\"] or not temperatures[\"celcius\"]:\n raise BadResponse(\n {\"service\": SERVICE_NAME, \"message\": \"vt1hourlyForecast > temperature\"}\n )\n\n # Converting all to decimals\n for u in [\"farenheit\", \"celcius\"]:\n temperatures[u] = [Decimal(str(elt)) for elt in temperatures[u]]\n # 1) Converting celcius to Farenheits\n temperatures[\"celcius\"] = [\n celcius_to_farenheit(elt) for elt in temperatures[\"celcius\"]\n ]\n\n # 3) Averaging\n farenheit_averaged = []\n for celcius_in_farenheit, farenheit in zip(\n temperatures[\"celcius\"], temperatures[\"farenheit\"]\n ):\n farenheit_averaged.append((celcius_in_farenheit + farenheit) / 2)\n\n # 4) Back to celcius\n celcius_average = [farenheit_to_celcius(elt) for elt in farenheit_averaged]\n\n doc_celcius[\"vt1hourlyForecast\"][\"temperature\"] = celcius_average\n\n return doc_celcius\n\n\ndef find_in_document(location_object, target_local_time, document):\n \"\"\"Finds the forecast for the desired time from the supplied API response\n\n Input:\n location_object\n {\n \"coordinates\": (-33.86, 151.21),\n \"accuweather_key\": 12481,\n \"timezone\": \"Australia/Sydney\",\n \"bom.gov.au\": \"http://www.bom.gov.au/places/nsw/sydney/forecast/detailed/\"\n }\n target_local_time\n '2020-04-11T09:00'\n\n Output:\n {\n \"ok\": True,\n \"time_utc\": \"\",\n \"temperature_celcius\": \"\"\n }\n \"\"\"\n target_time_utc = local_string_to_utc_string(\n time_local=target_local_time,\n timezone=location_object[\"timezone\"],\n format_func=format_standard,\n )\n target_time_formatted = local_string_to_weathercom_string(\n time_local=target_local_time, timezone=location_object[\"timezone\"]\n )\n\n forecasts = document.get(\"vt1hourlyForecast\", {})\n hours_local, temperatures = (\n forecasts.get(\"processTime\"),\n 
forecasts.get(\"temperature\"),\n )\n if not hours_local:\n raise BadResponse(\n {\"service\": SERVICE_NAME, \"message\": \"vt1hourlyForecast > processTime\"}\n )\n if not temperatures:\n raise BadResponse(\n {\"service\": SERVICE_NAME, \"message\": \"vt1hourlyForecast > temperatures\"}\n )\n if len(hours_local) != len(temperatures):\n raise UnexpectedFormat(\n {\n \"service\": SERVICE_NAME,\n \"message\": \"Different number of hours and temperatures\",\n }\n )\n\n for hour, temperature in zip(hours_local, temperatures):\n if hour == target_time_formatted:\n temperature = round(Decimal(temperature), DECIMAL_PLACES)\n return {\n \"ok\": True,\n \"time_utc\": target_time_utc,\n \"temperature_celcius\": temperature,\n \"forecast_age_hours\": None,\n \"forecast_issue_time\": None,\n }\n else:\n # Exhausted list of forecasts\n latest_time = format_standard(parse_weathercom(hour))\n raise OutOfRange(\n {\n \"service\": SERVICE_NAME,\n \"message\": f\"Could not find a forecast for {target_time_utc}. Latest is {latest_time}.\",\n }\n )\n","sub_path":"pyweather/pyweather/api/weathercom.py","file_name":"weathercom.py","file_ext":"py","file_size_in_byte":5968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"390303228","text":"#=============================================\r\n#Author: Eric Zhou\r\n#Date: 11/6/2017\r\n#Purpose: To create a date class\r\n#Input: Keyboard\r\n#Output: Date\r\n#=============================================\r\n\r\nfrom tkinter import *\r\n\r\n#=========================================================\r\n#Author: Eric Zhou\r\n#Date: 11/6/2017\r\n#Purpose: a date class which holds components of the date\r\n#Input: self, month, day, year\r\n#Output: No output\r\n#=========================================================\r\n\r\nclass Date():\r\n def __init__(self, month=1, day=1, year=2000):\r\n self.month = month\r\n self.day = day\r\n self.year = year\r\n\r\n#===============================================================\r\n#Author: Eric Zhou\r\n#Date: 11/7/2017\r\n#Purpose: Gets the date in name/word form\r\n#Input: self\r\n#Output: Returns date\r\n#===============================================================\r\n\r\n def __str__(self):\r\n dayNumber = self.calcZeller()\r\n dayName = self.returnDayName()\r\n monthName = self.returnMonthName()\r\n year = str(self.year)\r\n day = str(self.day)\r\n return (dayName + ', ' + day + ' ' + monthName + ' ' + year)\r\n \r\n#=========================================================\r\n#Author: Eric Zhou\r\n#Date: 11/7/2017\r\n#Purpose: Returns the month in the name\r\n#Input: self\r\n#Output: Month Name\r\n#=========================================================\r\n \r\n def returnMonthName(self):\r\n \r\n if self.month == 1:\r\n monthName = \"January\"\r\n elif self.month == 2:\r\n monthName = \"February\" \r\n elif self.month == 3:\r\n monthName = \"March\"\r\n elif self.month == 4:\r\n monthName = \"April\" \r\n elif self.month == 5:\r\n monthName = \"May\"\r\n elif self.month == 6:\r\n monthName = \"June\"\r\n elif self.month == 7:\r\n monthName = \"July\"\r\n elif self.month == 8:\r\n monthName = \"August\"\r\n elif self.month == 9:\r\n monthName = \"September\"\r\n elif self.month == 10:\r\n monthName = \"October\"\r\n elif self.month == 11:\r\n monthName = \"November\"\r\n elif self.month == 12:\r\n monthName = \"December\"\r\n\r\n return monthName\r\n\r\n#===============================================================\r\n#Author: Eric Zhou\r\n#Date: 
11/7/2017\r\n#Purpose: Sees if leap year is true or not with a given date\r\n#Input: self\r\n#Output: Returns leap year == True or False\r\n#===============================================================\r\n\r\n def returnLeapYear(self):\r\n if self.year % 4 == 0 and self.year % 100 != 0 or self.year % 400 == 0:\r\n leapYear = True\r\n else:\r\n leapYear = False\r\n \r\n return leapYear\r\n\r\n#===============================================================\r\n#Author: Eric Zhou\r\n#Date: 11/7/2017\r\n#Purpose: Sees the max day of a given month\r\n#Input: self\r\n#Output: Returns the max day of that month\r\n#===============================================================\r\n\r\n def returnMaxDay(self):\r\n month = self.month\r\n leapYear = self.returnLeapYear()\r\n \r\n if month == 9 or month == 6 or month == 4 or month == 11:\r\n maxDay = 30\r\n elif month == 2 and leapYear == True:\r\n maxDay = 29\r\n elif month == 2 and leapYear == False:\r\n maxDay = 28\r\n else:\r\n maxDay = 31\r\n \r\n return maxDay\r\n\r\n#===============================================================\r\n#Author: Eric Zhou\r\n#Date: 11/7/2017\r\n#Purpose: Sees what the weekday of a given date is\r\n#Input: self\r\n#Output: Returns the weekday of a given date\r\n#===============================================================\r\n\r\n def calcZeller(self):\r\n month = int(self.month)\r\n year = int(self.year)\r\n day = int(self.day)\r\n \r\n m = month - 2\r\n y = year\r\n if m <=0:\r\n m += 12\r\n y -= 1\r\n p = y // 100\r\n r = y % 100\r\n\r\n return (day + (26*m -2) //10 + r + r // 4 + p // 4 + 5 * p) % 7\r\n\r\n#===============================================================\r\n#Author: Eric Zhou\r\n#Date: 11/7/2017\r\n#Purpose: Returns the First Day of that Month given\r\n#Input: self\r\n#Output: Returns the weekday of that month's first day\r\n#===============================================================\r\n\r\n def returnFirstDayMonth(self):\r\n month = int(self.month)\r\n year = int(self.year)\r\n day = 1\r\n \r\n m = month - 2\r\n y = year\r\n if m <=0:\r\n m += 12\r\n y -= 1\r\n p = y // 100\r\n r = y % 100\r\n\r\n return (day + (26*m -2) //10 + r + r // 4 + p // 4 + 5 * p) % 7\r\n\r\n#===============================================================\r\n#Author: Eric Zhou\r\n#Date: 11/7/2017\r\n#Purpose: Sees the day name of the given date\r\n#Input: self\r\n#Output: Returns the day name of the given date\r\n#===============================================================\r\n\r\n def returnDayName(self):\r\n day = self.calcZeller()\r\n if day == 0:\r\n dayName = \"Sunday\"\r\n elif day == 1:\r\n dayName = \"Monday\"\r\n elif day == 2:\r\n dayName = \"Tuesday\"\r\n elif day == 3:\r\n dayName = \"Wednesday\"\r\n elif day == 4:\r\n dayName = \"Thursday\"\r\n elif day == 5:\r\n dayName = \"Friday\"\r\n elif day == 6:\r\n dayName = \"Saturday\"\r\n \r\n return dayName\r\n\r\n#===============================================================\r\n#Author: Eric Zhou\r\n#Date: 11/7/2017\r\n#Purpose: Display a calender of a date given\r\n#Input: self\r\n#Output: Returns calendar of the given date\r\n#===============================================================\r\n\r\n def displayCalendar(self):\r\n date = self.__str__()\r\n countDays = 1\r\n useDay = self.day\r\n self.day = 1\r\n\r\n print(\"\")\r\n print(\"%27s\" % date)\r\n print(\"\")\r\n print(\"Sun\", end = \"\")\r\n print(\"%5s\" % \"Mon\", end = \"\")\r\n print(\"%5s\" % \"Tue\", end = \"\")\r\n print(\"%5s\" % \"Wed\", end = \"\")\r\n print(\"%5s\" % 
\"Thu\", end = \"\")\r\n print(\"%5s\" % \"Fri\", end = \"\")\r\n print(\"%5s\" % \"Sat\")\r\n\r\n for count in range(self.calcZeller()):\r\n print(\"%5s\" % \"\", end = \"\")\r\n for count in range(5 - self.calcZeller() + 1):\r\n print(str(countDays) + \"%4s\" % \"\", end = \"\")\r\n countDays += 1\r\n print(str(countDays))\r\n countDays += 1\r\n while (countDays < self.returnMaxDay()):\r\n for count in range(6):\r\n if (countDays < self.returnMaxDay()):\r\n if (countDays < 10):\r\n print(str(countDays) + \"%4s\" % \"\", end = \"\")\r\n countDays += 1\r\n else:\r\n print(str(countDays) + \"%3s\" % \"\", end = \"\")\r\n countDays += 1\r\n print(str(countDays))\r\n countDays += 1\r\n \r\n while (countDays <= self.returnMaxDay()):\r\n print(str(countDays) + \"%3s\" % \"\")\r\n countDays += 1\r\n\r\n print(\"\")\r\n\r\n self.day = useDay \r\n#===============================================================\r\n#Author: Eric Zhou\r\n#Date: 11/7/2017\r\n#Purpose: Finds the # of days in the year of a given date\r\n#Input: self\r\n#Output: Returns the # of days in the year\r\n#===============================================================\r\n \r\n def dayOfYear(self):\r\n day = self.day\r\n month = self.month\r\n year = self.year\r\n leapYear = self.returnLeapYear()\r\n sMonth = self.month\r\n \r\n totalMonth = 1\r\n totalDays = 0\r\n while (totalMonth != (month)):\r\n self.month = totalMonth\r\n maxDay = self.returnMaxDay()\r\n totalDays += maxDay\r\n totalMonth += 1\r\n\r\n if totalMonth == month:\r\n totalDays += day\r\n self.month += 1\r\n\r\n return totalDays\r\n\r\n#===============================================================\r\n#Author: Eric Zhou\r\n#Date: 11/7/2017\r\n#Purpose: Sees if the date given is valid\r\n#Input: self\r\n#Output: Returns valid is True or False\r\n#===============================================================\r\n\r\n def calcValid(self):\r\n valid = True\r\n month = self.month\r\n day = self.day\r\n year = self.year\r\n\r\n \r\n intMonth = int(month) \r\n if not intMonth in range(1,13):\r\n print(\"Your last month input was not in the range 1 - 12\")\r\n valid = False\r\n\r\n if valid == True:\r\n leapYear = self.returnLeapYear()\r\n maxDay = self.returnMaxDay()\r\n intDay = int(day)\r\n\r\n if month ==2 and (intDay not in range(1, maxDay+2)) and leapYear == True:\r\n print(\"Your last day input was not in the range 1 - \" + str(maxDay+1))\r\n valid = False\r\n \r\n if (intDay not in range(1, maxDay+1)) and leapYear == False:\r\n print(\"Your last day input was not in the range 1 - \" + str(maxDay))\r\n valid = False\r\n\r\n if not (year in range(1600, 10000)):\r\n valid = False\r\n print(\"Your last year input was not in range year 1600-9999\")\r\n \r\n return valid\r\n\r\n#===============================================================\r\n#Author: Eric Zhou\r\n#Date: 11/7/2017\r\n#Purpose: Gets date\r\n#Input: self\r\n#Output: Returns date object\r\n#===============================================================\r\n\r\n def getDate(self):\r\n Date = False\r\n validMonth = False\r\n while validMonth == False:\r\n month = input(\"Enter the month (Numeric only): \")\r\n if month.isdigit():\r\n validMonth = True\r\n else:\r\n print(\"Your last input was not a valid positive integer\")\r\n \r\n\r\n validDay = False\r\n while validDay == False:\r\n day = input(\"Enter the day (Numeric only): \")\r\n if day.isdigit():\r\n validDay = True \r\n else:\r\n print(\"Your last input was not a valid positive integer\")\r\n validYear = False\r\n while validYear == False:\r\n 
year = input(\"Enter the year (Numeric only): \")\r\n if year.isdigit():\r\n validYear = True\r\n \r\n if validDay == True and validMonth == True and validYear == True:\r\n \r\n Date = True\r\n self.day = int(day)\r\n self.month = int(month)\r\n self.year = int(year)\r\n\r\n return Date\r\n \r\n\r\n#-------------- MAIN CODE -------------------------------------------\r\nredo = True\r\nwhile redo == True:\r\n myDate = Date()\r\n myDate.getDate()\r\n checkDate = myDate.calcValid()\r\n if checkDate == True:\r\n strDate = myDate.__str__()\r\n myCalender = myDate.displayCalendar()\r\n numberOfDays = myDate.dayOfYear()\r\n print(\"The total days of the day entered is \" + str(numberOfDays) + \" days!\")\r\n print(\"\")\r\n print(\"Enter anything if you want to run the program!\")\r\n tryAgain = input(\"Type 'N' if you do not want to rerun the program : \")\r\n if tryAgain == \"n\" or tryAgain == \"N\":\r\n redo = False\r\n else:\r\n print(\"Invalid Date! Retry again!\")\r\n tryAgain = input(\"Would you like to try again? Y/N : \")\r\n if tryAgain == \"n\" or tryAgain == \"N\":\r\n redo = False\r\n\r\n\r\n#---------------------------------------------------------------------\r\n\r\n\r\n\r\n \r\n","sub_path":"PY6 - OOP1 - Introduction/Calendar.py","file_name":"Calendar.py","file_ext":"py","file_size_in_byte":11753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"104292259","text":"from confluent_kafka import avro\nfrom confluent_kafka.avro import AvroProducer\nfrom conf.config import KAFKA_CONFIG\n\navroProducer = AvroProducer(dict(KAFKA_CONFIG))\n\nvalue_schema_str = \"\"\"\n{\n \"type\": \"record\",\n \"name\": \"AuthorEvent\",\n \"namespace\": \"com.pratilipi\",\n \"fields\": [\n {\n \"name\": \"event\",\n \"type\": \"string\"\n },\n {\n \"name\": \"authorId\",\n \"type\": \"long\"\n }\n ]\n}\n\"\"\"\n\nkey_schema_str = \"\"\"\n{\n \"name\": \"AuthorEventKey\",\n \"type\": \"string\"\n}\n\"\"\"\n\nvalue_schema = avro.loads(value_schema_str)\nkey_schema = avro.loads(key_schema_str)\n\n\nclass KafkaProducer:\n def __init__(self):\n \"\"\"init\"\"\"\n setattr(self, 'producer', avroProducer)\n setattr(self, 'valueSchema', value_schema)\n setattr(self, 'keySchema', key_schema)\n\n def produce(self, topic, value, key):\n self.producer.produce(topic=topic, value=value, value_schema=self.valueSchema, key=key,\n key_schema=self.keySchema)\n self.producer.flush()\n","sub_path":"src/kafka_producer.py","file_name":"kafka_producer.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"62916817","text":"import json\nfrom json import JSONDecodeError\nfrom urllib.parse import urlencode\nimport multiprocessing\nimport re\nimport requests\nfrom bs4 import BeautifulSoup\nfrom requests import RequestException\nfrom config import *\nimport pymongo\n\nclient = pymongo.MongoClient(MONGO_URL)\ndb = client[MONGO_DB]\n\n\ndef get_index(offset,keyword):\n data = {\n 'offset':offset,\n 'format':'json',\n 'keyword':keyword,\n 'autoload':'true',\n 'count':'20',\n 'ur_tab':3\n }\n url = 'https://www.toutiao.com/search_content/?' 
+ urlencode(data)\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.text\n return None\n except RequestException as e:\n print('访问索引页失败',e)\n return None\n\ndef parse_index(html):\n try:\n r = json.loads(html)\n if r and 'data' in r.keys():\n for item in r.get('data'):\n yield item.get('article_url')\n except JSONDecodeError:\n pass\n\ndef get_url_detail(url):\n try:\n r = requests.get(url)\n if r.status_code == 200:\n return r.text\n return None\n except RequestException as e:\n print('请求详情页失败',e)\n return None\n\ndef parse_url_detail(html,url):\n soup = BeautifulSoup(html,'lxml')\n title = soup.select('title')[0].get_text()\n images_patten = re.compile('var gallery = (.*?);',re.S)\n images = re.search(images_patten,html)\n if images:\n data = json.loads(images.group(1))\n if data and 'sub_images' in data.keys():\n img = [item.get('url') for item in data.get('sub_images')]\n return {\n 'title':title,\n 'img':img,\n 'url':url\n }\ndef save_to_mongodb(data):\n if db[MONGO_TABLE].insert(data):\n print('存储到Mongodb成功',data)\n return True\n return False\n\n\nif __name__ == '__main__':\n for i in range(0,200,20):\n html = get_index(i,'街拍')\n for url in parse_index(html):\n html = get_url_detail(url)\n if html:\n result = parse_url_detail(html,url)\n if result:\n save_to_mongodb(result)\n\n","sub_path":"jiepai/spider_jiepai.py","file_name":"spider_jiepai.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"288446207","text":"import math\n\ndef find_factors(number):\n factors = []\n tmp = number\n \n while (True):\n f = 2\n root = math.ceil(math.sqrt(tmp))\n while (f <= root): # If reached root, tmp is prime. <= instead of < to cover if tmp == 4\n if tmp % f == 0:\n #f is a prime factor of tmp\n factors.append(f)\n tmp = tmp / f\n break\n \n else:\n f += 1\n \n if f >= root: # >= instead of == covers the case when tmp == 1\n factors.append(tmp)\n break\n \n return factors\n\nprint(find_factors(600851475143))\n","sub_path":"problems/003/python/solution003.py","file_name":"solution003.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"53589790","text":"\n# Copyright (c) 2016. 
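For comparison with find_factors above, here is a compact integer-only variant of the same trial-division idea (a sketch, not the author's code); it avoids the float that the tmp / f division in find_factors introduces:

def prime_factors(n):
    # Trial division: divide out each factor completely before moving on.
    factors, f = [], 2
    while f * f <= n:
        while n % f == 0:
            factors.append(f)
            n //= f  # integer division keeps everything an int
        f += 1
    if n > 1:
        factors.append(n)  # whatever remains above sqrt(n) is prime
    return factors

# Same Project Euler #3 input as the script above: 71 * 839 * 1471 * 6857
assert prime_factors(600851475143) == [71, 839, 1471, 6857]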
Mount Sinai School of Medicine\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nCommandline arguments for loading genomic variants\n\"\"\"\n\nfrom __future__ import print_function, division, absolute_import\n\nfrom pyensembl import genome_for_reference_name\nfrom varcode import Variant, VariantCollection, load_vcf, load_maf\n\ndef add_variant_args(arg_parser):\n variant_arg_group = arg_parser.add_argument_group(\n title=\"Variants\",\n description=\"Genomic variant files\")\n\n variant_arg_group.add_argument(\n \"--vcf\",\n default=[],\n action=\"append\",\n help=\"Genomic variants in VCF format\")\n\n variant_arg_group.add_argument(\n \"--maf\",\n default=[],\n action=\"append\",\n help=\"Genomic variants in TCGA's MAF format\",)\n\n variant_arg_group.add_argument(\n \"--variant\",\n default=[],\n action=\"append\",\n nargs=4,\n metavar=(\"CHR\", \"POS\", \"REF\", \"ALT\"),\n help=\"Individual variant as 4 arguments giving chromsome, position, ref, \"\n \"and alt. Example: chr1 3848 C G. Use '.' to indicate empty alleles for \"\n \"insertions or deletions.\")\n\n variant_arg_group.add_argument(\n \"--reference-name\",\n type=str,\n help=(\n \"What reference assembly your variant coordinates are using. \"\n \"Examples: 'hg19', 'GRCh38', or 'mm9'. \"\n \"This argument is ignored for MAF files, since each row includes \"\n \"the reference. \"\n \"For VCF files, this is used if specified, and otherwise is guessed from \"\n \"the header. 
For variants specified on the commandline with --variant, \"\n            \"this option is required.\"))\n\n    variant_arg_group.add_argument(\n        \"--json-variant-files\",\n        default=[],\n        action=\"append\",\n        help=\"Path to Varcode.VariantCollection object serialized as a JSON file.\")\n\n    return variant_arg_group\n\n\ndef variant_collection_from_args(args):\n    variant_collections = []\n\n    if args.reference_name:\n        genome = genome_for_reference_name(args.reference_name)\n    else:\n        # no genome specified, assume it can be inferred from the file(s)\n        # we're loading\n        genome = None\n\n    for vcf_path in args.vcf:\n        vcf_variants = load_vcf(vcf_path, genome=genome)\n        variant_collections.append(vcf_variants)\n    for maf_path in args.maf:\n        maf_variants = load_maf(maf_path)\n        variant_collections.append(maf_variants)\n\n    if args.variant:\n        if not genome:\n            raise ValueError(\n                \"--reference-name must be specified when using --variant\")\n\n        variants = [\n            Variant(\n                chromosome,\n                start=position,\n                ref=ref,\n                alt=alt,\n                ensembl=genome)\n            for (chromosome, position, ref, alt)\n            in args.variant\n        ]\n        variant_collection = VariantCollection(variants)\n        variant_collections.append(variant_collection)\n\n    for json_path in args.json_variant_files:\n        with open(json_path, 'r') as f:\n            json_string = f.read()\n        variant_collections.append(\n            VariantCollection.from_json(json_string))\n    # only raise once every source (--vcf, --maf, --variant, and the JSON\n    # files) has had a chance to contribute variants\n    if len(variant_collections) == 0:\n        raise ValueError(\n            \"No variants loaded (use --maf, --vcf, --variant, or \"\n            \"--json-variant-files options)\")\n    elif len(variant_collections) == 1:\n        return variant_collections[0]\n    else:\n        combined_variants = []\n        for variant_collection in variant_collections:\n            combined_variants.extend(list(variant_collection))\n        return VariantCollection(combined_variants)\n","sub_path":"topiary/commandline_args/variants.py","file_name":"variants.py","file_ext":"py","file_size_in_byte":4322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"585160691","text":"# Untitled - By: Robocup - Mon Jan 27 2020\n\nimport sensor, image, time\n\nclock = time.clock()\n\n\nsensor.reset()\nsensor.set_pixformat(sensor.RGB565)\nsensor.set_framesize(sensor.QVGA)\nsensor.set_auto_exposure(False)\nsensor.set_auto_whitebal(False)\nsensor.skip_frames(time = 2000)\nsensor.set_auto_gain(False, gain_db = 0)\nsensor.set_auto_whitebal(False, (-6.02073, -5.11, 1.002))\nsensor.set_auto_exposure(False, 1800)\n\nprint('be ready')\nsensor.skip_frames(time = 5000)\n\nk = 0\nwhile(True):\n    clock.tick()\n    img = sensor.snapshot().lens_corr(strength=1.25, zoom = 1.0)\n    img.save('calibration/images/kek'+str(k)+'.jpg')\n    k +=1\n    time.sleep(250)\n    print(clock.fps())\n","sub_path":"tools/log_for_cal.py","file_name":"log_for_cal.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"337495526","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom ykdl.extractor import VideoExtractor\nfrom ykdl.videoinfo import VideoInfo\nfrom ykdl.util.html import get_content, add_header, fake_headers, get_location\nfrom ykdl.util.match import match1, matchall\nfrom xml.dom.minidom import parseString\n\n\ndef parse_cid_playurl(xml):\n    urls = []\n    size = 0\n    doc = parseString(xml.encode('utf-8'))\n    ext = doc.getElementsByTagName('format')[0].firstChild.nodeValue\n    qlt = doc.getElementsByTagName('quality')[0].firstChild.nodeValue\n    aqlts 
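A hedged sketch of how the two helpers above fit together. The variant values come straight from the --variant help text; actually running this requires pyensembl's GRCh38 data to be installed, and varcode is expected to coerce the string position (otherwise cast it with int() first):

import argparse

parser = argparse.ArgumentParser()
add_variant_args(parser)
args = parser.parse_args([
    "--variant", "chr1", "3848", "C", "G",  # example from the help string
    "--reference-name", "GRCh38",
])
variants = variant_collection_from_args(args)
print(len(variants))  # a VariantCollection holding the single variant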
= doc.getElementsByTagName('accept_quality')[0].firstChild.nodeValue.split(',')\n for durl in doc.getElementsByTagName('durl'):\n urls.append(durl.getElementsByTagName('url')[0].firstChild.nodeValue)\n size += int(durl.getElementsByTagName('size')[0].firstChild.nodeValue)\n return urls, size, ext, qlt, aqlts\n\nclass BiliBase(VideoExtractor):\n #supported_stream_profile = [u'蓝光', u'超清', u'高清', u'流畅']\n profile_2_type = {u'蓝光': 'BD', u'超清': 'TD', u'高清': 'HD', u'流畅' :'SD'}\n qlt_2_profile = {'80': u'蓝光', '64': u'超清', '48': u'高清', '32': u'高清', '16': u'流畅'}\n\n def prepare(self):\n info = VideoInfo(self.name)\n add_header(\"Referer\", \"https://www.bilibili.com/\")\n info.extra[\"referer\"] = \"https://www.bilibili.com/\"\n info.extra[\"ua\"] = fake_headers['User-Agent']\n\n self.vid, info.title = self.get_vid_title()\n\n assert self.vid, \"can't play this video: {}\".format(self.url)\n\n def get_video_info(qn=0):\n api_url = self.get_api_url(qn)\n html = get_content(api_url)\n self.logger.debug(\"HTML> {}\".format(html))\n code = match1(html, '([^<])')\n if code:\n return\n urls, size, fmt, qlt, aqlts = parse_cid_playurl(html)\n if 'mp4' in fmt:\n ext = 'mp4'\n elif 'flv' in fmt:\n ext = 'flv'\n\n prf = self.qlt_2_profile[qlt]\n st = self.profile_2_type[prf]\n if st not in info.streams:\n info.stream_types.append(st)\n info.streams[st] = {'container': ext, 'video_profile': prf, 'src' : urls, 'size': size}\n\n if qn == 0:\n aqlts.remove(qlt)\n for aqlt in aqlts:\n get_video_info(aqlt)\n\n get_video_info()\n\n assert len(info.stream_types), \"can't play this video!!\"\n return info\n","sub_path":"ykdl/extractors/bilibili/bilibase.py","file_name":"bilibase.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"607192412","text":"import re\r\ndef main():\r\n content=input(\"Enter String:\")\r\n target=input(\"Enter What Needs To Be Removed (or leave blank for whitespace):\")\r\n RegStrip(target,content)\r\ndef RegStrip(target,content):\r\n newTarget=\"\"\r\n if(target==\"\"):\r\n newTarget+=\"\\s*\"\r\n else:\r\n target=re.escape(target)\r\n for i in target:\r\n if(i!=\"\\\\\"):\r\n newTarget=newTarget+i+\"*\"\r\n else:\r\n newTarget+=i\r\n \r\n print(newTarget)\r\n startR=re.compile(rf'^{newTarget}')\r\n endR=re.compile(rf'{newTarget}$')\r\n x=startR.sub(\"\",content)\r\n Result=endR.sub(\"\",x)\r\n print(\"Here is your string:\"+Result)\r\n\r\nmain()","sub_path":"StripRegex.py","file_name":"StripRegex.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"124582078","text":"import numpy as np\nimport random\nfrom collections import Counter\nfrom abjad import *\nfrom copy import deepcopy\n\n# return a list of the two most common intervals and then all other possible intervals\ndef getIntervals(scale):\n\tall_intervals = []\n\tfor i in range(len(scale)):\n\t\tfor j in range(len(scale)):\n\t\t\tif (i != j):\n\t\t\t\tall_intervals.append(abs(scale[i] - scale[j]))\n\tcntr = Counter(all_intervals)\n\n\tmain_intervals = [cntr.most_common(2)[0][0], cntr.most_common(2)[1][0]]\n\talt_intervals = [interval for interval in all_intervals if interval not in main_intervals]\n\n\treturn main_intervals, alt_intervals\n\n# intervals\n# SCALE = [0,2,4,7,9]\n# MAIN_INTERVALS, EXTRA_INTERVALS = getIntervals(SCALE)\nMAIN_INTERVALS = [4,5]\nEXTRA_INTERVALS = [3,5,6,6,7,7,8,9]\n\n# constants\nRANGE = {\"bottom\": 6, \"top\": 
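A compact variant of the idea behind RegStrip above, stripping runs of a whole token from both ends (illustrative only; RegStrip itself builds a looser per-character pattern, so "aabb" would also match a target of "ab" there):

import re

def strip_token(s, token=""):
    # An empty token falls back to whitespace, mirroring RegStrip's default.
    pat = re.escape(token) if token else r"\s"
    return re.sub(rf"^(?:{pat})+|(?:{pat})+$", "", s)

assert strip_token("xxhelloxx", "x") == "hello"
assert strip_token("   hello   ") == "hello"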
15}\nRANGE_UPDATE = 1\nNUM_NOTES_RANGE_UPDATE = 10\nNUM_REPEATED_PHRASES = 5\nREPEATED_PHRASE_LENGTHS = [3,5,7,9]\nSTARTING_NOTE = 12\nNUM_NOTES = 8 * 48\nTEMPO = 190\nP_BASELINE = {\"up_p\": .5, \"half_p\": .5, \"intervalic_p\": .3, \"rest_p\": .000, \"rep_p\": 0}\nP_UPDATES = {\"up_p\": .4, \"half_p\": .4, \"intervalic_p\": .15, \"rest_p\": .000, \"rep_p\": 0}\n\n# varyings\nup_p = P_BASELINE[\"up_p\"]\nhalf_p = P_BASELINE[\"half_p\"]\nintervalic_p = P_BASELINE[\"intervalic_p\"]\nrest_p = P_BASELINE[\"rest_p\"]\nrep_p = P_BASELINE[\"rep_p\"]\ncounter = 0\ngo_up = True\n\n# initials\ndirection = 1 if random.random() < up_p else -1\nstep_size = MAIN_INTERVALS[0] if random.random() < half_p else MAIN_INTERVALS[1]\n\n# abjad initialization\ninstruments = set(['Flute'])\nparts = {instrument: Staff([], name=instrument) for instrument in instruments}\ncurrent_note = STARTING_NOTE\n\n# generate initial set of notes\ncontainer = []\nfor i in range(NUM_NOTES):\n\tcurrent_note = current_note + step_size * direction\n\tcontainer.append(Note(current_note, 1/8.))\n\n\t# update probabilities and varyings: keep within range\n\tup_p = up_p - P_UPDATES[\"up_p\"] if direction == 1 else up_p + P_UPDATES[\"up_p\"]\n\tif (step_size == MAIN_INTERVALS[0]):\n\t\thalf_p = half_p - P_UPDATES[\"half_p\"]\n\telif (step_size == MAIN_INTERVALS[1]):\n\t\thalf_p = half_p + P_UPDATES[\"half_p\"]\n\tintervalic_p = intervalic_p + P_UPDATES[\"intervalic_p\"]\n\trest_p = rest_p + P_UPDATES[\"rest_p\"]\n\n\t# direction\n\tif (current_note <= RANGE[\"bottom\"]):\n\t\tup_p = 1\n\telif (current_note >= RANGE[\"top\"]):\n\t\tup_p = 0\n\tdirection = 1 if random.random() < up_p else -1\t\n\n\t# main step\n\tstep_size = MAIN_INTERVALS[0] if random.random() < half_p else MAIN_INTERVALS[1]\n\n\t# extra step\n\tif (random.random() < intervalic_p):\n\t\tstep_size = random.choice(EXTRA_INTERVALS)\n\t\tintervalic_p = P_BASELINE[\"intervalic_p\"]\n\n\t# rest\n\tif (random.random() < rest_p):\n\t\tcontainer.append(Rest('r8'))\n\t\trest_p = P_BASELINE[\"rest_p\"]\n\n\t# range\n\tif i % NUM_NOTES_RANGE_UPDATE == 0:\n\t\tif go_up:\n\t\t\tRANGE[\"bottom\"] = RANGE[\"bottom\"] + RANGE_UPDATE\n\t\t\tRANGE[\"top\"] = RANGE[\"top\"] + RANGE_UPDATE\n\t\t\tcounter = counter + 1\n\t\telse:\n\t\t\tRANGE[\"bottom\"] = RANGE[\"bottom\"] - RANGE_UPDATE\n\t\t\tRANGE[\"top\"] = RANGE[\"top\"] - RANGE_UPDATE\t\n\t\t\tcounter = counter - 1\n\t\t# if random.random() <= .5:\n\t\t# \tgo_up = True\n\t\t# else:\n\t\t# \tgo_up = False\n\t\tif counter % 10 == 0:\n\t\t\tgo_up = True\n\t\telif counter % 10 == 5:\n\t\t\tgo_up = False\n\n# repeat phrases (constant probability)\nrepeated_container = deepcopy(container)\nfor j in range(len(container) - 2*(max(REPEATED_PHRASE_LENGTHS)-1) - 1):\n\tif random.random() <= rep_p:\n\t\tphrase_length = random.choice(REPEATED_PHRASE_LENGTHS)\n\t\tfor k in range(phrase_length):\n\t\t\trepeated_container[j + k + phrase_length] = container[j + k]\n\n# make/show score\nparts['Flute'].extend(repeated_container)\nscore = Score([parts[instrument] for instrument in parts], name=\"poopy\")\nattach(MetronomeMark((1,4), TEMPO), parts['Flute'][0])\nshow(score)\n\n# # out to Sibelius?\n# should_show = raw_input(\"Out to sibelius? 
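To see what getIntervals computes, here is the SCALE left commented out above run through the same counting idea (a sketch; the script itself uses the hard-coded MAIN_INTERVALS instead):

from collections import Counter

scale = [0, 2, 4, 7, 9]  # the commented-out SCALE
intervals = [abs(a - b) for a in scale for b in scale if a != b]
print(Counter(intervals).most_common(2))  # -> [(2, 6), (7, 4)]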
(y/n)\")\n# if should_show == 'y':\n# \ttopleveltools.play(score)\n","sub_path":"trash/scripts/gen_alt.py","file_name":"gen_alt.py","file_ext":"py","file_size_in_byte":3840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"258925426","text":"\"\"\"\nMath 590\nProject 1 - Sorting Algorithms\nFall 2018\n\nPartner 1: Yunhe Wang (yw314)\nPartner 2: Haili Wu (hw210)\nDate: 2018/10/28\n\"\"\"\n\n# Import time, random, plotting, stats, and numpy.\nimport time\nimport random\nimport matplotlib.pyplot as plt\nimport scipy.stats as stats\nimport numpy\n\n\"\"\"\nSelectionSort\n\"\"\"\n# SelectionSort(A):\n# The function iteratively search the unsorted components of the array for the \n# minimum element, and switch it with the first element of that unsorted\n# components and make it as \"sorted components\". Finally, as there is no elements\n# left in the unsorted components, the sorting is end and the original array\n# is sorted. \n# Input: A(a potentially unsorted array)\n# Output: A(a sorted array)\n\ndef SelectionSort(A):\n # NeedSwap is a variable to check if there is a necessity of swapping elements\n NeedSwap = 0\n # last element of array cannot compare with the elements after that,\n # so range(len(A)-1)\n for i in range(len(A)-1):\n # record the first index of \"unsorted array\"\n MinIdx = i\n for j in range(i+1, len(A)):\n if A[j] < A[MinIdx]:\n # the index of minimum element is identified\n MinIdx = j\n NeedSwap+=1\n # NeedSwap = 0 indicates that no need of swapping\n if NeedSwap:\n A[i], A[MinIdx] = A[MinIdx], A[i]\n NeedSwap = 0\n return A\n\n\"\"\"\nInsertionSort\n\"\"\"\n# InsertionSort(A):\n# The function stores the index of the array, starting at the front (k = 0), and\n# iteratively insert the element at k + 1 into the previous components. The \n# element go back the array one by one until it meets the element smaller than \n# it, or reach the beginning of the array which make the previous components to \n# be \"sorted components\".\n# Input: A(a potentially unsorted array)\n# Output: A(a sorted array)\n\ndef InsertionSort(A):\n for i in range(1, len(A)):\n # starting index of judgement of insertion\n j = i - 1\n while j >= 0:\n # use j+1 as index to make it able to continuously swap the elements\n if A[j] > A[j+1]:\n A[j], A[j+1] = A[j+1], A[j]\n else:\n # if swap is not necessary, just go to next step\n break\n j-=1 \n return A\n\n\"\"\"\nBubbleSort:\n\"\"\"\n# BubbleSort(A):\n# The function iterate through the array and compare every two adjacent elements,\n# and if they are out of order, swap them. 
Repeat swapping, and when no more swaps\n# are needed, the array is sorted.\n# Input: A(a potentially unsorted array)\n# Output: A(a sorted array)\n\ndef BubbleSort(A):\n    # flag is a variable to judge if the program can get out of the loop\n    flag = 0\n    swapped = 0\n    while not flag:\n        # start check elements from A[0] to A[len(A)-1]\n        for i in range(len(A)-1):\n            # compare consecutive 2 elements and swap them if the left one is\n            # bigger than the right one\n            # NOTE: from the comparison below through the head of MergeSort the\n            # original text was lost (stripped as an HTML-like '<...>' span);\n            # what follows is a standard reconstruction consistent with the\n            # surrounding code.\n            if A[i+1] < A[i]:\n                A[i], A[i+1] = A[i+1], A[i]\n                swapped = 1\n        # a full pass without swaps means the array is sorted\n        if not swapped:\n            flag = 1\n        swapped = 0\n    return A\n\n\"\"\"\nMergeSort\n\"\"\"\n# Merge(left, right):\n# Helper that merges two sorted arrays into a single sorted array.\n# Input: left, right (two sorted arrays)\n# Output: one sorted array containing all elements of both\n\ndef Merge(left, right):\n    result = []\n    i = 0\n    j = 0\n    while i < len(left) and j < len(right):\n        if left[i] <= right[j]:\n            result.append(left[i])\n            i += 1\n        else:\n            result.append(right[j])\n            j += 1\n    # append whatever remains on either side\n    result += left[i:]\n    result += right[j:]\n    return result\n\n# MergeSort(A):\n# The function recursively divides the array in half, sorts each half, and\n# merges the sorted halves back together.\n# Input: A(a potentially unsorted array)\n# Output: A(a sorted array)\n# (end of reconstructed section)\n\ndef MergeSort(A):\n    if len(A) > 1:\n        # divide the array in half: left part and right part\n        mid = len(A)//2\n        left = A[:mid]\n        right = A[mid:]\n        # recursive call\n        left = MergeSort(left)\n        right = MergeSort(right)\n        # recursively merge left part and right part\n        # Attention: Need copy operation to make the original array A get changed\n        A[0:len(A)] = Merge(left, right)[0:len(A)]\n    return A\n\n\"\"\"\nQuickSort\n\"\"\"\n# QuickSort(A,i,j)\n# The function performs the quick sort operation through the helper(A,i,j)\n# function. It chooses the element at the right end as the pivot, partitions\n# the current array around the pivot, and recurses until the helper's base\n# case is reached.\n# Input: A(a potentially unsorted array),\n#        i(start index of current array(inclusive)),\n#        j(end index of current array(inclusive))\n# Output: A(a sorted array)\n# Sort a list A with the call QuickSort(A, 0, len(A)).\n\ndef helper(A,i,j):\n    # base case: if the length of current array is 1, no need to sort\n    if j - i < 1:\n        return A\n    # choose the right end element of current array as the pivot\n    pivot = A[j]\n    # use two pointers: left tracks the boundary of elements smaller than the\n    # pivot, right traverses the elements of the current array\n    left = i\n    right = j - 1\n    while left < right:\n        if A[right] >= pivot:\n            right-=1\n        else:\n            # put elements smaller than the pivot on the left side\n            A[left], A[right] = A[right], A[left]\n            left+=1\n    if A[left] < pivot:\n        left+=1\n    A[j], A[left] = A[left], A[j]\n    # divide current array by the pivot\n    helper(A, i, left - 1)\n    # conquer the two child arrays by recursively calling the helper function\n    helper(A, left + 1, j)\n    return A\n\ndef QuickSort(A,i,j):\n    return helper(A, i, j - 1)\n\n\"\"\"\nisSorted\n\nThis function will take in an original unsorted list and a sorted version of\nthat same list, and return whether the list has been properly sorted.\n\nNote that this function does not change the unsorted list.\n\nINPUTS\nunA: the original unsorted list\nsA: the supposedly sorted list\n\nOUTPUTS\nreturns true or false\n\"\"\"\ndef isSorted(unA, sA):\n    # Copy the unsorted list.\n    temp = unA.copy()\n\n    # Use python's sort.\n    temp.sort()\n\n    # Check equality.\n    return temp == sA\n\n\"\"\"\ntestingSuite\n\nThis function will run a number of tests using the input algorithm, check if\nthe sorting was successful, and print which tests failed (if any).\n\nThis is not an exhaustive list of tests by any means, but covers the edge\ncases for your sorting algorithms.\n\nINPUTS\nalg: a string indicating which alg to test, the options are:\n    'SelectionSort'\n    'InsertionSort'\n    'BubbleSort'\n    'MergeSort'\n    'QuickSort'\n\nOUTPUTS\nPrinted statements indicating which tests passed/failed.\n\"\"\"\ndef testingSuite(alg):\n    # First, we seed the random number generator to ensure reproducibility.\n    random.seed(1)\n\n    # List of possible algs.\n    algs = ['SelectionSort', 'InsertionSort', \\\n            'BubbleSort', 'MergeSort', 'QuickSort']\n\n    # Make sure the input is a proper alg to consider.\n    if not alg in algs:\n        raise Exception('Not an allowed 
algorithm. Value was: {}'.format(alg))\n \n # Create a list to store all the tests.\n tests = []\n\n # Create a list to store the test names.\n message = []\n\n # Test 1: singleton array\n tests.append([1])\n message.append('singleton array')\n\n # Test 2: repeated elements\n tests.append([1,2,3,4,5,5,4,3,2,1])\n message.append('repeated elements')\n\n # Test 3: all repeated elements\n tests.append([2,2,2,2,2,2,2,2,2,2])\n message.append('all repeated elements')\n\n # Test 4: descending order\n tests.append([10,9,8,7,6,5,4,3,2,1])\n message.append('descending order')\n\n # Test 5: sorted input\n tests.append([1,2,3,4,5,6,7,8,9,10])\n message.append('sorted input')\n\n # Test 6: negative inputs\n tests.append([-1,-2,-3,-4,-5,-5,-4,-3,-2,-1])\n message.append('negative inputs')\n\n # Test 7: mixed positive/negative\n tests.append([1,2,3,4,5,-1,-2,-3,-4,-5,0])\n message.append('mixed positive/negative')\n\n # Test 8: array of size 2^k - 1\n temp = list(range(0,2**6-1))\n random.shuffle(temp)\n tests.append(temp)\n message.append('array of size 2^k - 1')\n\n # Test 9: random real numbers\n tests.append([random.random() for x in range(0,2**6-1)])\n message.append('random real numbers')\n\n # Store total number of passed tests.\n passed = 0\n\n # Loop over the tests.\n for tInd in range(0,len(tests)):\n # Copy the test for sorting.\n temp = tests[tInd].copy()\n\n # Try to sort, but allow for errors.\n try:\n # Do the sort.\n eval('%s(tests[tInd])' % alg) if alg != 'QuickSort' \\\n else eval('%s(tests[tInd],0,len(tests[tInd]))' % alg)\n \n # Check if the test succeeded.\n if isSorted(temp, tests[tInd]):\n print('Test %d Success: %s' % (tInd+1, message[tInd]))\n passed += 1\n else:\n print('Test %d FAILED: %s' % (tInd+1, message[tInd]))\n\n # Catch any errors.\n except Exception as e:\n print('')\n print('DANGER!')\n print('Test %d threw an error: %s' % (tInd+1, message[tInd]))\n print('Error: ')\n print(e)\n print('')\n\n # Done testing, print and return.\n print('')\n print('%d/9 Tests Passed' % passed)\n return\n\n\"\"\"\nmeasureTime\n\nThis function will generate lists of varying lengths and sort them using your\nimplemented fuctions. It will time these sorting operations, and store the\naverage time across 30 trials of a particular size n. It will then create plots\nof runtime vs n. 
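A small driver for the suite above (illustrative); it exercises every implementation, including QuickSort's different calling convention, which testingSuite already handles internally:

for name in ['SelectionSort', 'InsertionSort', 'BubbleSort',
             'MergeSort', 'QuickSort']:
    print('=== %s ===' % name)
    testingSuite(name)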
It will also output the slope of the log-log plots generated\nfor several of the sorting algorithms.\n\nINPUTS\nsortedFlag: set to True to test with only pre-sorted inputs\n (default = False)\nnumTrials: the number of trials to average timing data across\n (default = 30)\n\nOUTPUTS\nA number of genereated runtime vs n plot, a log-log plot for several\nalgorithms, and printed statistics about the slope of the log-log plots.\n\"\"\"\ndef measureTime(sortedFlag = False, numTrials = 30):\n # Print whether we are using sorted inputs.\n if sortedFlag:\n print('Timing algorithms using only sorted data.')\n else:\n print('Timing algorithms using random data.')\n print('')\n print('Averaging over %d Trials' % numTrials)\n print('')\n \n # First, we seed the random number generator to ensure consistency.\n random.seed(1)\n\n # We now define the range of n values to consider.\n if sortedFlag:\n # Need to look at larger n to get a good sense of runtime.\n # Look at n from 20 to 980.\n # Note that 1000 causes issues with recursion depth...\n N = list(range(1,50))\n N = [20*x for x in N]\n else:\n # Look at n from 10 to 500.\n N = list(range(1,51))\n N = [10*x for x in N]\n\n # Store the different algs to consider.\n algs = ['SelectionSort', 'InsertionSort', \\\n 'BubbleSort', 'MergeSort', \\\n 'QuickSort', 'list.sort']\n\n # Preallocate space to store the runtimes.\n tSelectionSort = N.copy()\n tInsertionSort = N.copy()\n tBubbleSort = N.copy()\n tMergeSort = N.copy()\n tQuickSort = N.copy()\n tPython = N.copy()\n\n # Create some flags for whether each sorting alg works.\n correctFlag = [True, True, True, True, True, True]\n\n # Loop over the different sizes.\n for nInd in range(0,len(N)):\n # Get the current value of n to consider.\n n = N[nInd]\n \n # Reset the running sum of the runtimes.\n timing = [0,0,0,0,0,0]\n \n # Loop over the 30 tests.\n for test in range(1,numTrials+1):\n # Create the random list of size n to sort.\n A = list(range(0,n))\n A = [random.random() for x in A]\n\n if sortedFlag:\n # Pre-sort the list.\n A.sort()\n\n # Loop over the algs.\n for aI in range(0,len(algs)):\n # Grab the name of the alg.\n alg = algs[aI]\n\n # Copy the original list for sorting.\n B = A.copy()\n \n # Time the sort.\n t = time.time()\n eval('%s(B)' % alg) if aI!=4 else eval('%s(B,0,len(B))' % alg)\n t = time.time() - t\n\n # Ensure that your function sorted the list.\n if not isSorted(A,B):\n correctFlag[aI] = False\n\n # Add the time to our running sum.\n timing[aI] += t\n\n # Now that we have completed the numTrials tests, average the times.\n timing = [x/numTrials for x in timing]\n\n # Store the times for this value of n.\n tSelectionSort[nInd] = timing[0]\n tInsertionSort[nInd] = timing[1]\n tBubbleSort[nInd] = timing[2]\n tMergeSort[nInd] = timing[3]\n tQuickSort[nInd] = timing[4]\n tPython[nInd] = timing[5]\n\n # If there was an error in one of the plotting algs, report it.\n for aI in range(0,len(algs)-1):\n if not correctFlag[aI]:\n print('%s not implemented properly!!!' 
% algs[aI])\n print('')\n\n # Now plot the timing data.\n for aI in range(0,len(algs)):\n # Get the alg.\n alg = algs[aI] if aI != 5 else 'Python'\n\n # Plot.\n plt.figure()\n eval('plt.plot(N,t%s)' % alg)\n plt.title('%s runtime versus n' % alg)\n plt.xlabel('Input Size n')\n plt.ylabel('Runtime (s)')\n if sortedFlag:\n plt.savefig('%s_sorted.png' % alg, bbox_inches='tight')\n else:\n plt.savefig('%s.png' % alg, bbox_inches='tight')\n\n # Plot them all together.\n plt.figure()\n fig, ax = plt.subplots()\n ax.plot(N,tSelectionSort, label='Selection')\n ax.plot(N,tInsertionSort, label='Insertion')\n ax.plot(N,tBubbleSort, label='Bubble')\n ax.plot(N,tMergeSort, label='Merge')\n ax.plot(N,tQuickSort, label='Quick')\n ax.plot(N,tPython, label='Python')\n legend = ax.legend(loc='upper left')\n plt.title('All sorting runtimes versus n')\n plt.xlabel('Input Size n')\n plt.ylabel('Runtime (s)')\n if sortedFlag:\n plt.savefig('sorting_sorted.png', bbox_inches='tight')\n else:\n plt.savefig('sorting.png', bbox_inches='tight')\n\n # Now look at the log of the sort times.\n logN = [(numpy.log(x) if x>0 else -6) for x in N]\n logSS = [(numpy.log(x) if x>0 else -6) for x in tSelectionSort]\n logIS = [(numpy.log(x) if x>0 else -6) for x in tInsertionSort]\n logBS = [(numpy.log(x) if x>0 else -6) for x in tBubbleSort]\n logMS = [(numpy.log(x) if x>0 else -6) for x in tMergeSort]\n logQS = [(numpy.log(x) if x>0 else -6) for x in tQuickSort]\n\n # Linear regression.\n mSS, _, _, _, _ = stats.linregress(logN,logSS)\n mIS, _, _, _, _ = stats.linregress(logN,logIS)\n mBS, _, _, _, _ = stats.linregress(logN,logBS)\n\n # Plot log-log figure.\n plt.figure()\n fig, ax = plt.subplots()\n ax.plot(logN,logSS, label='Selection')\n ax.plot(logN,logIS, label='Insertion')\n ax.plot(logN,logBS, label='Bubble')\n legend = ax.legend(loc='upper left')\n plt.title('Log-Log plot of runtimes versus n')\n plt.xlabel('log(n)')\n plt.ylabel('log(runtime)')\n if sortedFlag:\n plt.savefig('log_sorted.png', bbox_inches='tight')\n else:\n plt.savefig('log.png', bbox_inches='tight')\n\n # Print the regression info.\n print('Selection Sort log-log Slope (all n): %f' % mSS)\n print('Insertion Sort log-log Slope (all n): %f' % mIS)\n print('Bubble Sort log-log Slope (all n): %f' % mBS)\n print('')\n\n # Now strip off all n<200...\n logN = logN[19:]\n logSS = logSS[19:]\n logIS = logIS[19:]\n logBS = logBS[19:]\n logMS = logMS[19:]\n logQS = logQS[19:]\n\n # Linear regression.\n mSS, _, _, _, _ = stats.linregress(logN,logSS)\n mIS, _, _, _, _ = stats.linregress(logN,logIS)\n mBS, _, _, _, _ = stats.linregress(logN,logBS)\n mMS, _, _, _, _ = stats.linregress(logN,logMS)\n mQS, _, _, _, _ = stats.linregress(logN,logQS)\n\n # Print the regression info.\n print('Selection Sort log-log Slope (n>%d): %f' \\\n % (400 if sortedFlag else 200, mSS))\n print('Insertion Sort log-log Slope (n>%d): %f' \\\n % (400 if sortedFlag else 200, mIS))\n print('Bubble Sort log-log Slope (n>%d): %f' \\\n % (400 if sortedFlag else 200, mBS))\n print('Merge Sort log-log Slope (n>%d): %f' \\\n % (400 if sortedFlag else 200, mMS))\n print('Quick Sort log-log Slope (n>%d): %f' \\\n % (400 if sortedFlag else 200, mQS))\n\n # Close all figures.\n plt.close('all')\n","sub_path":"ECE 590/project1/project1-FINAL.py","file_name":"project1-FINAL.py","file_ext":"py","file_size_in_byte":17762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"206701689","text":"#!/usr/bin/env python\n# vim: tabstop=8 expandtab shiftwidth=4 
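Why the log-log slopes printed by measureTime estimate complexity exponents: if T(n) ~ C * n**k, then log T = k * log n + log C, a straight line of slope k. A tiny synthetic check (a sketch with fabricated runtimes, not measured data):

import numpy as np
import scipy.stats as stats

n = np.array([100, 200, 400, 800])
t = 3e-7 * n**2  # pretend runtimes from a quadratic-time sort
slope, *_ = stats.linregress(np.log(n), np.log(t))
print(round(slope, 3))  # -> 2.0, the quadratic exponent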
softtabstop=4\n\nimport os\nimport sys\nimport argparse\nimport time\n\nclass Controller:\n\n def __init__(self):\n self.demultiplexed_location = \"./processed_SE/\"\n self.bwa_mem_location = \"./BWA_mem_SE_sams/\"\n self.ref_map_location = \"./genotypes/catalog/\"\n self.denovo_map_location = \"./genotypes/noref/\"\n self.genotype_location = \"./genotypes/\"\n \n def get_raw_data(self, args):\n returning = []\n fi = open(args.raw_data, 'r')\n for data in fi.readlines():\n returning.append(data.strip())\n return returning\n\n def get_index_files(self, args):\n returning = []\n fi = open(args.index_file, 'r')\n for data in fi.readlines():\n returning.append(data.strip())\n return returning\n\n def get_string_of_files(self, prefix, file_type):\n all_files = \"\"\n all_fqgz_files = os.listdir(prefix[3:])\n for one_file in all_fqgz_files:\n num = 0 - len(file_type)\n if one_file[num:] == file_type:\n all_files += prefix + one_file + \" \"\n return all_files\n\n ## Demultiplex raw data\n def demultiplex_data(self, args):\n if args.single_end_data:\n ## Demultiplex SINGLE-END samples. This will result in one .fastq.gz file for each individual in library.\n if args.raw_data[-4:] == '.txt':\n # many .fq samples\n index = self.get_index_files(args)\n data = self.get_raw_data(args)\n self.create_dir(self.demultiplexed_location)\n self.create_dir(self.demultiplexed_location + args.species_name)\n for i, single_data in enumerate(data):\n command = \"process_radtags -f \" + single_data + \" -i gzfastq -o \" + self.demultiplexed_location + args.species_name + \"/ -b \" + index[i] + \" -e nlaIII -r -c -q\"\n print(\"\\n\\nPySTACKS: \" + command + \"\\n\")\n os.system(command)\n else: \n # one .fq sample\n command = \"process_radtags -f \" + args.raw_data + \" -i gzfastq -o \" + self.demultiplexed_location + args.species_name + \"/ -b \" + args.index_file + \" -e nlaIII -r -c -q\"\n print(\"\\n\\nPySTACKS: \" + command + \"\\n\")\n os.system(command)\n \n elif args.paired_end_data:\n ## Demultiplex PAIRED-END samples. 
This will result in two .fastq.gz (R1 and R2) files for each individual in library.\n if args.raw_data[-4:] == '.txt':\n # many .fq samples\n data = self.get_raw_data(args)\n index = self.get_index_files(args)\n self.demultiplexed_location = \"./processed_PE/\"\n self.create_dir(self.demultiplexed_location)\n self.create_dir(self.demultiplexed_location + args.species_name)\n # use paired sets of data in command\n for i in range(int(len(data)/2)): \n command = \"process_radtags -P -1 \" + data[i*2] + \" -2 \" + data[i*2+1] + \" -o \" + self.demultiplexed_location + args.species_name + \" -b \" + index[i] + \" --renz_1 nlaIII --renz_2 mluCI -c -q -r --inline_null\"\n print(\"\\n\\nPySTACKS: \" + command + \"\\n\")\n os.system(command)\n\n else:\n print(\"you must choose single-ended(-s) or paired-ended (-p)\")\n sys.exit()\n\n ## Index (if needed) reference assembly to prepare for mapping reads.\n def index_reference_genome(self, args):\n \"\"\"add check to see if indexing is needed\"\"\"#TODO\n p = args.reference_genome.split(\"/\")[-1]\n p = p[:-3]\n command = \"bwa index -p \" + p + \" -a is \" + args.reference_genome\n print(\"\\n\\nPySTACKS: \" + command + \"\\n\")\n os.system(command)\n\n ## Map reads to reference assembly.\n def map_reads_to_reference(self, args):\n p = args.reference_genome.split(\"/\")[-1]\n p = p[:-3]\n all_fqgz_files = os.listdir(self.demultiplexed_location + args.species_name + \"/\")\n self.create_dir(self.bwa_mem_location)\n for one_file in all_fqgz_files:\n if one_file[-6:] == \".fq.gz\":\n command = \"bwa mem -t 32 \" + p + \" \" + self.demultiplexed_location + args.species_name + \"/\" + one_file + \" > \" + self.bwa_mem_location + one_file[:-6] + \".sam\"\n print(\"\\n\\nPySTACKS: \" + command + \"\\n\")\n os.system(command)\n\n ## Make stacks and generate SNP catalogs\n def generate_loci_catalog(self, args):\n all_files = self.get_string_of_files(\"-s \" + self.bwa_mem_location, \".sam\")\n self.create_dir(self.genotype_location)\n self.create_dir(self.ref_map_location)\n command = \"ref_map.pl -S \" + all_files + \" -o \" + self.ref_map_location + \" -m 5 -b \" + str(args.batch_id if args.batch_id else 1) + \" -T 32 -n 1 -O \" + args.population_map\n print(\"\\n\\nPySTACKS: \" + command + \"\\n\")\n os.system(command) \n\n ## Make stacks and map denovo\n def map_reads_and_generate_loci_catalog(self, args):\n all_files = self.get_string_of_files(\"-s \" + self.demultiplexed_location + args.species_name + \"/\", \".fq.gz\")\n self.create_dir(self.genotype_location)\n self.create_dir(self.denovo_map_location)\n self.create_dir(self.denovo_map_location + \"log/\")\n command = \"denovo_map.pl \" + all_files + \" -o \" + self.denovo_map_location + \" -m 3 -M 3 -n 3 -t -T 32 -S -b \" + str(args.batch_id if args.batch_id else 1) + \" -O \" + args.population_map + \" &>>log/denovo_map1.log\"\n print(\"\\n\\nPySTACKS: \" + command + \"\\n\")\n os.system(command)\n\n ## Call SNPs\n def call_SNPs(self, args):\n command = \"populations -b \" + str(args.batch_id if args.batch_id else 1) + \" -M \" + args.population_map + \" -P \" + self.ref_map_location + \" -m 10 -p \" + str(int(args.num_populations)-1) + \" -r 0.5 -e nlaIII -t 32 -k -f p_value --vcf --plink --genepop --genomic --ordered_export --write_single_snp > population_log.txt\"\n print(\"\\n\\nPySTACKS: \" + command + \"\\n\")\n os.system(command)\n\n ## Create output directory\n def create_dir(self, dir_name):\n if not os.path.exists(dir_name):\n os.system('mkdir ' + dir_name)\n\n ## Main execution\n def 
execute(self, args):\n\n # start pipeline\n self.demultiplex_data(args)\n if args.reference_genome:\n self.index_reference_genome(args)\n self.map_reads_to_reference(args)\n self.generate_loci_catalog(args)\n else:\n self.map_reads_and_generate_loci_catalog(args)\n self.call_SNPs(args)\n","sub_path":"src/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":6712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"229603563","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#Mohamed Abedelmalik ma3495\n# Assignment 6 Part 1\n# ENGI E1006\n#\n#nn_tester.py\n#This module is in charge of formatting breast cancer data,generating synthetic\n#data and testing the classifier's accuracy on both datasets.\n#############################################################################\n\n\nimport nn\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef main():\n '''\n Creates synthetic data then runs the nvalidator on the synthetic and breast\n cancer data.\n '''\n \n #Importing and modifying cancer dataset. \n file = open(\"wdbc.data.txt\", 'r')\n A = np.fromfile(file, sep=\" \")\n A.shape = (569,32)\n Bdata = np.delete(A,0,1)\n training = Bdata\n \n #Create Synthetic dataset\n m1 = [2.5,3.5]\n m2 = [.5,1]\n \n #Covariance matrices\n cov1 = [[1,1],[1,4.5]]\n cov2 = [[2,0],[0,1]]\n \n #Made 300 points\n x1 = np.random.multivariate_normal(m1, cov1, 300)\n x2 = np.random.multivariate_normal(m2, cov2, 300)\n \n \n #plt.plot(x1[:,0],x1[:,1],\"or\")\n #plt.plot(x2[:,0],x2[:,1],\"ob\")\n \n #Make synthetic labels\n labels = np.ones(300)\n labels.shape = (300 , 1)\n \n labels2 = 2 * np.ones(300)\n labels2.shape = (300 , 1)\n \n #Add synthetic labels to data.\n x1_label = np.hstack((labels , x1))\n x2_label = np.hstack((labels2 , x2))\n \n #Combines all of the data together.\n data = np.vstack((x1_label, x2_label))\n np.random.shuffle(data)\n \n #checks the accuracy of the classifier on the syntheic data\n print(nn.n_validator(data, 9 , nn.KNNclassifier, 3,\n distType = \"sqeuclidean\"))\n \n #checks the accuracy of the classifier on the breast cancer data\n print(nn.n_validator(training, 5 , nn.KNNclassifier, 3,\n distType = \"cityblock\"))\n \n #bestK(data, [\"euclidean\", \"cityblock\", \"sqeuclidean\"],15)\n #bestK(training, [\"euclidean\", \"cityblock\", \"sqeuclidean\"],15)\n\ndef bestK(data, distanceTypes,highestK):\n \"\"\"Finds the best K to be used for each distance type.\"\"\"\n \n \n for dT in distanceTypes:\n for i in range(highestK+1)[1::2]:\n resultSet = np.zeros(20)\n for j in range(20):\n resultSet[j] = nn.n_validator(data, 10 , nn.KNNclassifier,\n i, distType = dT)\n print(i,\" \", dT,\" \", np.average(resultSet))\n\n\nmain()\n \n \n ","sub_path":"Case Study 3/nn_tester.py","file_name":"nn_tester.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"627643353","text":"import option\r\nimport os, time, shutil\r\nimport utils_map, size_ap_v2\r\n\r\n\r\nstart = time.time()\r\nopt = option.options\r\n\r\ngt_json_path = opt.gt_json_path\r\ndr_json_path = opt.dr_json_path\r\n\r\n# if there are no classes to ignore then replace None by empty list\r\nif opt.ignore is None:\r\n opt.ignore = []\r\n\r\n# make sure that the cwd() is the location of the python script (so that every path makes 
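A hypothetical invocation of the pipeline class above; each attribute name mirrors what Controller.execute and its helpers read from args, while all file names are placeholders:

import argparse

args = argparse.Namespace(
    single_end_data=True, paired_end_data=False,
    raw_data='library1.fq.gz',   # placeholder single-end .fq.gz library
    index_file='barcodes.txt',   # placeholder barcode file
    species_name='my_species',
    reference_genome=None,       # None -> the denovo_map.pl branch
    population_map='popmap.txt',
    num_populations='2',
    batch_id=1,
)
Controller().execute(args)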
sense)\r\nos.chdir(os.path.dirname(os.path.abspath(__file__)))\r\n\r\nresults_files_path = \"results\"\r\nif not os.path.exists(results_files_path):\r\n os.makedirs(results_files_path)\r\nresult_file_path = results_files_path + \"/results\" + \".txt\"\r\n\r\nplot_result_path = \"plot_figures\"\r\nif opt.draw_plot:\r\n if opt.plot_save:\r\n if os.path.exists(plot_result_path):\r\n shutil.rmtree(plot_result_path)\r\n os.makedirs(plot_result_path)\r\n else:\r\n os.makedirs(plot_result_path)\r\n\r\nclass_dict = utils_map.make_gt_list(gt_json_path)\r\ngt_counter_per_class, counter_images_per_class, gt_counter_per_size, counter_images_per_size, gt \\\r\n = utils_map.get_gt_match(gt_json_path, class_dict)\r\n\r\n\r\ngt_classes = list(class_dict.values())\r\n# sort classes alphabetically\r\n\r\ngt_classes = sorted(gt_classes)\r\nn_classes = len(gt_classes)\r\n\r\nif opt.set_class_iou is not None:\r\n utils_map.check_format_class_iou(opt, gt_classes)\r\ndet_counter_per_classes, dr = utils_map.dr_json(dr_json_path, class_dict)\r\ndr_classes = list(det_counter_per_classes.keys())\r\ndr_sizes = [\"small\", \"medium\", \"large\"]\r\n\r\n\r\ncount_true_positives = \\\r\n utils_map.calculate_ap(result_file_path, plot_result_path, gt_classes, opt, gt_counter_per_class, dr, gt)\r\n\r\nsize_count_true_positives = \\\r\n size_ap_v2.calculate_ap(gt_classes, opt, dr, gt)\r\n\r\nwith open(result_file_path, 'a') as results_file:\r\n\r\n '''ap for classes'''\r\n results_file.write(\"\\n# Number of gt objects per class\\n\")\r\n for class_name in sorted(gt_counter_per_class):\r\n results_file.write(class_name + \": \" + str(gt_counter_per_class[class_name]) + \"\\n\")\r\n\r\n for class_name in dr_classes:\r\n if class_name not in gt_classes:\r\n count_true_positives[class_name] = 0\r\n\r\n results_file.write(\"\\n# Number of detected objects per class\\n\")\r\n for class_name in sorted(gt_classes):\r\n try: n_det = det_counter_per_classes[class_name]\r\n except: n_det = 0 # If there is no gt class in dt, n_dt = 0\r\n text = class_name + \": \" + str(n_det)\r\n text += \" (tp:\" + str(count_true_positives[class_name]) + \"\"\r\n text += \", fp:\" + str(n_det - count_true_positives[class_name]) + \")\\n\"\r\n results_file.write(text)\r\n\r\n '''ground truth & detection number for sizes'''\r\n results_file.write(\"\\n# Number of gt objects per size\\n\")\r\n for class_name in gt_counter_per_size:\r\n results_file.write(class_name + \": \" + str(gt_counter_per_size[class_name]) + \"\\n\")\r\n\r\n results_file.write(\"\\n# Number of detected objects per size\\n\")\r\n for class_name in dr_sizes:\r\n text = class_name + \": \" + str(size_count_true_positives[class_name]) + \"\\n\"\r\n results_file.write(text)\r\n\r\nutils_map.print_configuration(result_file_path, opt)\r\n\r\nfinish = time.time()\r\nprint(\"time: \", finish - start)\r\n\r\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"611073975","text":"#! 
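The AP bookkeeping above (true positives per class and per size, thresholded by opt.set_class_iou) ultimately rests on box overlap. For reference, a minimal IoU helper in the usual [x1, y1, x2, y2] convention; this is an illustrative sketch, not the project's own utils_map code:

def iou(a, b):
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    union = ((a[2] - a[0]) * (a[3] - a[1])
             + (b[2] - b[0]) * (b[3] - b[1]) - inter)
    return inter / union if union else 0.0

assert abs(iou([0, 0, 2, 2], [1, 1, 3, 3]) - 1 / 7) < 1e-12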
/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\n @Author : hucheng-self\n @Time : 2018/8/4 17:08\n\"\"\"\n\nclass Response:\n def __init__(self,status=True,message='',model_view=None):\n self.status = status\n self.message = message\n self.model_view = model_view\n\n","sub_path":"Service/caimenu/response.py","file_name":"response.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"653116986","text":"\nfrom django.db import models\nfrom django.contrib.auth.models import User\n\n# Create your models here.\nclass Category(models.Model):\n nombre = models.CharField(max_length=200, verbose_name=\"Nombre \")\n created = models.DateTimeField(auto_now_add=True, verbose_name=\"Creado\")\n updated = models.DateTimeField(auto_now=True, verbose_name=\"Editado\")\n \n class Meta:\n verbose_name=\"Categoria\"\n verbose_name_plural=\"Categorias\"\n ordering= [\"-created\"]\n \n def __str__(self):\n return self.nombre\n \n \nclass PostAlquiler(models.Model):\n balcon=(('Con Balcon','Con Balcon'),('Sin Balcon','Sin Balcon'))\n balcon = models.CharField(max_length=100, choices = balcon,default=\"Con Balcon\", null=True, blank=True)\n frente= (('Externo','Externo'),('Contrafrente','Contrafrente'))\n frente = models.CharField(max_length=100,choices=frente, default = \"Externo\")\n title= models.CharField(max_length=200, verbose_name=\"Titulo \")\n content = models.TextField(verbose_name=\"Contenido\",max_length=800,)\n imagen = models.ImageField(verbose_name=\"Imagen\", blank =True ,null =True,upload_to=\"Alquileres\")\n price = models.IntegerField(verbose_name=\"Precio\")\n ambientes = models.IntegerField(verbose_name=\"Dormitorios o Ambientes\",blank =True ,null =True)\n superficie= models.IntegerField(verbose_name=\"Superficie (locales u oficinas)\",blank =True,null =True )\n baños= models.IntegerField(verbose_name=\"Baños\",blank = True,null =True)\n plantas= models.IntegerField(verbose_name=\"Plantas (locales u oficinas)\", blank = True,null =True)\n autor = models.ForeignKey(User,verbose_name=\"Autor\", on_delete=models.CASCADE)\n categories= models.ManyToManyField(Category, verbose_name=\"Categoria\")\n created = models.DateTimeField(auto_now_add=True, verbose_name=\"Creado\")\n updated = models.DateTimeField(auto_now=True, verbose_name=\"Editado\")\n class Meta:\n verbose_name=\"Publicacion\"\n verbose_name_plural=\"Plublicaciones\"\n ordering= [\"-created\"]\n def __str__(self):\n return self.title\n\nclass PostImagenes(models.Model):\n post= models.ForeignKey(PostAlquiler, default =None ,on_delete=models.CASCADE)\n image = models.ImageField(upload_to =\"Alquileres\",verbose_name=\"Imagenes\") \n class Meta:\n verbose_name=\"Agregar una Imagen\"\n verbose_name_plural=\"Agregar mas Imagenes\"\n def __str__(self):\n return self.post.title\n \n ","sub_path":"Circulo/Alquileres/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"300191127","text":"import socket\r\nimport ssl\r\nimport logging.handlers\r\n\r\nclass NGFConn():\r\n\r\n def __init__(self,ip,user,password):\r\n self.logger = logging.getLogger('MainNGFConn')\r\n self.sock=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\r\n self.sslSock=None\r\n self.boxip=ip\r\n self.boxuser=user\r\n self.boxpassword=password\r\n self.isBoxSrv = 0\r\n\r\n def connect(self,port):\r\n self.sock.connect((self.boxip,port))\r\n 
self.logger.info('connecting now to ngf on ip:'+self.boxip+' and port:'+str(port))\r\n data = self.sock.recv(1024)\r\n help = str(data)\r\n self.sock.send(b'dossl\\n')\r\n data = self.sock.recv(1024)\r\n help = str(data)\r\n self.logger.debug('connect received: '+help)\r\n if(help.find('+ ok to switch')==-1):\r\n self.logger.error('ngf does not send \"ok to switch\" return here')\r\n return -1\r\n self.sslSock=ssl.wrap_socket(self.sock)\r\n\r\n def login(self):\r\n self.logger.info('login now ')\r\n connStr = str('login '+self.boxuser+' '+self.boxpassword+'\\n').encode()\r\n self.sslSock.send(bytes(connStr))\r\n data = self.sslSock.recv(1024)\r\n help = str(data)\r\n self.logger.debug('login received: '+help)\r\n if(help.find('logged in')==-1):\r\n self.logger.error('login failed return here')\r\n return -1\r\n\r\n def disconnect(self):\r\n connStr = str('quit\\n').encode()\r\n self.sslSock.send(bytes(connStr))\r\n data = self.sslSock.recv(1024)\r\n help = str(data)\r\n self.logger.debug('disconnect received: '+help)\r\n if(help.find('bye bye')==-1):\r\n self.logger.error('disconnect failed return here')\r\n return -1\r\n\r\n def flushcache(self,type):\r\n connStr = str('flushcache '+type+'\\n').encode()\r\n self.sslSock.send(bytes(connStr))\r\n data = self.sslSock.recv(1024)\r\n help = str(data)\r\n self.logger.debug('flushcache received: '+help)\r\n if(help.find('flushed')==-1):\r\n self.logger.error('flushing failed return here')\r\n return -1\r\n\r\n def FWadmincommand(self,command):\r\n outputlist = []\r\n connStr = str(command).encode()\r\n self.sslSock.send(bytes(connStr))\r\n help = ''\r\n while (help.find('.\\\\n')==-1):\r\n data = self.sslSock.recv(1024)\r\n help = str(data)\r\n outputlist.append(help)\r\n self.logger.debug('FWadmincommand received: '+help)\r\n return outputlist","sub_path":"cmtools/src/scripts-ips/IPSTestSuite/src/NGFConn.py","file_name":"NGFConn.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"41118720","text":"from flask import Flask, render_template\r\n\r\napp = Flask(__name__)\r\n\r\n\r\n\r\n@app.route('/')\r\ndef sampleJinja():\r\n lst = ['Item', 'Item', 'Item', 'Item']\r\n name = \"Babu\"\r\n type = 123\r\n return render_template(\"sample.html\", name = name, type = lst)\r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n app.run(debug=True, port=5100)","sub_path":"001_for_Jinja2_temp.py","file_name":"001_for_Jinja2_temp.py","file_ext":"py","file_size_in_byte":325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"599840547","text":"#Assignment 2b\r\n#Write a program which takes input from user and identify that the given number is even or odd?\r\n#Author Salman Moin\r\nno1 = input('Enter first number: ')\r\nno2 = input('Enter second number:')\r\nif no1 % 2 == 0:\r\n print('First number ' + str(no1) + ' is even number')\r\nelse: \r\n print('First number ' + str(no1) + ' is odd number')\r\nif no2 % 2 != 0:\r\n print('Second number ' +str(no2) + ' is odd number')\r\nelse:\r\n print('Second number ' + str(no2) + ' is even number')\r\n","sub_path":"Assignment-2b.py","file_name":"Assignment-2b.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"154273589","text":"#!/usr/bin/env python\n\n# Copyright 2018-2019 Alvaro Bartolome\n# See LICENSE for details.\n\n__author__ = \"Alvaro Bartolome \"\n\nimport time\n\nimport 
pandas as pd\nimport pkg_resources\nimport requests\nfrom lxml.html import fromstring\n\nfrom investpy import user_agent as ua\n\n\ndef retrieve_equities():\n \"\"\"\n This function retrieves all the available equities to retrieve data from.\n All the equities available can be found at: https://es.investing.com/equities/spain\n\n Returns\n -------\n :returns a dictionary containing all the equities information\n \"\"\"\n\n params = {\n \"noconstruct\": \"1\",\n \"smlID\": \"10119\",\n \"sid\": \"\",\n \"tabletype\": \"price\",\n \"index_id\": \"all\"\n }\n\n head = {\n \"User-Agent\": ua.get_random(),\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Accept\": \"text/html\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Connection\": \"keep-alive\",\n }\n\n url = \"https://es.investing.com/equities/StocksFilter\"\n\n req = requests.get(url, params=params, headers=head)\n\n if req.status_code != 200:\n raise ConnectionError(\"ERR#0015: error \" + str(req.status_code) + \", try again later.\")\n\n root_ = fromstring(req.text)\n path_ = root_.xpath(\".//table[@id='cross_rate_markets_stocks_1']\"\n \"/tbody\"\n \"/tr\")\n\n results = list()\n\n if path_:\n for elements_ in path_:\n id_ = elements_.get('id').replace('pair_', '')\n\n for element_ in elements_.xpath('.//a'):\n tag_ = element_.get('href').replace('/equities/', '')\n full_name_ = element_.get('title').replace(' (CFD)', '')\n\n try:\n isin_ = retrieve_isin_code(tag_)\n except (ConnectionError, IndexError):\n isin_ = None\n\n data = {\n \"name\": element_.text,\n \"full_name\": full_name_.rstrip(),\n \"tag\": tag_,\n \"isin\": isin_,\n \"id\": id_\n }\n\n results.append(data)\n\n resource_package = __name__\n resource_path = '/'.join(('resources', 'equities', 'equities.csv'))\n file = pkg_resources.resource_filename(resource_package, resource_path)\n\n df = pd.DataFrame(results)\n df.to_csv(file, index=False)\n\n return df\n\n\ndef retrieve_isin_code(info):\n \"\"\"\n This is an additional function that adds data to the equities pandas.DataFrame.\n Added data in this case, are the ISIN codes of every company in order to identify it.\n\n Returns\n -------\n :returns a str that contains the ISIN code of the specified equity\n \"\"\"\n\n url = \"https://es.investing.com/equities/\" + info\n\n head = {\n \"User-Agent\": ua.get_random(),\n \"X-Requested-With\": \"XMLHttpRequest\",\n \"Accept\": \"text/html\",\n \"Accept-Encoding\": \"gzip, deflate, br\",\n \"Connection\": \"keep-alive\",\n }\n\n req = requests.get(url, headers=head, timeout=5)\n\n if req.status_code != 200:\n raise ConnectionError(\"ERR#0015: error \" + str(req.status_code) + \", try again later.\")\n\n root_ = fromstring(req.text)\n path_ = root_.xpath(\".//div[contains(@class, 'overViewBox')]\"\n \"/div[@id='quotes_summary_current_data']\"\n \"/div[@class='right']\"\n \"/div\")\n\n for p in path_:\n try:\n if p.xpath(\"span[not(@class)]\")[0].text_content().__contains__('ISIN'):\n code = p.xpath(\"span[@class='elp']\")[0].text_content().rstrip()\n time.sleep(.5)\n\n return code\n else:\n continue\n except IndexError:\n raise IndexError(\"ERR#0017: isin code unavailable or not found.\")\n\n return None\n\n\ndef equities_as_df():\n \"\"\"\n This function retrieves all the available equities and returns a pandas.DataFrame of them all.\n All the available equities can be found at: https://es.investing.com/equities/spain\n\n Returns\n -------\n :returns a pandas.DataFrame with all the available equities to retrieve data from\n \"\"\"\n\n resource_package = __name__\n 
resource_path = '/'.join(('resources', 'equities', 'equities.csv'))\n if pkg_resources.resource_exists(resource_package, resource_path):\n equities = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))\n else:\n equities = retrieve_equities()\n\n if equities is None:\n raise IOError(\"ERR#0001: equities list not found or unable to retrieve.\")\n else:\n return equities\n\n\ndef equities_as_list():\n \"\"\"\n This function retrieves all the available equities and returns a list of each one of them.\n All the available equities can be found at: https://es.investing.com/equities/spain\n\n Returns\n -------\n :returns a list with all the available equities to retrieve data from\n \"\"\"\n\n resource_package = __name__\n resource_path = '/'.join(('resources', 'equities', 'equities.csv'))\n if pkg_resources.resource_exists(resource_package, resource_path):\n equities = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))\n else:\n equities = retrieve_equities()\n\n if equities is None:\n raise IOError(\"ERR#0001: equities list not found or unable to retrieve.\")\n else:\n return equities['name'].tolist()\n","sub_path":"investpy/equities.py","file_name":"equities.py","file_ext":"py","file_size_in_byte":5461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"576516766","text":"# Copyright (c) 2016-present, Facebook, Inc.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# pyre-unsafe\n\nimport argparse\nimport functools\nimport logging\nimport os\nimport re\nimport shutil\nimport subprocess\nimport sys\nfrom collections import defaultdict\nfrom logging import Logger\nfrom pathlib import Path\nfrom typing import Any, List, Optional, Set, Union\n\nfrom .. 
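Illustrative use of the two accessors above; note the first call may hit the network to rebuild the CSV if the packaged copy is missing:

df = equities_as_df()        # pandas.DataFrame: name, full_name, tag, isin, id
names = equities_as_list()   # just the 'name' column as a plain list
print(df.shape, names[:3])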
import apply_annotations, log\nfrom ..analysis_directory import AnalysisDirectory\nfrom ..configuration import Configuration\nfrom .check import Check\nfrom .command import JSON, Command, Result, typeshed_search_path\nfrom .reporting import Reporting\n\n\nLOG: Logger = logging.getLogger(__name__)\n\n\ndef dequalify(annotation):\n return annotation.replace(\"typing.\", \"\")\n\n\ndef split_imports(types_list) -> Set[Any]:\n typing_imports = set()\n for full_type in types_list:\n if full_type:\n split_type = re.findall(r\"[\\w]+\", full_type)\n if len(split_type) > 1 and split_type[0] == \"typing\":\n typing_imports.add(split_type[1])\n return typing_imports\n\n\ndef _relativize_access(access, path):\n if not access:\n return []\n path = str(path).split(\".\", 1)[0].replace(\"/\", \".\").replace(\".__init__\", \"\")\n return access.replace(path, \"\", 1).strip(\".\").split(\".\")\n\n\nclass FunctionStub:\n def __init__(self, stub) -> None:\n self.name = stub.get(\"function_name\")\n self.actual = stub.get(\"annotation\")\n self.parameters = stub.get(\"parameters\")\n self.decorators = stub.get(\"decorators\")\n self.is_async = stub.get(\"async\")\n\n @staticmethod\n def is_instance(stub) -> bool:\n required_fields = [\"parameters\", \"decorators\", \"async\", \"function_name\"]\n return all(field in stub.keys() for field in required_fields)\n\n def _get_name(self):\n \"\"\" The last part of the access path is the function name \"\"\"\n return self.name.split(\".\")[-1] if self.name.split(\".\") else \"\"\n\n def _get_annotation(self) -> str:\n return \" -> \" + dequalify(self.actual) if self.actual else \"\"\n\n def _get_parameter_string(self) -> str:\n \"\"\" Depending on if an argument has a type, the style for default values\n changes. E.g.\n def fun(x=5)\n def fun(x : int = 5)\n \"\"\"\n parameters = []\n for parameter in self.parameters:\n name = parameter[\"name\"]\n if parameter[\"type\"]:\n name += \": \" + dequalify(parameter[\"type\"])\n if parameter[\"value\"]:\n name += \" = \" + parameter[\"value\"]\n elif parameter[\"value\"]:\n name += \"=\" + parameter[\"value\"]\n parameters.append(name)\n return \", \".join(parameters)\n\n def _get_decorator_string(self) -> str:\n decorator_string = \"\"\n for decorator in self.decorators:\n decorator_string += \"@{}\\n\".format(decorator)\n return decorator_string\n\n def _get_async_string(self) -> str:\n return \"async \" if self.is_async else \"\"\n\n def is_complete(self) -> bool:\n \"\"\" Determines if a stub completely types a function \"\"\"\n if not self.actual:\n return False\n for parameter in self.parameters:\n if parameter[\"name\"] != \"self\" and not parameter[\"type\"]:\n return False\n return True\n\n def to_string(self) -> str:\n return \"{}{}def {}({}){}: ...\".format(\n self._get_decorator_string(),\n self._get_async_string(),\n self._get_name(),\n self._get_parameter_string(),\n self._get_annotation(),\n )\n\n @functools.lru_cache(maxsize=1)\n def get_typing_imports(self):\n types_list = re.split(\"[^\\\\w.]+\", self.actual) if self.actual else []\n for parameter in self.parameters:\n if parameter[\"type\"]:\n types_list += re.split(\"[^\\\\w.]+\", parameter[\"type\"])\n return split_imports(types_list)\n\n def join_with(self, other) -> None:\n # pyre-fixme[16]: `FunctionStub` has no attribute `parent`\n if self.name != other.name and self.parent != other.parent:\n raise Exception(\"Tried to join incompatible stubs\")\n if (not self.actual) and other.actual:\n self.actual = other.actual\n for parameter, other_parameter 
in zip(self.parameters, other.parameters):\n if (not parameter[\"type\"]) and other_parameter[\"type\"]:\n parameter[\"type\"] = other_parameter[\"type\"]\n\n\nclass FieldStub:\n def __init__(self, stub) -> None:\n self.name = stub.get(\"attribute_name\")\n self.actual = stub.get(\"annotation\")\n\n @staticmethod\n def is_instance(stub) -> bool:\n required_fields = [\"annotation\", \"attribute_name\"]\n return all(field in stub.keys() for field in required_fields)\n\n def _get_name(self):\n \"\"\" The last part of the access path is the function name \"\"\"\n return self.name.split(\".\")[-1] if self.name.split(\".\") else \"\"\n\n def to_string(self) -> str:\n return \"{}: {} = ...\".format(self._get_name(), dequalify(self.actual))\n\n @functools.lru_cache(maxsize=1)\n def get_typing_imports(self):\n return split_imports(re.split(\"[^\\\\w.]+\", self.actual))\n\n\nclass Stub:\n stub: Optional[Union[FieldStub, FunctionStub]] = None\n\n def __init__(self, error) -> None:\n self.path = Path(error.path)\n self.parent = error.inference.get(\"parent\")\n self.stub = None\n if FunctionStub.is_instance(error.inference):\n self.stub = FunctionStub(error.inference)\n elif FieldStub.is_instance(error.inference):\n self.stub = FieldStub(error.inference)\n\n def is_function(self) -> bool:\n return isinstance(self.stub, FunctionStub) and not self.parent\n\n def is_method(self):\n return isinstance(self.stub, FunctionStub) and self.parent\n\n def is_field(self) -> bool:\n return isinstance(self.stub, FieldStub)\n\n def is_complete(self) -> bool:\n return isinstance(self.stub, FieldStub) or (\n isinstance(self.stub, FunctionStub)\n # pyre-fixme[16]: `Optional` has no attribute `is_complete`.\n and self.stub.is_complete()\n )\n\n def to_string(self):\n return self.stub.to_string()\n\n def get_typing_imports(self):\n return self.stub.get_typing_imports()\n\n def join_with(self, other) -> None:\n stub = self.stub\n if not self.is_field() and not other.is_field() and stub:\n stub.join_with(other.stub)\n else:\n raise Exception(\"Tried to join incompatible stubs\")\n\n\ndef join_stubs(stubs):\n # Join function stubs if they have the same parent and name\n stub_map = defaultdict(list)\n new_stubs = []\n for stub in stubs:\n if stub.is_field():\n new_stubs.append(stub)\n else:\n stub_map[(stub.parent, stub.stub.name)].append(stub)\n\n for stubs in stub_map.values():\n new_stub = stubs[0]\n for stub in stubs[1:]:\n new_stub.join_with(stub)\n new_stubs.append(new_stub)\n return new_stubs\n\n\nclass StubFile:\n def __init__(self, errors, full_only: bool = False) -> None:\n stubs = [Stub(error) for error in errors if Stub(error).stub]\n stubs = join_stubs(stubs)\n if full_only:\n stubs = [stub for stub in stubs if stub.is_complete()]\n self._stubs = stubs\n self._fields = [stub for stub in stubs if stub.is_field()]\n self._functions = [stub for stub in stubs if stub.is_function()]\n self._methods = [stub for stub in stubs if stub.is_method()]\n self._path = Path(errors[0].path)\n\n def to_string(self) -> str:\n \"\"\"We currently ignore nested classes, i.e.:\n class X:\n class Y:\n [ALL OF THIS IS IGNORED]\n \"\"\"\n classes = defaultdict(list)\n typing_imports = set()\n contents = \"\"\n stubs_in_file = []\n for stub in self._fields:\n parent = _relativize_access(stub.parent, stub.path)\n # Ignore nested classes\n if len(parent) == 1:\n classes[parent[0]].append(stub)\n else:\n stubs_in_file.append(stub)\n contents += stub.to_string() + \"\\n\"\n\n for stub in self._methods:\n parent = 
_relativize_access(stub.parent, stub.path)\n # Ignore nested classes\n if len(parent) == 1:\n classes[parent[0]].append(stub)\n\n for stub in self._functions:\n stubs_in_file.append(stub)\n contents += stub.to_string() + \"\\n\"\n\n for parent, stubs in classes.items():\n contents += \"\\nclass {}:\\n\".format(parent)\n for stub in stubs:\n stubs_in_file.append(stub)\n contents += \" {}\\n\".format(stub.to_string().replace(\"\\n\", \"\\n \"))\n\n for stub in stubs_in_file:\n typing_imports.update(stub.get_typing_imports())\n alphabetical_imports = sorted(list(typing_imports))\n if alphabetical_imports and contents != \"\":\n contents = (\n \"from typing import {}\\n\\n\".format(\n \", \".join(\n str(type_import) for type_import in alphabetical_imports\n )\n )\n + contents\n )\n return contents\n\n def is_empty(self):\n return self._stubs == []\n\n def path(self, directory):\n return directory / Path(\"{}i\".format(self._path))\n\n def output_to_file(self, path) -> None:\n contents = self.to_string()\n path.parent.mkdir(parents=True, exist_ok=True)\n path.write_text(contents)\n\n\ndef generate_stub_files(arguments, errors) -> List[StubFile]:\n errors = [\n error\n for error in errors\n if error.inference and not (error.is_external_to_global_root())\n ]\n files = defaultdict(list)\n errors.sort(key=lambda error: error.line)\n\n for error in errors:\n files[error.path].append(error)\n\n stubs = []\n for _path, errors in files.items():\n stub = StubFile(errors, full_only=arguments.full_only)\n if not stub.is_empty():\n stubs.append(stub)\n return stubs\n\n\ndef write_stubs_to_disk(arguments, stubs, type_directory) -> None:\n if type_directory.exists():\n LOG.log(log.SUCCESS, \"Deleting {}\".format(type_directory))\n shutil.rmtree(type_directory)\n type_directory.mkdir(parents=True, exist_ok=True)\n\n LOG.log(log.SUCCESS, \"Outputting inferred stubs to {}\".format(type_directory))\n for stub in stubs:\n stub.output_to_file(stub.path(type_directory))\n\n\ndef filter_paths(arguments, stubs, type_directory):\n unused_annotates = [\n path\n for path in arguments.in_place\n if all(not str(stub.path(Path(\"\"))).startswith(str(path)) for stub in stubs)\n ]\n for path in unused_annotates:\n LOG.log(log.SUCCESS, \"No annotations for {}\".format(path))\n\n return [\n stub\n for stub in stubs\n if any(\n str(stub.path(Path(\"\"))).startswith(str(path))\n for path in arguments.in_place\n )\n ]\n\n\ndef annotate_path(arguments, stub_path: str, file_path: str) -> None:\n try:\n annotated_content = apply_annotations.apply_stub_annotations(\n stub_path, file_path\n )\n with open(file_path, \"w\") as source_file:\n source_file.write(annotated_content)\n LOG.info(\"Annotated {}\".format(file_path))\n except Exception as error:\n LOG.warning(\"Failed to annotate {}\".format(file_path))\n if arguments.debug_infer:\n LOG.warning(\"\\tError: {}\".format(error))\n\n\ndef annotate_paths(\n root, arguments, formatter: Optional[str], stubs, type_directory\n) -> None:\n if arguments.in_place != []:\n stubs = filter_paths(arguments, stubs, type_directory)\n\n for stub in stubs:\n stub_path = stub.path(type_directory)\n if not stub._path.resolve().exists():\n file_path = (root / stub._path).resolve()\n else:\n file_path = stub._path.resolve()\n annotate_path(arguments, stub_path, file_path)\n if formatter:\n subprocess.call(formatter, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n\n\ndef annotate_from_existing_stubs(\n root: Path,\n arguments: argparse.Namespace,\n formatter: Optional[str],\n type_directory: 
Path,\n) -> None:\n    in_place_paths = [Path(path) for path in arguments.in_place]\n    for stub_path in type_directory.rglob(\"*.pyi\"):\n        relative_source_path_for_stub = stub_path.relative_to(\n            type_directory\n        ).with_suffix(\".py\")\n\n        if in_place_paths == [] or any(\n            path\n            in (relative_source_path_for_stub, *relative_source_path_for_stub.parents)\n            for path in in_place_paths\n        ):\n            annotate_path(\n                arguments, str(stub_path), str(root / relative_source_path_for_stub)\n            )\n    if formatter:\n        subprocess.call(formatter, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n\n\ndef file_exists(path):\n    if not os.path.exists(path):\n        raise argparse.ArgumentTypeError(\"ERROR: \" + str(path) + \" does not exist\")\n    return path\n\n\nclass Infer(Reporting):\n    NAME = \"infer\"\n\n    def __init__(\n        self,\n        arguments,\n        original_directory: str,\n        configuration: Optional[Configuration] = None,\n        analysis_directory: Optional[AnalysisDirectory] = None,\n    ) -> None:\n        arguments.show_error_traces = True\n        arguments.output = JSON\n        super(Infer, self).__init__(\n            arguments, original_directory, configuration, analysis_directory\n        )\n        self._print_errors: bool = arguments.print_only\n        self._full_only: bool = arguments.full_only\n        self._recursive: bool = arguments.recursive\n        self._in_place: bool = arguments.in_place\n        self._json: bool = arguments.json\n        self._annotate_from_existing_stubs: bool = arguments.annotate_from_existing_stubs\n        self._debug_infer: bool = arguments.debug_infer\n        self._ignore_infer: List[str] = self._configuration.ignore_infer\n\n    @classmethod\n    def add_subparser(cls, parser: argparse._SubParsersAction) -> None:\n        infer = parser.add_parser(cls.NAME)\n        infer.set_defaults(command=cls)\n        infer.add_argument(\n            \"-p\",\n            \"--print-only\",\n            action=\"store_true\",\n            help=\"Print raw JSON errors to standard output, \"\n            + \"without converting to stubs or annotating.\",\n        )\n        infer.add_argument(\n            \"-f\",\n            \"--full-only\",\n            action=\"store_true\",\n            help=\"Only output fully annotated functions. Requires infer flag.\",\n        )\n        infer.add_argument(\n            \"-r\",\n            \"--recursive\",\n            action=\"store_true\",\n            help=\"Recursively run infer until no new annotations are generated.\"\n            + \" Requires infer flag.\",\n        )\n        infer.add_argument(\n            \"-i\",\n            \"--in-place\",\n            nargs=\"*\",\n            metavar=\"path\",\n            type=file_exists,\n            help=\"Add annotations to functions in selected paths.\"\n            + \" Takes a set of files and folders to add annotations to.\"\n            + \" If no paths are given, all functions are annotated.\"\n            + \" WARNING: Modifies original files and requires infer flag and retype\",\n        )\n        infer.add_argument(\n            \"--json\",\n            action=\"store_true\",\n            help=\"Accept JSON input instead of running full check.\",\n        )\n        infer.add_argument(\n            \"--annotate-from-existing-stubs\",\n            action=\"store_true\",\n            help=\"Add annotations from existing stubs.\",\n        )\n        infer.add_argument(\n            \"--debug-infer\",\n            action=\"store_true\",\n            help=\"Print error message when file fails to annotate.\",\n        )\n\n    def run(self) -> Command:\n        self._analysis_directory.prepare()\n        if self._annotate_from_existing_stubs:\n            if self._arguments.in_place is None:\n                raise argparse.ArgumentTypeError(\n                    \"--annotate-from-existing-stubs cannot be used without the \\\n                    --in-place argument\"\n                )\n\n            type_directory = Path(os.path.join(self._log_directory, \"types\"))\n            annotate_from_existing_stubs(\n                Path(self._local_root), self._arguments, self._formatter, type_directory\n            )\n            return self\n        if self._json:\n            result = self._errors_from_stdin()\n            errors = self._get_errors(result, bypass_filtering=True)\n        else:\n            result = self._call_client(command=Infer.NAME)\n            errors = self._get_errors(result, bypass_filtering=True)\n        if self._print_errors:\n            self._print(errors)\n        else:\n            type_directory = Path(os.path.join(self._log_directory, \"types\"))\n            stubs = generate_stub_files(self._arguments, errors)\n            write_stubs_to_disk(self._arguments, stubs, type_directory)\n            if self._arguments.in_place is not None:\n                LOG.info(\"Annotating files\")\n                annotate_paths(\n                    self._configuration.local_configuration_root,\n                    self._arguments,\n                    self._formatter,\n                    stubs,\n                    type_directory,\n                )\n\n        return self\n\n    def _flags(self) -> List[str]:\n        flags = super()._flags()\n        filter_directories = self._get_directories_to_analyze()\n        if len(filter_directories):\n            flags.extend([\"-filter-directories\", \";\".join(sorted(filter_directories))])\n        search_path = self._configuration.search_path + typeshed_search_path(\n            self._configuration.typeshed\n        )\n        if search_path:\n            flags.extend([\"-search-path\", \",\".join(search_path)])\n        if len(self._ignore_infer) > 0:\n            flags.extend([\"-ignore-infer\", \";\".join(self._ignore_infer)])\n        return flags\n\n    def _errors_from_stdin(self) -> Result:\n        input = sys.stdin.read()\n        return Result(0, input)\n","sub_path":"client/commands/infer.py","file_name":"infer.py","file_ext":"py","file_size_in_byte":18398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"157245893","text":"import matplotlib.pyplot as plt; \r\nimport tensorflow as tf; \r\nimport numpy as np;\r\n\r\nwith tf.Session() as sess:\r\n\timg = tf.read_file(r\"C:\\Users\\Administrator\\Desktop\\0003\\test\\IMG_1702.JPG\") # read the image file\r\n\timg_data = tf.image.decode_jpeg(img, channels=3) # decode\r\n\t#img_data = sess.run(tf.image.decode_jpeg(img, channels=3))\r\n\timg_data = sess.run(tf.image.rgb_to_grayscale(img_data)) # convert to grayscale\r\n\tprint('size: {}'.format(img_data.shape))\r\n\tprint(\"type: %s\" % type(img_data))\r\n\tprint(img_data)","sub_path":"script/图片转换成灰度.py","file_name":"图片转换成灰度.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"200951582","text":"# Time: O(n)\n# Space: O(1)\n\n# 114\n# Given a binary tree, flatten it to a linked list in-place.\n#\n# (Flatten a binary tree to a fake \"linked list\" in pre-order traversal.\n# Here we use the right pointer in TreeNode as the next pointer in ListNode.)\n#\n# For example,\n# Given\n#\n#          1\n#         / \\\n#        2   5\n#       / \\   \\\n#      3   4   6\n# The flattened tree should look like:\n#    1\n#     \\\n#      2\n#       \\\n#        3\n#         \\\n#          4\n#           \\\n#            5\n#             \\\n#              6\n#\n\n# Definition for a binary tree node\nclass TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.left = None\n        self.right = None\n    def __repr__(self): # print right child only\n        return '{}->{}'.format(self.val, self.right)\n\n\n# Find the in-order predecessor node, similar to the Morris algorithm.\n# For the current node, if its left child is not empty, find the rightmost node of its left subtree to act as the predecessor, assign the current node's right child to the predecessor's right child, then move the current node's left child to its right and set the left child to None. Continue with the next node in the list until every node has been processed.\nclass Solution: # USE THIS\n    # @param root, a tree node\n    # @return nothing, do it in place\n    def flatten(self, root):\n        while root:\n            if root.left: # if left subtree is not empty, fold into right subtree. Repeat folding.\n                pre = root.left\n                while pre.right:\n                    pre = pre.right\n\n                pre.right = root.right\n                root.right = root.left\n                root.left = None\n            root = root.right\n\n# modified postOrder (right->left->parent), maintain the 'tail' var (always update tail as current processed node)\n# preOrder does NOT work for this problem, because when changing cur.right to cur.left, we lose the right subtree!\nclass Solution2: # also very good Time O(n) Space O(h)\n    def flatten(self, root):\n        def postOrder(node):\n            if node:\n                postOrder(node.right)\n                postOrder(node.left)\n                node.right = self.tail\n                node.left = None\n                self.tail = node\n\n        self.tail = None\n        postOrder(root)\n\nclass Solution3: # same as solution 2 but not use global var (passing param instead, not pretty, don't use)\n    def flatten(self, root):\n        self.flattenRecu(root, None)\n\n    def flattenRecu(self, root, list_head):\n        if root:\n            list_head = self.flattenRecu(root.right, list_head)\n            list_head = self.flattenRecu(root.left, list_head)\n            root.right = list_head\n            root.left = None\n            return root\n        else:\n            return list_head\n\nif __name__ == \"__main__\":\n    root = TreeNode(1)\n    root.left = TreeNode(2)\n    root.left.left = TreeNode(3)\n    root.left.right = TreeNode(4)\n    root.right = TreeNode(5)\n    root.right.right = TreeNode(6)\n\n    Solution().flatten(root)\n    print(root) # 1->2->3->4->5->6->None\n","sub_path":"Python/flatten-binary-tree-to-linked-list.py","file_name":"flatten-binary-tree-to-linked-list.py","file_ext":"py","file_size_in_byte":3074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"32889996","text":"#!/usr/bin/env python\nimport argparse\nimport sys\nfrom pathlib import Path\n\nPROJECT_TEMPLATE = '''\nname: dbt_{adapter}\nversion: {version}\nconfig-version: 2\n\nmacro-paths: [\"macros\"]\n'''\n\n\nSETUP_PY_TEMPLATE = '''\n#!/usr/bin/env python\nfrom setuptools import find_packages\nfrom setuptools import setup\n\npackage_name = \"dbt-{adapter}\"\npackage_version = \"{version}\"\ndescription = \"\"\"The {adapter} adapter plugin for dbt (data build tool)\"\"\"\n\nsetup(\n    name=package_name,\n    version=package_version,\n    description=description,\n    long_description=description,\n    author={author_name},\n
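# ---- Editor's note (illustrative addition, not part of the dataset records) ----
# The flatten-binary-tree record above describes rewiring each node's in-order
# predecessor so the tree collapses into a right-spine "linked list". A minimal,
# self-contained sketch of that predecessor-rewiring idea; the Node class and the
# flatten_inplace name are hypothetical, not taken from the record.
class Node:
    def __init__(self, val, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def flatten_inplace(root):
    # O(n) time, O(1) extra space: the left subtree is folded into the right
    # by hooking the old right child onto the rightmost node (the in-order
    # predecessor) of the left subtree.
    node = root
    while node:
        if node.left:
            pre = node.left
            while pre.right:  # walk to the rightmost node of the left subtree
                pre = pre.right
            pre.right = node.right  # splice the original right subtree back on
            node.right = node.left
            node.left = None
        node = node.right

if __name__ == "__main__":
    tree = Node(1, Node(2, Node(3), Node(4)), Node(5, None, Node(6)))
    flatten_inplace(tree)
    values = []
    while tree:
        values.append(tree.val)
        tree = tree.right
    assert values == [1, 2, 3, 4, 5, 6]  # pre-order sequence, as the record states
# ---- End of editor's note ----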
author_email={author_email},\n url={url},\n packages=find_packages(),\n package_data={{\n 'dbt': [\n{package_data}\n ]\n }},\n install_requires=[\n \"{dbt_core_str}\",{dependencies}\n ]\n)\n'''.lstrip()\n\nADAPTER_INIT_TEMPLATE = '''\nfrom dbt.adapters.{adapter}.connections import {title_adapter}ConnectionManager\nfrom dbt.adapters.{adapter}.connections import {title_adapter}Credentials\nfrom dbt.adapters.{adapter}.impl import {title_adapter}Adapter\n\nfrom dbt.adapters.base import AdapterPlugin\nfrom dbt.include import {adapter}\n\n\nPlugin = AdapterPlugin(\n adapter={title_adapter}Adapter,\n credentials={title_adapter}Credentials,\n include_path={adapter}.PACKAGE_PATH)\n'''.lstrip()\n\n\nADAPTER_CONNECTIONS_TEMPLATE = '''\nfrom dataclasses import dataclass\n\nfrom dbt.adapters.base import Credentials\nfrom dbt.adapters.{adapter_src} import {connection_cls}\n\n\n@dataclass\nclass {title_adapter}Credentials(Credentials):\n # Add credentials members here, like:\n # host: str\n # port: int\n # username: str\n # password: str\n\n @property\n def type(self):\n return '{adapter}'\n\n def _connection_keys(self):\n # return an iterator of keys to pretty-print in 'dbt debug'.\n # Omit fields like 'password'!\n raise NotImplementedError\n\n\nclass {title_adapter}ConnectionManager({connection_cls}):\n TYPE = '{adapter}'\n'''.lstrip()\n\n\nADAPTER_IMPL_TEMPLATE = '''\nfrom dbt.adapters.{adapter_src} import {adapter_cls}\nfrom dbt.adapters.{adapter} import {title_adapter}ConnectionManager\n\n\nclass {title_adapter}Adapter({adapter_cls}):\n ConnectionManager = {title_adapter}ConnectionManager\n'''.lstrip()\n\n\nCATALOG_MACRO_TEMPLATE = \"\"\"\n{{% macro {adapter}__get_catalog(information_schema, schemas) -%}}\n\n {{% set msg -%}}\n get_catalog not implemented for {adapter}\n {{%- endset %}}\n\n {{{{ exceptions.raise_compiler_error(msg) }}}}\n{{% endmacro %}}\n\"\"\"\n\n\nINCLUDE_INIT_TEXT = \"\"\"\nimport os\nPACKAGE_PATH = os.path.dirname(__file__)\n\"\"\".lstrip()\n\n\nclass Builder:\n def __init__(self, args):\n self.args = args\n self.adapter = self.args.adapter\n self.dest = self.args.root / self.adapter\n # self.dbt_dir = self.dest / 'dbt'\n self.dbt_dir = Path('dbt')\n self.adapters = self.dbt_dir / 'adapters' / self.adapter\n self.include = self.dbt_dir / 'include' / self.adapter\n if self.dest.exists():\n raise Exception('path exists')\n\n def go(self):\n self.write_setup()\n self.write_adapter()\n self.write_include()\n\n def include_paths(self):\n return [\n self.include / 'macros' / '*.sql',\n self.include / 'dbt_project.yml',\n ]\n\n def dest_path(self, *paths):\n return self.dest.joinpath(*paths)\n\n def write_setup(self):\n self.dest.mkdir(parents=True, exist_ok=True)\n\n dbt_core_str = 'dbt-core=={}'.format(self.args.dbt_core_version)\n\n # 12-space indent, then single-quoted with a trailing comma. 
The path\n # should not be the actual path from the root but from the 'dbt' dir\n # (because this is in the 'dbt' package)\n package_data = '\\n'.join(\n \"{}'{!s}',\".format(12*' ', p.relative_to(self.dbt_dir))\n for p in self.include_paths()\n )\n\n setup_py_contents = SETUP_PY_TEMPLATE.format(\n adapter=self.adapter,\n version=self.args.package_version,\n author_name=self.args.author,\n author_email=self.args.email,\n url=self.args.url,\n dbt_core_str=dbt_core_str,\n dependencies=self.args.dependency,\n package_data=package_data,\n )\n self.dest_path('setup.py').write_text(setup_py_contents)\n\n def _make_adapter_kwargs(self):\n if self.args.sql:\n kwargs = {\n 'adapter_src': 'sql',\n 'adapter_cls': 'SQLAdapter',\n 'connection_cls': 'SQLConnectionManager',\n }\n else:\n kwargs = {\n 'adapter_src': 'base',\n 'adapter_cls': 'BaseAdapter',\n 'connection_cls': 'BaseConnectionManager',\n }\n kwargs.update({\n 'upper_adapter': self.adapter.upper(),\n 'title_adapter': self.args.title_case,\n 'adapter': self.adapter,\n })\n\n return kwargs\n\n def write_adapter(self):\n adapters_dest = self.dest_path(self.adapters)\n adapters_dest.mkdir(parents=True, exist_ok=True)\n\n kwargs = self._make_adapter_kwargs()\n\n init_text = ADAPTER_INIT_TEMPLATE.format(\n adapter=self.adapter,\n title_adapter=self.args.title_case\n )\n connections_text = ADAPTER_CONNECTIONS_TEMPLATE.format(**kwargs)\n impl_text = ADAPTER_IMPL_TEMPLATE.format(**kwargs)\n\n (adapters_dest / '__init__.py').write_text(init_text)\n (adapters_dest / 'connections.py').write_text(connections_text)\n (adapters_dest / 'impl.py').write_text(impl_text)\n\n def write_include(self):\n include_dest = self.dest_path(self.include)\n include_dest.mkdir(parents=True, exist_ok=True)\n macros_dest = include_dest / 'macros'\n macros_dest.mkdir(exist_ok=True)\n\n dbt_project_text = PROJECT_TEMPLATE.format(\n adapter=self.adapter,\n version=self.args.project_version,\n )\n catalog_macro_text = CATALOG_MACRO_TEMPLATE.format(\n adapter=self.adapter\n )\n\n (include_dest / '__init__.py').write_text(INCLUDE_INIT_TEXT)\n (include_dest / 'dbt_project.yml').write_text(dbt_project_text)\n # make sure something satisfies the 'include/macros/*.sql' in setup.py\n (macros_dest / 'catalog.sql').write_text(catalog_macro_text)\n\n\ndef parse_args(argv=None):\n if argv is None:\n argv = sys.argv[1:]\n parser = argparse.ArgumentParser()\n parser.add_argument('root', type=Path)\n parser.add_argument('adapter')\n parser.add_argument('--title-case', '-t', default=None)\n parser.add_argument('--dependency', action='append')\n parser.add_argument('--dbt-core-version', default='0.16.1rc1')\n parser.add_argument('--email')\n parser.add_argument('--author')\n parser.add_argument('--url')\n parser.add_argument('--sql', action='store_true')\n parser.add_argument('--package-version', default='0.0.1')\n parser.add_argument('--project-version', default='1.0')\n parser.add_argument(\n '--no-dependency', action='store_false', dest='set_dependency'\n )\n parsed = parser.parse_args()\n\n if parsed.title_case is None:\n parsed.title_case = parsed.adapter.title()\n\n if parsed.set_dependency:\n \n prefix = '\\n '\n \n if parsed.dependency:\n # ['a', 'b'] => \"'a',\\n 'b'\"; ['a'] -> \"'a',\"\n \n parsed.dependency = prefix + prefix.join(\n \"'{}',\".format(d) for d in parsed.dependency\n )\n else:\n parsed.dependency = prefix + ''\n else:\n parsed.dependency = ''\n\n if parsed.email is not None:\n parsed.email = \"'{}'\".format(parsed.email)\n else:\n parsed.email = ''\n if parsed.author 
is not None:\n parsed.author = \"'{}'\".format(parsed.author)\n else:\n parsed.author = ''\n if parsed.url is not None:\n parsed.url = \"'{}'\".format(parsed.url)\n else:\n parsed.url = ''\n return parsed\n\n\ndef main():\n builder = Builder(parse_args())\n builder.go()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"core/scripts/create_adapter_plugins.py","file_name":"create_adapter_plugins.py","file_ext":"py","file_size_in_byte":8235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"193285254","text":"from tkinter import *\nfrom tkinter import messagebox\nimport storage as thedata\n\ncolors = [\"white\", \"gray\", \"#687b87\", \"#3279a8\", \"red\"]\napp = Tk()\n\nclass database_communication:\n def adding_to_database(self, task_name, task_message):\n try:\n if task_name != \" \" and task_message != \" \":\n thedata.add_data(thedata,task_message)\n else:\n print(\"Make sure all fields are filled\")\n except:\n print(\"Something went wrong!!!!\")\n def updating_a_record(self):\n pass\n def deleting_a_record(self, x):\n \n if str(x).isnumeric():\n thedata.delete_data(x)\n\n else:\n return \"Must be an integer\"\n\n \n \nlocal_access = database_communication()\nclass homePage:\n def __init__(self, root, bg_color):\n self.app = root\n self.app.geometry(\"800x400\") \n self.app.title(\"Get Easy\")\n self.app.config(bg = bg_color[0])\n self.colors = bg_color\n\n def header_menu(self, ):\n self.header_frame = Frame(self.app, bg = self.colors[2])\n self.header_frame.place(relwidth =1, relheight = 0.15, rely = 0)\n Label(self.header_frame, text = \"Gat Easy\", font = (\"bold\", 20),fg = self.colors[0], bg = self.colors[2]).place(\n relwidth =0.5, relheight =0.5, relx =0.02, rely =0.25\n )\n def side_menu(self, ):\n self.side_frame = Frame(self.app, bg = self.colors[2])\n self.side_frame.place(relwidth =0.2, relheight = 1, relx = 0)\n ######### \n ###################\n self.home_Button = Button(self.side_frame, text = \"Home\", font = (\"bold\", 15), fg = self.colors[2], anchor = W, command = lambda: self.homeMenu(self.main_section()))\n self.add_event_Button = Button(self.side_frame, text = \"Add Task\", font = (\"bold\", 15), fg = self.colors[2], anchor = W, command = lambda: self.add_item_menu(self.main_section()))\n self.update_event_Button = Button(self.side_frame, text = \"Update Task\", font = (\"bold\", 15), fg = self.colors[2], anchor = W, command = lambda: self.update_item_menu(self.main_section())) \n self.delete_event_Button = Button(self.side_frame, text = \"Delete Task\", font = (\"bold\", 15), fg = self.colors[2], anchor = W, command = lambda: self.remove_item_menu(self.main_section())) \n self.close_app_Button = Button(self.side_frame, text = \"Exit\", font = (\"bold\", 15), fg = self.colors[2], anchor = W, command = self.close_application)\n \n self.home_Button.place(relwidth =0.98, relheight = 0.1, relx = 0.01, rely = 0.25)\n self.add_event_Button.place(relwidth =0.98, relheight = 0.1, relx = 0.01, rely = 0.37)\n self.update_event_Button.place(relwidth =0.98, relheight = 0.1, relx = 0.01, rely = 0.49)\n self.delete_event_Button.place(relwidth =0.98, relheight = 0.1, relx = 0.01, rely = 0.61)\n self.close_app_Button.place(relwidth =0.98, relheight = 0.1, relx = 0.01, rely = 0.73)\n\n def main_section(self):\n self.main_box = Frame(self.app, relief = RAISED)\n self.main_box.place(relwidth = 0.79, relheight = 0.825, relx =0.205, rely = 0.16)\n return self.main_box\n \n def homeMenu(self, mainbox):\n Label(mainbox, text = 
\"hello there\", font = (\"bold\", 15)).place(relwidth = 0.5, relheight = 0.25, relx =0.25, rely = 0.25)\n\n def add_item_menu(self, mainbox):\n add_box = LabelFrame(mainbox, text = \"Add Task\", font = (\"bold\", 15))\n add_box.place(relwidth = 0.95, relheight =0.95, relx = 0.02, rely = 0.02)\n \n Label(add_box, text = \"Event Name\", font = (\"bold\", 15),anchor = W ).place(\n relwidth = 0.5, relheight =0.1, relx = 0.02, rely = 0.05\n )\n ##############\n event_name = Entry(add_box, font = (\"bold\", 15))\n event_name.place(relwidth = 0.65, relheight =0.12, relx = 0.02, rely = 0.18)\n Label(add_box, text = \"Message\", font = (\"bold\", 15), anchor = W).place(\n relwidth = 0.5, relheight =0.1, relx = 0.02, rely = 0.32\n )\n message = Entry(add_box, font = (\"bold\", 15))\n message.place(relwidth = 0.65, relheight = 0.45, relx = 0.02, rely = 0.42)\n\n add_btn = Button(add_box, text = \"Add\", font = (\"bold\", 15), command = lambda: local_access.adding_to_database(event_name.get(), message.get()))\n add_btn.place(relwidth = 0.2, relheight =0.1, relx = 0.73, rely = 0.77)\n\n #FUNCTION TO DISPLAY THE SEARCH ENTRY AND SEARCH BUTTON\n\n def search_menu(self, the_box, btn_text ):\n Label(the_box, text = \"Event Number\", font = (\"bold\", 15), anchor = W).place(\n relwidth = 0.5, relheight =0.1, relx = 0.02, rely = 0.02\n )\n event_no = Entry(the_box, font = (\"bold\", 15))\n event_no.place(relwidth = 0.65, relheight =0.12, relx = 0.02, rely = 0.18)\n \"\"\"\n search button will call function(1st func) to call a function(2nd func) in storage file to check if the event is available or not then \n the function(1st func) will call the search result function(3rd) to display the appropriate information based on the results.\n \"\"\"\n search_btn = Button(the_box, text = \"Search\", font = (\"bold\", 15))\n search_btn.place(relwidth = 0.2, relheight =0.12, relx = 0.7, rely = 0.18)\n\n ## Function to display final output based on the result of the search.\n def search_result(self, the_box, results):\n # Label(the_box, text = \"Event Number\", font = (\"bold\", 15), anchor = W).place(\n # relwidth = 0.5, relheight =0.1, relx = 0.02, rely = 0.02\n # )\n # event_no = Entry(the_box, font = (\"bold\", 15))\n # event_no.place(relwidth = 0.65, relheight =0.12, relx = 0.02, rely = 0.18)\n # search_btn = Button(the_box, text = \"Search\", font = (\"bold\", 15))\n # search_btn.place(relwidth = 0.2, relheight =0.12, relx = 0.7, rely = 0.18)\n\n # is_found = True\n # if is_found:\n # event_name = Entry(the_box, font = (\"bold\", 15))\n # event_name.place(relwidth = 0.65, relheight =0.12, relx = 0.02, rely = 0.18)\n # Label(the_box, text = \"Message\", font = (\"bold\", 15), anchor = W).place(\n # relwidth = 0.5, relheight =0.1, relx = 0.02, rely = 0.32\n # )\n # message = Entry(the_box, font = (\"bold\", 15))\n # message.place(relwidth = 0.65, relheight = 0.45, relx = 0.02, rely = 0.42)\n # action_btn = Button(the_box, text = btn_text, font = (\"bold\", 15))\n # action_btn.place(relwidth = 0.2, relheight =0.12, relx = 0.7, rely = 0.18)\n # else:\n # Label(the_box, text = \"Event Not Found!!\", font = (\"bold\", 15)).place(\n # relwidth = 0.5, relheight =0.45, relx = 0.25, rely = 0.42\n # )\n pass\n\n def remove_item_menu(self, main_box):\n remove_box = LabelFrame(main_box, text = \"Remove Task\", font = (\"bold\", 15))\n remove_box.place(relwidth = 0.95, relheight =0.95, relx = 0.02, rely = 0.02)\n #self.search_result(remove_box, \"Remove\")\n\n def update_item_menu(self, main_box):\n update_box = 
LabelFrame(main_box, text = \"Update Task\", font = (\"bold\", 15))\n        update_box.place(relwidth = 0.95, relheight =0.95, relx = 0.02, rely = 0.02)\n        #self.search_result(update_box, \"Update\")\n\n\n    def close_application(self):\n        response = messagebox.askokcancel(\"Close Application\", \"Do you want to exit!\")\n        if response == True:\n            self.app.quit()\n\nx = homePage(app, colors)\nx.header_menu()\nx.side_menu()\nx.main_section()\n\napp.mainloop()\n","sub_path":"TASKMANAGER.py","file_name":"TASKMANAGER.py","file_ext":"py","file_size_in_byte":8092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"468866584","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport urllib.parse\nimport urllib.request\nimport codecs\nimport re\n#import json\n#import os\ndef get_response(url):\n    response = urllib.request.urlopen(url)\n    main_page = response.read().decode('utf-8')\n    return main_page\n\ndef main():\n    url = 'http://www.poi86.com/poi/amap.html'\n    main_page = get_response(url)\n    # split by region; main content xpath: /html/body/div[2]/div/div[2]/ul\n    pt_main = re.compile(r'
(.+?)
', re.S)\n    main_content = pt_main.findall(main_page)\n    print(len(main_content))\n    print(main_content[0])\n\n    result_data = {} # save city names\n\n    for prov in main_content:\n        p_name = re.search(r'(.+?)', prov).group(1)\n        result_data[p_name] = {}\n        # city / prefecture / district\n        pt_city = re.compile(r'
(.+?)
    ', re.S)\n cities = pt_city.findall(prov)\n for city in cities:\n city_infos = re.findall(r'', city)\n link_container = {}\n print(len(city_infos))\n for c_info in city_infos:\n sec = c_info.split('\"')\n city_name = sec[3].split('POI')[0]\n city_link = sec[1]\n #link_container.update({city_name: city_link})\n result_data[p_name].update({city_name: [city_name]})\n print(city_name)\n \n sub_main_page = get_response('http://www.poi86.com'+city_link)\n sub_main_content = pt_main.findall(sub_main_page)\n for county in sub_main_content:\n county_name = re.search(r'(.+?)', county).group(1)\n result_data[p_name][city_name].append(county_name)\n print(county_name)\n \n print(result_data)\n with codecs.open('city_names_from_POI.txt', 'w', 'utf-8') as f:\n for prop, city in result_data.items():\n for c, c_names in city.items():\n f.write(\"%s\\t%s\"%(c,' \\t\\n'.join(c_names)))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"weathercn/get_city_names.py","file_name":"get_city_names.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"256989600","text":"#!/usr/bin/python\r\nimport os\r\nfrom logging import StreamHandler, FileHandler, Formatter, getLogger, INFO, DEBUG\r\nlogPath = '../.'\r\nlogPath = logPath + os.sep\r\n\r\n\r\nclass ServiceLog(object):\r\n def __init__(self, log_name):\r\n self.log_name = log_name\r\n\r\n def _logger_die(self, logger, msg):\r\n logger.error(msg)\r\n raise AssertionError(msg)\r\n\r\n def ret_log_file_path(self):\r\n return logPath + self.log_name\r\n\r\n def logger_writer(self, date):\r\n formatter = Formatter('[%(asctime)s][%(filename)s:%(lineno)s][%(levelname)s][%(thread)d] %(message)s')\r\n DataLog = getLogger(self.log_name)\r\n DataLog.handlers = []\r\n DataLog.setLevel(DEBUG)\r\n DataLog.propagate = False\r\n\r\n console = StreamHandler()\r\n console.setFormatter(formatter)\r\n console.setLevel(INFO)\r\n DataLog.addHandler(console)\r\n\r\n logfiledebug = FileHandler(filename=logPath + self.log_name + '.' + date + '.debug.log', mode='a')\r\n logfiledebug.setFormatter(formatter)\r\n logfiledebug.setLevel(DEBUG)\r\n DataLog.addHandler(logfiledebug)\r\n\r\n logfileinfo = FileHandler(filename=logPath + self.log_name + '.' 
+ date + '.info.log', mode='a')\r\n logfileinfo.setFormatter(formatter)\r\n logfileinfo.setLevel(INFO)\r\n DataLog.addHandler(logfileinfo)\r\n DataLog.die = lambda msg: self._logger_die(DataLog, msg)\r\n return DataLog\r\n","sub_path":"server/service_log.py","file_name":"service_log.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"559775165","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport argparse\n\n__all__ = [\"get_predict_args_parser\"]\n\nif os.name == \"nt\":\n bert_dir = \"\"\n root_dir = \"\"\nelse:\n bert_dir = \"/home1/liushaoweihua/pretrained_lm/bert_chinese/\"\n root_dir = \"/home1/jupyterlab/Keras-Bert-Ner/\"\n \ndef get_predict_args_parser():\n \n parser = argparse.ArgumentParser()\n \n data_group = parser.add_argument_group(\"Data File Paths\", \"Config the train/dev/test file paths\")\n data_group.add_argument(\"-test_data\", type=str, default=os.path.join(root_dir, \"data\", \"test.txt\"), required=True, help=\"(REQUIRED) Test data path\")\n data_group.add_argument(\"-max_len\", type=int, default=64, help=\"(OPTIONAL) Max sequence length. Default is 64\")\n \n model_group = parser.add_argument_group(\"Model Paths\", \"Config the model paths\")\n model_group.add_argument(\"-model_path\", type=str, default=os.path.join(root_dir, \"models\"), required=True, help=\"(REQUIRED) Model path\")\n model_group.add_argument(\"-model_name\", type=str, default=\"BERT-BILSTM-CRF.h5\", required=True, help=\"(REQUIRED) Model name\")\n \n output_group = parser.add_argument_group(\"Output Paths\", \"Config the output paths\")\n output_group.add_argument(\"-output_path\", type=str, default=os.path.join(root_dir, \"test_outputs\"), help=\"(OPTIONAL) Output file paths\")\n \n bert_group = parser.add_argument_group(\"BERT File paths\", \"Config the vocab of a pretrained or fine-tuned BERT model\")\n bert_group.add_argument(\"-bert_vocab\", type=str, default=os.path.join(bert_dir, \"vocab.txt\"), required=True, help=\"(REQUIRED) vocab.txt\")\n \n action_group = parser.add_argument_group(\"Action Configs\", \"Config the actions during running\")\n action_group.add_argument(\"-device_map\", type=str, default=\"cpu\", help=\"(OPTIONAL) Use CPU/GPU to train. If use CPU, then 'cpu'. If use GPU, then assign the devices, such as '0'. 
Default is 'cpu'\")\n \n return parser.parse_args()","sub_path":"keras_bert_ner/utils/predict_helper.py","file_name":"predict_helper.py","file_ext":"py","file_size_in_byte":2042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"368043734","text":"#!/usr/bin/env python\n\nimport requests\nimport sys\nfrom pymongo import MongoClient\nfrom bson import ObjectId\n\nurl = 'http://127.0.0.1:5000/transacao/'\n\nclient = MongoClient('localhost', 27017)\n\ndb = client['controle-financeiro']\ncontas = db['conta']\n\nid_conta_debitada = sys.argv[1]\nid_conta_creditada = sys.argv[2]\nvalor = sys.argv[3]\ndescricao = sys.argv[4]\n\nconta_debitada = contas.find_one({\"_id\": ObjectId(id_conta_debitada)})\nconta_creditada = contas.find_one({\"_id\": ObjectId(id_conta_creditada)})\n\nheaders = {\n 'conta_debitada': conta_debitada[\"_id\"],\n 'conta_creditada': conta_creditada[\"_id\"],\n 'valor': valor,\n 'descricao': descricao\n}\n\nr = requests.post(url, data=headers)\nprint(r.text)\n","sub_path":"scripts/post_transacao.py","file_name":"post_transacao.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"382477539","text":"# Simulation\n#\n# This class reads the ROOT objects and creates its own data collection, which has the analytic boolean and numeric variables which we need. Though\n# it does not manage the creation or editing of histograms.\n#\n# Last Modified: 2013-01-22 22:37\n#\n\n####################################################################################################\n# DO NOT EDIT - PROTOTYPE CODE\n####################################################################################################\n\nimport sys\nimport math\n\nimport ROOT\nimport RooTrackerTools\n\nimport ParticleCodes\nimport EventCodes\n\nimport Geometry\n\n\nclass EventCollection:\n\t\n\tdef __init__(This):\n\t\t\n\t\tThis.Events = []\n\t\t\n\tdef InterpretEvent(This, TrueGRTVertices, ReconstructedPIDs, TrueTrajectories, ReconstructedTECTs, SelectionReferences):\n\t\t\n\t\tEvent1 = Event()\n\t\t\n\t\tEvent1.TrueEvent.InterpretGRTVertices(TrueGRTVertices)\n\t\tEvent1.ReconstructedEvent.InterpretPIDs(ReconstructedPIDs, TrueTrajectories)\n\t\tEvent1.ReconstructedEvent.InterpretTECTs(ReconstructedTECTs)\n\t\t\n\t\tfor SelectionReference in SelectionReferences:\n\t\t\tEvent1.SelectionResponses[SelectionReference] = False\n\t\t\n\t\tThis.Events.append(Event1)\n\t\t\n\t\treturn Event1\n\nclass Event:\n\t\n\tdef __init__(This):\n\t\t\n\t\tThis.TrueEvent = TrueEvent()\n\t\tThis.ReconstructedEvent = ReconstructedEvent()\n\t\n\t\tThis.SelectionResponses = {}\n\t\n\nclass TrueEvent:\n\t\n\tdef __init__(This):\n\t\t\n\t\tThis.TrueVertices = []\n\t\tThis.NumberOfTrueVertices = 0\n\t\t\n\t\tThis.EventCodeDeconstructor = EventCodes.Deconstructor()\n\t\t\n\t\tThis.ContainsDelta1HadronToProtonPhotonInteraction = False\n\t\tThis.ContainsDelta1HadronToProtonPi0MesonInteraction = False\n\t\t\n\t\tThis.NumberOfDelta1HadronToProtonPhotonInteractions = 0\n\t\tThis.NumberOfDelta1HadronToProtonPhotonCCInteractions = 0\n\t\tThis.NumberOfDelta1HadronToProtonPhotonNCInteractions = 0\n\t\tThis.NumberOfDelta1HadronToProtonPi0MesonInteractions = 0\n\t\t\n\t\tThis.RTTools = RooTrackerTools.RooTrackerTools()\n\t\t\n\tdef InterpretGRTVertices(This, GRTVertices):\n\t\t\t\t\n\t\tfor GRTVertex1 in GRTVertices:\n\t\t\t\n\t\t\tTrueVertex1 = 
TrueVertex()\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tThis.EventCodeDeconstructor.ReadCode(GRTVertex1.EvtCode)\n\t\t\t\n\t\t\tTrueVertex1.CurrentCode = This.EventCodeDeconstructor.Elements[\"Current Code\"].Content\n\t\t\tTrueVertex1.ProcessCode = This.EventCodeDeconstructor.Elements[\"Process Code\"].Content\n\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\t\t\n\t\t\tfor GRTParticle1 in This.RTTools.getParticles(GRTVertex1): # Look through each particle in the vertex.\n\t\t\t\n\t\t\t\tTrueParticle1 = TrueParticle()\n\t\t\t\t\n\t\t\t\tTrueParticle1.PDGCode = GRTParticle1.pdg # Fixed: the TrueParticle class defines PDGCode, not PDG.\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tif ((GRTParticle1.pdg == ParticleCodes.MuNeutrino) and (GRTParticle1.status == 0)): # See whether there is an incident mu neutrino.\n\t\t\t\t\tTrueVertex1.ContainsIncidentMuonNeutrino = True\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\n\t\t\t\tif (GRTParticle1.pdg == ParticleCodes.Delta1Baryon): # See whether there is a Delta 1 Hadron\n\n\t\t\t\t\tFirstProduct = GRTParticle1.first_daughter\n\t\t\t\t\tLastProduct = GRTParticle1.last_daughter\n\t\t\t\t\tNumberOfProducts = LastProduct - FirstProduct + 1\n\n\t\t\t\t\tif (NumberOfProducts == 2):\t# The Delta 1 Hadron must decay into exactly two particles.\n\t\t\t\t\t\t\n\t\t\t\t\t\tfor GRTParticle2 in This.RTTools.getParticles(GRTVertex1):\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\tif (GRTParticle2.i >= FirstProduct and GRTParticle2.i <= LastProduct):\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif (GRTParticle2.pdg == ParticleCodes.Proton):\n\t\t\t\t\t\t\t\t\tTrueVertex1.ContainsProtonFromDelta1Hadron = True\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif (GRTParticle2.pdg == ParticleCodes.Photon):\n\t\t\t\t\t\t\t\t\tTrueVertex1.ContainsPhotonFromDelta1Hadron = True\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t\tif (GRTParticle2.pdg == ParticleCodes.Pi0Meson):\n\t\t\t\t\t\t\t\t\tTrueVertex1.ContainsPi0MesonFromDelta1Hadron = True\n\t\t\t\t\n\t\t\t\tTrueVertex1.TrueParticles.append(TrueParticle1)\n\t\t\t\t\n\t\t\t\t\n\t\t\tTrueVertex1.IsDelta1HadronToProtonPhotonInteraction\t= (TrueVertex1.ContainsIncidentMuonNeutrino and TrueVertex1.ContainsProtonFromDelta1Hadron and TrueVertex1.ContainsPhotonFromDelta1Hadron and TrueVertex1.CurrentCode == \"RP\")\n\t\t\tTrueVertex1.IsDelta1HadronToProtonPi0MesonInteraction = (TrueVertex1.ContainsIncidentMuonNeutrino and TrueVertex1.ContainsProtonFromDelta1Hadron and TrueVertex1.ContainsPi0MesonFromDelta1Hadron and TrueVertex1.CurrentCode == \"RP\")\t\t\n\t\t\t\n\t\t\tif (TrueVertex1.IsDelta1HadronToProtonPhotonInteraction):\n\t\t\t\t\n\t\t\t\tThis.NumberOfDelta1HadronToProtonPhotonInteractions += 1\n\t\t\t\t\n\t\t\t\tif (TrueVertex1.CurrentCode == \"CC\"):\n\t\t\t\t\tThis.NumberOfDelta1HadronToProtonPhotonCCInteractions += 1\n\t\t\t\tif (TrueVertex1.CurrentCode == \"NC\"):\n\t\t\t\t\tThis.NumberOfDelta1HadronToProtonPhotonNCInteractions += 1 # Fixed: the NC case must increment the NC counter, not the CC counter.\n\t\t\t\t\t\n\t\t\tif (TrueVertex1.IsDelta1HadronToProtonPi0MesonInteraction):\n\t\t\t\tThis.NumberOfDelta1HadronToProtonPi0MesonInteractions += 1\n\t\t\t\n\t\t\tThis.TrueVertices.append(TrueVertex1)\n\t\t\n\t\tThis.NumberOfTrueVertices = len(This.TrueVertices)\t\t\t\t\t\t\t\t\t\t\t\n\n\nclass TrueVertex:\n\t\n\tdef __init__(This):\n\t\t\n\t\tThis.EventCode = \"\"\n\t\t\n\t\tThis.CurrentCode = \"\"\n\t\tThis.ProcessCode = \"\"\n\t\t\n\t\tThis.TrueParticles = []\n\t\t\n\t\tThis.ContainsIncidentMuonNeutrino = False\n\t\tThis.ContainsProtonFromDelta1Hadron = False\n\t\tThis.ContainsPhotonFromDelta1Hadron = False\n\t\tThis.ContainsPi0MesonFromDelta1Hadron = False\n\t\t\n\t\tThis.IsDelta1HadronToProtonPhotonInteraction = False\n\t\tThis.IsDelta1HadronToProtonPi0MesonInteraction = False\n\n\nclass TrueParticle:\n\t\n\tdef __init__(This):\n\t\t\n\t\tThis.PDGCode = 0\n\t\tThis.Position = Geometry.FourVector()\n\n\nclass ReconstructedEvent:\n\t\n\tdef __init__(This):\n\t\t\n\t\tThis.ReconstructedObjects = []\n\t\tThis.ReconstructedPaths = []\n\t\tThis.ReconstructedTorrents = []\n\t\tThis.ReconstructedParticles = []\n\t\t\n\t\tThis.Protons = []\n\t\tThis.Photons = []\n\t\t\n\t\tThis.PullVariableLimits = {}\n\n\t\tThis.PullVariableLimits[ParticleCodes.Electron] = 0.5\n\t\tThis.PullVariableLimits[ParticleCodes.Kaon1] = 0.5\n\t\tThis.PullVariableLimits[ParticleCodes.MuLepton] = 0.5\n\t\tThis.PullVariableLimits[ParticleCodes.Pi1Meson] = 0.5\n\t\tThis.PullVariableLimits[ParticleCodes.Proton] = 0.5\n\t\t\n\t\tThis.NumberOfProtonPaths = 0\n\t\tThis.NumberOfCorrectlyReconstructedProtonPaths = 0\n\t\tThis.NumberOfMuLeptonPaths = 0\n\t\tThis.NumberOfElectronPaths = 0\n\t\tThis.NumberOfAntielectronPaths = 0\n\t\t\t\t\n\tdef InterpretPIDs(This, PIDs, Truth_Trajectories):\n\t\t\n\t\tfor PID in PIDs:\n\t\t\t\n\t\t\tReconstructedObject1 = ReconstructedObject()\n\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\tfor TPCTrack1 in PID.TPC:\n\t\t\t\tReconstructedObject1.NumberOfPoints = TPCTrack1.NNodes\n\t\t\t\t\n\t\t\t\tif (TPCTrack1.NNodes > 18):\n\t\t\t\t\tReconstructedObject1.ContainsCorrectNumberOfPoints = True\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\tif (len(PID.ParticleIds) > 0):\n\t\t\t\t\n\t\t\t\tReconstructedObject1.EnergyMomentum = PID.FrontMomentum\n\t\t\t\tReconstructedObject1.EnergyMomentum_Initial.Y = PID.FrontMomentum\n\t\t\t\tReconstructedObject1.EnergyMomentum_Initial.Z = PID.FrontMomentum\n\t\t\t\tReconstructedObject1.EnergyMomentum_Initial.T = PID.FrontMomentum\n\t\t\t\t\n\t\t\t\tReconstructedObject1.Position_Initial.X = PID.FrontPosition.X()\n\t\t\t\tReconstructedObject1.Position_Initial.Y = PID.FrontPosition.Y()\n\t\t\t\tReconstructedObject1.Position_Initial.Z = PID.FrontPosition.Z()\n\t\t\t\tReconstructedObject1.Position_Initial.T = PID.FrontPosition.T()\n\t\t\t\t\n\t\t\t\tReconstructedObject1.Direction_Initial.X = PID.FrontDirection.X()\n\t\t\t\tReconstructedObject1.Direction_Initial.Y = PID.FrontDirection.Y()\n\t\t\t\tReconstructedObject1.Direction_Initial.Z = PID.FrontDirection.Z()\n\t\t\t\t\t\n\t\t\t\tReconstructedObject1.DetectorCode = str(PID.Detectors)\n\n\t\t\t\tBestTPCTrackIndex = 0\n\t\t\t\tMaximumNumberOfPoints = 0\n\t\t\t\t\t\t\n\t\t\t\tfor i, TPCTrack1 in enumerate(PID.TPC):\n\t\t\t\t\tif (TPCTrack1.NNodes > MaximumNumberOfPoints):\n\t\t\t\t\t\tMaximumNumberOfPoints = TPCTrack1.NNodes # Fixed: the running maximum must be updated, otherwise every track overwrites the index.\n\t\t\t\t\t\tBestTPCTrackIndex = i\n\t\t\t\t\t\n\t\t\t\tfor j, TPCTrack1 in enumerate(PID.TPC):\n\t\t\t\t\tif (j == BestTPCTrackIndex): # Fixed: compare against the best track index, not the stale loop variable from the previous loop.\n\t\t\t\t\t\tReconstructedObject1.PullVariables[ParticleCodes.Electron] = TPCTrack1.PullEle\n\t\t\t\t\t\tReconstructedObject1.PullVariables[ParticleCodes.Kaon1] = TPCTrack1.PullKaon\n\t\t\t\t\t\tReconstructedObject1.PullVariables[ParticleCodes.MuLepton] = TPCTrack1.PullMuon\n\t\t\t\t\t\tReconstructedObject1.PullVariables[ParticleCodes.Pi1Meson] = TPCTrack1.PullPion\n\t\t\t\t\t\tReconstructedObject1.PullVariables[ParticleCodes.Proton] = TPCTrack1.PullProton\n\n\t\t\t\tLowestPull = 100\n\t\t\t\t\n\t\t\t\tfor ParticleCode, ParticlePull in ReconstructedObject1.PullVariables.iteritems():\n\t\t\t\t\t\n\t\t\t\t\tif (math.fabs(ParticlePull) < math.fabs(LowestPull) and math.fabs(ParticlePull) < math.fabs(This.PullVariableLimits[ParticleCode])):\n\t\t\t\t\t\t\n\t\t\t\t\t\tLowestPull = math.fabs(ParticlePull)\n\t\t\t\t\t\t\n\t\t\t\t\t\tReconstructedObject1.ParticleCode =
ParticleCode\n\n\n\t\t\t\tfor Trajectory1 in Truth_Trajectories: # Compare to truth trajectories.\n\t\t\t\t\tif (Trajectory1.ID == PID.TrueParticle.ID):\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tReconstructedObject1.TrueParticleCode = Trajectory1.PDG\n\t\t\t\t\t\t\t\t\n\t\t\t\t\t\tReconstructedObject1.TrueEnergyMomentum_Initial.X = Trajectory1.InitMomentum.X()\n\t\t\t\t\t\tReconstructedObject1.TrueEnergyMomentum_Initial.Y = Trajectory1.InitMomentum.Y()\n\t\t\t\t\t\tReconstructedObject1.TrueEnergyMomentum_Initial.Z = Trajectory1.InitMomentum.Z()\n\t\t\t\t\t\tReconstructedObject1.TrueEnergyMomentum_Initial.T = Trajectory1.InitMomentum.E()\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\n\t\t\tThis.ReconstructedObjects.append(ReconstructedObject1)\n\t\t\n\t\t\n\t\tfor ReconstructedObject2 in This.ReconstructedObjects:\n\t\t\t\n\t\t\tif (ReconstructedObject2.ParticleCode == ParticleCodes.Proton):\t\t\t\t\n\t\t\t\tThis.NumberOfProtonPaths += 1\n\t\t\t\t\t\t\t\t\t\n\t\t\t\tif (ReconstructedObject2.IsCorrectlyReconstructed()):\n\t\t\t\t\tThis.NumberOfCorrectlyReconstructedProtonPaths += 1\n\t\t\t\t\n\t\t\t\tProton1 = ReconstructedParticle()\n\t\t\n\t\t\t\tProton1.EnergyMomentum_Initial = ReconstructedObject2.EnergyMomentum_Initial\n\t\t\t\tProton1.Position_Initial = ReconstructedObject2.Position_Initial\n\t\t\t\t\t\t\t\t\n\t\t\t\tThis.Protons.append(Proton1)\n\t\t\t\t\t\t\t\n\t\t\tif (ReconstructedObject2.ParticleCode == ParticleCodes.MuLepton):\t\t\t\t\n\t\t\t\tThis.NumberOfMuLeptonPaths += 1\n\t\t\t\t\n\t\t\tif (ReconstructedObject2.ParticleCode == ParticleCodes.Electron):\t\t\t\t\n\t\t\t\tThis.NumberOfElectronPaths += 1\n\t\t\t\t\n\t\t\tif (ReconstructedObject2.ParticleCode == ParticleCodes.AntiElectron):\t\t\t\t\n\t\t\t\tThis.NumberOfAntielectronPaths += 1\n\t\t\t\t\n\t\t\t\t\n\tdef InterpretTECTs(This, TECTs):\n\t\t\t\t\n\t\tfor TECT1 in TECTs:\n\t\t\t\n\t\t\tReconstructedTorrent1 = ReconstructedTorrent()\n\t\t\t\t\t\t\t\t\t\n\t\t\tTECT_Energy = TECT1.EMEnergyFit_Result\n\t\t\t\t\t\t\t\t\t\t\n\t\t\tif (TECT1.IsShowerLike): # Determine whether the TEC reconstruction is track-like or torrent-like, it shouldn't matter which for the photon, but the directions are different.\n\t\t\t\n\t\t\t\tTECT_UnitDirection = Geometry.ThreeVector(TECT1.Shower.Direction.X(), TECT1.Shower.Direction.Y(), TECT1.Shower.Direction.Z())\n\t\t\t\t\n\t\t\t\tReconstructedTorrent1.Position_Initial.T = TECT1.Shower.Position.T()\n\t\t\t\tReconstructedTorrent1.Position_Initial.X = TECT1.Shower.Position.X()\n\t\t\t\tReconstructedTorrent1.Position_Initial.Y = TECT1.Shower.Position.Y()\n\t\t\t\tReconstructedTorrent1.Position_Initial.Z = TECT1.Shower.Position.Z()\n\t\t\t\t\n\t\t\telif (TECT1.IsTrackLike):\n\t\t\t\t\n\t\t\t\tTECT_UnitDirection = Geometry.ThreeVector(TECT1.Track.Direction.X(), TECT1.Track.Direction.Y(), TECT1.Track.Direction.Z())\n\t\t\t\t\n\t\t\t\tReconstructedTorrent1.Position_Initial.T = TECT1.Track.Position.T()\n\t\t\t\tReconstructedTorrent1.Position_Initial.X = TECT1.Track.Position.X()\n\t\t\t\tReconstructedTorrent1.Position_Initial.Y = TECT1.Track.Position.Y()\n\t\t\t\tReconstructedTorrent1.Position_Initial.Z = TECT1.Track.Position.Z()\n\t\t\t\t\t\t\n\t\t\tif (TECT_UnitDirection.Modulus() < 1.1):#For some reason there sometimes are occurences where the magnitude is far above 1. This if filters them out. 
We should probably look into this further\n\t\t\t\t\t\t\t\t\t\n\t\t\t\tReconstructedTorrent1.EnergyMomentum_Initial.T = TECT_Energy\n\t\t\t\tReconstructedTorrent1.EnergyMomentum_Initial.X = TECT_UnitDirection.X * TECT_Energy\n\t\t\t\tReconstructedTorrent1.EnergyMomentum_Initial.Y = TECT_UnitDirection.Y * TECT_Energy\n\t\t\t\tReconstructedTorrent1.EnergyMomentum_Initial.Z = TECT_UnitDirection.Z * TECT_Energy\n\t\t\t\t\t\t\t\t\t\t\t\n\t\t\t\tReconstructedTorrent1.Direction_Initial.X = TECT_UnitDirection.X\n\t\t\t\tReconstructedTorrent1.Direction_Initial.Y = TECT_UnitDirection.Y\n\t\t\t\tReconstructedTorrent1.Direction_Initial.Z = TECT_UnitDirection.Z\n\t\t\t\t\n\t\t\t\tThis.ReconstructedTorrents.append(ReconstructedTorrent1)\t\t\n\t\t\n\nclass ReconstructedObject:\n\t\n\tdef __init__(This):\n\t\t\n\t\tThis.ParticleCode = 0\n\t\t\n\t\tThis.NumberOfPoints = 0\n\t\tThis.ContainsCorrectNumberOfPoints = False\n\t\t\n\t\tThis.DetectorCode = \"\"\n\t\t\n\t\tThis.EnergyMomentum = 0\n\t\t\n\t\tThis.EnergyMomentum_Initial = Geometry.FourVector()\n\t\tThis.EnergyMomentum_Final = Geometry.FourVector()\n\t\t\n\t\tThis.Position_Initial = Geometry.FourVector()\n\t\tThis.Position_Final = Geometry.FourVector()\n\t\t\n\t\tThis.Direction_Initial = Geometry.ThreeVector()\n\t\tThis.Direction_Final = Geometry.ThreeVector()\n\t\t\n\t\tThis.PullVariables = {}\n\t\t\n\t\t\n\t\tThis.TrueParticleCode = 0\n\t\t\n\t\tThis.TrueEnergyMomentum_Initial = Geometry.FourVector()\n\t\tThis.TrueEnergyMomentum_Final = Geometry.FourVector()\n\t\t\n\tdef IsCorrectlyReconstructed(This):\n\t\t\n\t\treturn (This.ParticleCode == This.TrueParticleCode)\n\t\t\n\tdef IsWithinVolume(This, ThreeDimensionalObject1):\n\t\t\n\t\treturn ThreeDimensionalObject1.Contains(This.Position_Initial) # Fixed: Position_Initial must be read from This, otherwise this raises a NameError.\n\t\t\nclass ReconstructedPath(ReconstructedObject):\n\t\n\tdef __init__(This):\n\t\tReconstructedObject.__init__(This)\n\t\t\n\t\tThis.ObjectType = \"Path\"\n\nclass ReconstructedTorrent(ReconstructedObject):\n\t\n\tdef __init__(This):\n\t\tReconstructedObject.__init__(This)\n\t\t\n\t\tThis.ObjectType = \"Torrent\"\n\nclass ReconstructedParticle(ReconstructedObject):\n\t\n\tdef __init__(This):\n\t\tReconstructedObject.__init__(This)\n\t\t\n\t\tThis.ObjectType = \"Particle\"\n","sub_path":"Code Archive/Analysis_57_2013-02-06/Analysis_57_2013-02-06/Simulation.py","file_name":"Simulation.py","file_ext":"py","file_size_in_byte":13245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"588926056","text":"# !/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport roslaunch\nimport rospy\nimport sys\nimport signal\nimport tty, termios\n\n\ninit_x = 0.8\ninit_y = 0.8\ninit_a = 0.8\nlaunch_nav = None\nnav_running = None\n\ndef quit(signum,frame):\n\n    print('stop serialTest.py')\n    sys.exit()\n\ndef setLaunchFile():\n\n    with open('/home/sf/catkin_ws/src/arbotix_ros/mbot_navigation/launch/test.launch', 'r') as f:\n        newText = f.read().replace('px__', str(init_x))\n        newText = newText.replace('py__', str(init_y))\n        newText = newText.replace('pa__', str(init_a))\n    rospy.sleep(0.5)\n    with open('/home/sf/catkin_ws/src/arbotix_ros/mbot_navigation/launch/amcl.launch', 'w') as f:\n        f.write(newText)\n\ndef launch_nav_start():\n    global launch_nav,nav_running\n    if not nav_running:\n        uuid_nav = roslaunch.rlutil.get_or_generate_uuid(None, False)\n        roslaunch.configure_logging(uuid_nav)\n        launch_nav = roslaunch.parent.ROSLaunchParent(uuid_nav, [\"/home/sf/catkin_ws/src/arbotix_ros/mbot_navigation/launch/nav_cloister_demo.launch\"])\n
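# ---- Editor's note (illustrative addition, not part of the dataset records) ----
# The Simulation.py record above assigns a particle hypothesis by taking the pull
# variable with the smallest |pull| that also lies inside that hypothesis' allowed
# window (0.5 for every species there). A standalone sketch of the same selection
# rule; the particle codes and limit values below are hypothetical examples.
import math

def select_hypothesis(pulls, limits):
    """Return the code with the smallest |pull| that is below its limit, else None."""
    best_code, best_pull = None, float("inf")
    for code, pull in pulls.items():
        # Mirror the record's condition: |pull| must beat the best so far
        # and also fall inside the per-hypothesis window.
        if math.fabs(pull) < limits.get(code, 0.0) and math.fabs(pull) < best_pull:
            best_pull = math.fabs(pull)
            best_code = code
    return best_code

if __name__ == "__main__":
    limits = {"electron": 0.5, "muon": 0.5, "proton": 0.5}
    pulls = {"electron": 0.9, "muon": -0.2, "proton": 0.4}
    assert select_hypothesis(pulls, limits) == "muon"
# ---- End of editor's note ----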
launch_nav.start()\n nav_running = True\n\ndef launch_nav_shutdown():\n global launch_nav, nav_running\n if nav_running:\n # save_pose()\n launch_nav.shutdown()\n nav_running = False\n\nif __name__ == '__main__':\n\n signal.signal(signal.SIGINT, quit)\n signal.signal(signal.SIGTERM, quit)\n\n while True:\n signal.signal(signal.SIGINT, quit)\n signal.signal(signal.SIGTERM, quit)\n launch_nav_start()\n rospy.sleep(10)\n print('change launch File finished')\n # print('waiting for Q')\n #\n # fd = sys.stdin.fileno()\n # old_settings = termios.tcgetattr(fd)\n # try:\n # tty.setraw(fd)\n # ch = sys.stdin.read(1)\n # finally:\n # termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n\n\n # if ch == 'q':\n # launch_nav_shutdown()\n setLaunchFile()\n rospy.sleep(0.5)\n print('try to restart nav')\n launch_nav.start()\n print('restart')\n rospy.sleep(1000)\n break","sub_path":"Test/testEditLaunch.py","file_name":"testEditLaunch.py","file_ext":"py","file_size_in_byte":2107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"1407642","text":"#Weighted Uniform Strings\r\nimport string\r\nfrom datetime import datetime\r\n\r\n\r\ndef pesar(s):\r\n dic = {}\r\n conj = set(s)\r\n for i in conj:\r\n for letter in range(len(list(string.ascii_lowercase))):\r\n if list(string.ascii_lowercase)[letter] == i:\r\n dic[list(string.ascii_lowercase)[letter]] = letter + 1\r\n return dic\r\n\r\n\r\ndef weightedUniformStrings(s, queries):\r\n dic = pesar(s)\r\n subsIn = [[] for i in range(len(queries))]\r\n \r\n for char, peso in dic.items():\r\n for q in range(len(queries)):\r\n if queries[q] % peso == 0:\r\n aux = char * (queries[q] // peso)\r\n subsIn[q].append(aux in s) \r\n res = [any(i) for i in subsIn]\r\n for i in range(len(res)):\r\n if res[i] == True:\r\n res[i] = 'Yes'\r\n else:\r\n res[i] = 'No'\r\n return res \r\n\r\nif __name__ == '__main__':\r\n inicio = datetime.now()\r\n\r\n s = input()\r\n\r\n queries_count = int(input())\r\n\r\n queries = []\r\n\r\n for _ in range(queries_count):\r\n queries_item = int(input())\r\n queries.append(queries_item)\r\n\r\n result = weightedUniformStrings(s, queries)\r\n print(result)\r\n print(datetime.now() - inicio)\r\n","sub_path":"HackerRank/Tentativa weighted.py","file_name":"Tentativa weighted.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"332650668","text":"\"\"\"Oscillator network ODE\"\"\"\n\nimport numpy as np\n\nfrom scipy.integrate import ode\nfrom robot_parameters import RobotParameters\n\n\ndef network_ode(_time, state, robot_parameters):\n \"\"\"Network_ODE\n\n Parameters\n ----------\n _time: \n Time\n state: \n ODE states at time _time\n robot_parameters: \n Instance of RobotParameters\n\n Return\n ------\n :\n Returns derivative of state (phases and amplitudes)\n \"\"\"\n n_oscillators = robot_parameters.n_oscillators\n \n phases = state[:n_oscillators]\n amplitudes = state[n_oscillators:2*n_oscillators]\n \n temp_phases = np.zeros_like(phases)\n temp_amplitudes = np.zeros_like(amplitudes)\n \n for i in range(0,n_oscillators):\n for j in range(0,n_oscillators):\n temp_phases[i] += amplitudes[j]*robot_parameters.coupling_weights[i,j]*np.sin(phases[j]-phases[i]-robot_parameters.phase_bias[i,j]) \n \n temp_phases[i] += 2*np.pi*robot_parameters.freqs[i] \n temp_amplitudes[i] = robot_parameters.rates[i]*(robot_parameters.nominal_amplitudes[i]-amplitudes[i])\n \n #print(np.concatenate([temp_phases, 
temp_amplitudes]))\n \n return np.concatenate([temp_phases, temp_amplitudes])\n \n\n\ndef motor_output(phases, amplitudes, iteration=None):\n \"\"\"Motor output.\n\n Parameters\n ----------\n phases: \n Phases of the oscillator\n amplitudes: \n Amplitudes of the oscillator\n\n Returns\n -------\n : \n Motor outputs for joint in the system.\n\n \"\"\"\n \n body_joints = 10\n nb_limbs = 4\n motor = np.zeros_like(phases)[:body_joints+nb_limbs]\n \n for i in range(0,body_joints):\n motor[i] = amplitudes[i]*(1+np.cos(phases[i]))-amplitudes[i+body_joints]*(1+np.cos(phases[i+body_joints]))\n \n for i in range(body_joints,len(motor)):\n if amplitudes[i+body_joints] != 0.0:\n motor[i] = phases[i+body_joints]\n \n return motor\n\nclass RobotState(np.ndarray):\n \"\"\"Robot state\"\"\"\n\n def __init__(self, *_0, **_1):\n super(RobotState, self).__init__()\n self[:] = 0.0\n\n @classmethod\n def salamandra_robotica_2(cls, n_iterations):\n \"\"\"State of Salamandra robotica 2\"\"\"\n shape = (n_iterations, 2*24)\n return cls(\n shape,\n dtype=np.float64,\n buffer=np.zeros(shape)\n )\n\n def phases(self, iteration=None):\n \"\"\"Oscillator phases\"\"\"\n return self[iteration, :24] if iteration is not None else self[:, :24]\n\n def set_phases(self, iteration, value):\n \"\"\"Set phases\"\"\"\n self[iteration, :24] = value\n\n def set_phases_left(self, iteration, value):\n \"\"\"Set body phases on left side\"\"\"\n self[iteration, :10] = value\n\n def set_phases_right(self, iteration, value):\n \"\"\"Set body phases on right side\"\"\"\n self[iteration, 10:20] = value\n\n def set_phases_legs(self, iteration, value):\n \"\"\"Set leg phases\"\"\"\n self[iteration, 20:24] = value\n\n def amplitudes(self, iteration=None):\n \"\"\"Oscillator amplitudes\"\"\"\n return self[iteration, 24:] if iteration is not None else self[:, 24:]\n\n def set_amplitudes(self, iteration, value):\n \"\"\"Set amplitudes\"\"\"\n self[iteration, 24:] = value\n\n\nclass SalamandraNetwork:\n \"\"\"Salamandra oscillator network\"\"\"\n\n def __init__(self, sim_parameters, n_iterations):\n super(SalamandraNetwork, self).__init__()\n # States\n self.state = RobotState.salamandra_robotica_2(n_iterations)\n # Parameters\n self.robot_parameters = RobotParameters(sim_parameters)\n # Set initial state\n # Replace your oscillator phases here\n self.state.set_phases(iteration=0,\n value=1e-4*np.random.ranf(self.robot_parameters.n_oscillators))\n \n # Set solver\n self.solver = ode(f=network_ode)\n self.solver.set_integrator('dopri5')\n self.solver.set_initial_value(y=self.state[0], t=0.0)\n\n def step(self, iteration, time, timestep):\n \"\"\"Step\"\"\"\n self.solver.set_f_params(self.robot_parameters)\n self.state[iteration+1, :] = self.solver.integrate(time+timestep)\n\n def get_motor_position_output(self, iteration=None):\n \"\"\"Get motor position\"\"\"\n return motor_output(\n self.state.phases(iteration=iteration),\n self.state.amplitudes(iteration=iteration),\n iteration=iteration,\n )\n\n","sub_path":"Python/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":4556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"25958645","text":"# -*- coding: utf-8 -*-\nimport json\nimport sys\nimport csv\nimport nltk\n\nsys.setrecursionlimit(10**9)\nconsonants= []\nconsonantsUni= []\nvowels= []\nvowelsUni= []\nvowelModifiersUni= []\nspecialConsonants= []\nspecialConsonantsUni= []\nspecialCharUni= []\nspecialChar= 
[]\n\nvowelsUni.extend(['ඌ','ආ','ඊ','ඊ','ඊ','ඒ','ඌ','ඖ'])\n\nvowels.extend(['oo','aa','ii','ie','ee','ea','uu','au'])\n\nvowelModifiersUni.extend(['ූ','ා','ී','ී','ී','ේ','ූ','ෞ'])\n\nvowelsUni.extend(['අ','යි','එ','උ','ඔ','ඓ'])\n\nvowels.extend(['a','i','e','u','o','ai'])\n\nvowelModifiersUni.extend(['','ි','ෙ','ු','ො','ෛ'])\n\nnVowels=len(vowels)\n\n# special characher Repaya\nspecialConsonantsUni.append('ර්'+'\\u200D')\nspecialConsonantsUni.append('ර්'+'\\u200D')\n\nspecialConsonants.append(\"/R\")\nspecialConsonants.append(\"\\r\")\n\nconsonantsUni.extend(['ච','ත','ශ','ඥ','ඳ','ඹ','ඟ'])\n\nconsonants.extend(['ch','th','sh','gn', 'nd','mb', 'ng'])\n\nconsonantsUni.extend(['ක','ක','ග','ජ','ට','ඩ','න','ප','බ','ම','ය','ර','ල','ව','ව','ස','හ'])\n\nconsonants.extend(['k','c','g','j','t','d','n','p','b','m','y','r','l','v','w','s','h'])\n\nconsonantsUni.append('ර')\nconsonants.append('r')\n\nspecialCharUni.append('ෲ')\nspecialChar.append('ruu')\nspecialCharUni.append('ෘ')\nspecialChar.append('ru')\n\nprepo_singhlish_word = []\nprepo_alt_word = []\nprepo_sin_word = []\n\ndef loadEnglishWordList():\n with open(\"english_words_in_corpus.txt\",\"r\",encoding='utf-8', errors='ignore') as f_en:\n english_words = json.load(f_en)\n return english_words\n\ndef translate(text):\n # special consonents\n for i in range (0,len(specialConsonants)):\n text = text.replace(specialConsonants[i], specialConsonantsUni[i])\n # consonents + special\n for i in range (0,len(specialCharUni)):\n for j in range(0,len(consonants)):\n s = consonants[j] + specialChar[i]\n v = consonantsUni[j] + specialCharUni[i]\n r = s\n text = text.replace(r, v)\n # consonants + Rakaransha + vowel modifiers\n for j in range(0,len(consonants)):\n for i in range(0,len(vowels)):\n s = consonants[j] + \"r\" + vowels[i]\n v = consonantsUni[j] + \"්‍ර\" + vowelModifiersUni[i]\n r = s\n # r = new RegExp(s, \"g\")\n text = text.replace(r, v)\n\n s = consonants[j] + \"r\"\n v = consonantsUni[j] + \"්‍ර‍\"\n r = v\n text = text.replace(r, v)\n\n\n # constants with vowels modifiers\n for i in range(0,len(consonants)):\n for j in range(0,nVowels):\n s = consonants[i]+vowels[j]\n v = consonantsUni[i] + vowelModifiersUni[j]\n r = s\n text = text.replace(r, v)\n\n # Hal kirima\n for i in range(0, len(consonants)):\n r = consonants[i]\n text = text.replace(r, consonantsUni[i]+\"්\")\n\n\n # adding vowels\n for i in range(0,len(vowels)):\n r = vowels[i]\n text = text.replace(r, vowelsUni[i])\n\n return text\n\ndef loadPrepositions():\n prepositions_file = open('sinhala_preposition.csv', 'r', encoding=\"utf-8-sig\")\n with prepositions_file:\n dataReader = csv.DictReader(prepositions_file)\n for i, row in enumerate(dataReader):\n prepo_sin_word.append(row['sin'].lower())\n prepo_singhlish_word.append(row['singlish'].lower())\n prepo_alt_word.append(row['alt1'].lower())\n\n\n\ndef transliteratecsvFile(file):\n output = []\n loadPrepositions()\n print(prepo_singhlish_word)\n print(prepo_sin_word)\n print(prepo_alt_word)\n eng = loadEnglishWordList()\n print(eng)\n dataReader = csv.DictReader(file)\n transliterated_file = open('transliterated_l1.csv', 'w', encoding=\"utf-8\", newline='')\n with transliterated_file:\n myFields = ['singlish_content', 'transliterated_L1','man_written']\n dataWriter = csv.DictWriter(transliterated_file, fieldnames=myFields)\n dataWriter.writeheader()\n for row in dataReader:\n sent = row['content'].lower()\n words = nltk.wordpunct_tokenize(sent)\n for i in range(len(words)):\n if words[i] not in eng :\n if words[i] in 
prepo_singhlish_word:\n index = prepo_singhlish_word.index(words[i])\n words[i] = prepo_sin_word[index]\n elif words[i] in prepo_alt_word:\n index = prepo_alt_word.index(words[i])\n words[i] = prepo_sin_word[index]\n else:\n words[i] = translate(words[i])\n text = ' '.join(word for word in words)\n dataWriter.writerow({'singlish_content': sent ,'transliterated_L1': text, 'man_written': row['man_written']})\n\n\nif __name__ == '__main__':\n output = []\n eng = loadEnglishWordList()\n print(eng)\n with open('../data.csv', 'rt')as f:\n data = csv.DictReader(f)\n for row in data:\n sent = row['content'].lower()\n words = nltk.wordpunct_tokenize(sent)\n for i in range(len(words)):\n if words[i] not in eng :\n words[i] = translate(words[i])\n text = ' '.join(word for word in words)\n print(text)\n with open('adaderana.txt', 'a', encoding='utf-8', errors='ignore') as file1:\n file1.write(text)","sub_path":"preprocessing/Transliteration.py","file_name":"Transliteration.py","file_ext":"py","file_size_in_byte":5434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"635750050","text":"import numpy as np\nimport matplotlib.pyplot as plt\n# import Image\nimport os\nimport nibabel as nib\nimport tifffile\nimport pickle\nfrom PIL import ImageEnhance , Image , ImageFilter\nimport xlwt\n\n\n\ndef DiceCoefficientCalculator(msk1,msk2):\n intersection = msk1*msk2 # np.logical_and(msk1,msk2)\n DiceCoef = intersection.sum()*2/(msk1.sum()+msk2.sum() + np.finfo(float).eps)\n return DiceCoef\n\n\nSliceNumbers = range(107,140)\n\nnulcieFull = [\n'1-THALAMUS_deformed.nii.gz' ,\n'2-AV_deformed.nii.gz' ,\n'4-VA_deformed.nii.gz' ,\n'5-VLa_deformed.nii.gz' ,\n'6-VLP_deformed.nii.gz' ,\n'7-VPL_deformed.nii.gz',\n'8-Pul_deformed.nii.gz',\n'9-LGN_deformed.nii.gz' ,\n'10-MGN_deformed.nii.gz',\n'11-CM_deformed.nii.gz'\t,\n'12-MD-Pf_deformed.nii.gz' ,\n'13-Hb_deformed.nii.gz'] # , '4567-VL_deformed.nii.gz'\n\n\nDir_oldPriors = '/media/artin/D0E2340CE233F576/Thalamus_Segmentation/Data/Manual_Delineation_Sanitized_Full'\nDir_newPriors = '/media/artin/D0E2340CE233F576/Thalamus_Segmentation/Data/NewPriors/test/7T_MS'\nDir_save = '/media/artin/D0E2340CE233F576/Thalamus_Segmentation/Data/NewPriors/test'\n\nresults = xlwt.Workbook(encoding=\"utf-8\")\nsheet = results.add_sheet('Majority Voting')\n\nlst_Old = os.listdir(Dir_oldPriors)\nlst_newTemp = os.listdir(Dir_newPriors)\n\nlst_new = []\nfor i in range(len(lst_newTemp)):\n if lst_newTemp[i][:5] == 'vimp2':\n lst_new.append(lst_newTemp[i])\n\nsheet.write(0, len(lst_new)+1, 'Average')\n\nfor n in range(len(nulcieFull)):\n\n nuclei = nulcieFull[n]\n sheet.write(n+1, 0, nuclei.split('_deformed.nii.gz')[0])\n\n print('nuclei: ' + nuclei)\n for l in range(len(lst_Old)):\n\n A = nib.load(Dir_oldPriors + '/' + lst_Old[l] + '/Manual_Delineation_Sanitized' + '/' + nuclei)\n A = A.get_data()\n A = A[50:198,130:278,SliceNumbers]\n\n if l == 0:\n Full_Priors_old = A[...,np.newaxis]\n else:\n Full_Priors_old = np.append(Full_Priors_old,A[...,np.newaxis],axis=3)\n\n MW_Seg = np.sum(Full_Priors_old,axis=3)\n MW_Seg = MW_Seg > 10\n\n Dice = np.zeros(len(lst_new))\n for l in range(len(lst_new)):\n A = nib.load(Dir_newPriors + '/' + lst_new[l] + '/Manual_Delineation_Sanitized' + '/' + nuclei)\n A = A.get_data()\n A = A[50:198,130:278,SliceNumbers]\n\n Dice[l] = DiceCoefficientCalculator(A,MW_Seg)\n if n == 0:\n sheet.write(0, l+1, lst_new[l].split('vimp2_')[1])\n sheet.write(n+1, l+1, Dice[l])\n\n\n Dice = np.append(Dice , np.mean(Dice))\n\n 
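# Dice's last element (appended just above) is the mean over all test subjects; write it under the 'Average' header set up earlier\n    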
sheet.write(n+1, len(lst_new)+1, Dice[len(lst_new)])\n np.savetxt(Dir_save + '/DiceCoefficient_' + nuclei.split('_deformed.nii.gz')[0] + '.txt', 100*Dice, fmt='%2.1f')\n\n results.save(Dir_save + '/DiceCoefficient.xls')\n","sub_path":"notMainCodes/Pure_MajorityVoting_Method.py","file_name":"Pure_MajorityVoting_Method.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"243646866","text":"import urllib.request\nimport urllib.parse\nimport json\nfrom django.test import TestCase, Client\nfrom data import models\nfrom django.forms.models import model_to_dict\n\nclass MenuTest(TestCase):\n\tfixtures = ['tests.json']\n\n\tdef setUp(self):\n\t\tself.user = models.User.objects.get(pk=1)\n\t\tself.vendor = models.Vendor.objects.get(pk=1)\n\t\tself.entree = models.Entree.objects.get(pk=1)\n\t\tself.entree2 = models.Entree.objects.get(pk=2)\n\t\t# Menu for vendor w/ pk=1\n\t\tself.menu = models.Menu.objects.get(pk=1)\n\n\tdef test_create_menu(self):\n\t\tvendor_id = self.vendor.pk\n\t\tentree_list = [1, 2]\n\n\t\tresp_create = self.client.post('/api/v1/menu/create', {'vendor_id': vendor_id, 'entree_id_list': entree_list})\n\t\tresp_json = (resp_create.content).decode('utf-8')\n\t\tresp = json.loads(resp_json)\n\n\t\t# Check request has succeeded\n\t\tself.assertEqual(resp_create.status_code, 200)\n\t\tself.assertEqual(resp[\"ok\"], True)\n\n\t\tmenu_pk = resp[\"resp\"][\"menu_id\"]\n\t\tmenu = models.Menu.objects.get(pk=menu_pk)\n\t\tentrees = menu.entrees.all()\n\t\tentree_ids = []\n\t\tfor e in entrees:\n\t\t\tentree_ids.append(e.pk)\n\t\tself.assertEqual(menu.vendor.pk, vendor_id)\n\t\tself.assertEqual(entree_ids, entree_list)\n\n\tdef test_add_entrees(self):\n\t\tentrees_to_add = \"[4, 5]\"\n\t\tentree4 = models.Entree.objects.get(pk=4)\n\t\tentree5 = models.Entree.objects.get(pk=5)\n\n\t\tmenu_id = self.menu.pk\n\n\t\tresp_create = self.client.post('/api/v1/menu/' + str(menu_id) + '/add_entrees', {'entree_id_list': entrees_to_add})\n\t\tresp_json = (resp_create.content).decode('utf-8')\n\t\tresp = json.loads(resp_json)\n\n\t\t# Check request has succeeded\n\t\tself.assertEqual(resp_create.status_code, 200)\n\t\tself.assertEqual(resp[\"ok\"], True)\n\n\t\t# Check entrees added to menu\n\t\tmenu_entrees = models.Entree.objects.filter(menu__id=1)\n\t\tself.assertTrue(entree4 in menu_entrees)\n\t\tself.assertTrue(entree5 in menu_entrees)\n\n\tdef test_remove_entrees(self):\n\t\tentrees_to_remove = [1, 2]\n\t\tmenu_id = self.menu.pk\n\n\t\tresp_remove = self.client.post('/api/v1/menu/' + str(menu_id) + '/remove_entrees', \\\n\t\t\t{'entree_id_list': entrees_to_remove})\n\t\tresp_json = (resp_remove.content).decode('utf-8')\n\t\tresp = json.loads(resp_json)\n\n\t\t# Check request has succeeded\n\t\tself.assertEqual(resp_remove.status_code, 200)\n\t\tself.assertEqual(resp[\"ok\"], True)\n\n\t\tmenu_entrees = models.Entree.objects.filter(menu__id=1)\n\t\tself.assertFalse(self.entree in menu_entrees)\n\t\tself.assertFalse(self.entree2 in menu_entrees)\n\n\tdef test_lookup_menu(self):\n\t\tmenu_id = self.menu.pk\n\t\tentrees = []\n\n\t\tfor e in self.menu.entrees.all():\n\t\t\te_dict = model_to_dict(e)\n\t\t\te_dict[\"price\"] = str(e_dict[\"price\"])\n\t\t\tentrees.append(e_dict)\n\n\t\tvendor_dict = model_to_dict(self.menu.vendor)\n\n\t\tresp_lookup = self.client.get('/api/v1/menu/' + str(menu_id) + '/lookup_menu')\n\t\tresp_json = (resp_lookup.content).decode('utf-8')\n\t\tresp = 
json.loads(resp_json)\n\n\t\t# Check request has succeeded\n\t\tself.assertEqual(resp_lookup.status_code, 200)\n\t\tself.assertEqual(resp[\"ok\"], True)\n\n\t\te_list = resp[\"resp\"][\"entrees\"]\n\t\tvendor = resp[\"resp\"][\"vendor\"]\n\n\t\tself.assertEqual(e_list, entrees)\n\t\tself.assertEqual(vendor_dict, vendor)\n\n\tdef test_lookup_menu_vendor(self):\n\t\tvendor_id = self.vendor.pk\n\n\t\tresp_lookup_vend = self.client.get('/api/v1/menu/' + str(vendor_id) + '/lookup_menu_vendor')\n\t\tresp_json = (resp_lookup_vend.content).decode('utf-8')\n\t\tresp = json.loads(resp_json)\n\n\t\t# Check request has succeeded\n\t\tself.assertEqual(resp_lookup_vend.status_code, 200)\n\t\tself.assertEqual(resp[\"ok\"], True)\n\n\t\tself.assertEqual(resp[\"resp\"][\"menu_id\"], self.menu.pk)\n\n\n\n\n\n","sub_path":"trof/data/test_menu.py","file_name":"test_menu.py","file_ext":"py","file_size_in_byte":3532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}{"seq_id":"387662178","text":"from bamboo import treefunctions as op\n\nclass highlevelLambdas:\n    def __init__(self,HHself):\n        # All the attributes of the BaseHH are contained in the HHself object\n        # All the lambdas are saved in the highlevelLambdas object to avoid confusion with the attributes of the HH base object\n\n        # 4-Momentum association #\n        self.ll_p4 = lambda l1,l2 : l1.p4+l2.p4\n        self.lljj_p4 = lambda l1,l2,j1,j2 : l1.p4+l2.p4+j1.p4+j2.p4\n        self.lep1j_p4 = lambda lep,j1 : lep.p4+j1.p4\n        self.lep2j_p4 = lambda lep,j1,j2 : lep.p4+j1.p4+j2.p4\n        self.lep3j_p4 = lambda lep,j1,j2,j3 : lep.p4+j1.p4+j2.p4+j3.p4\n        self.lep4j_p4 = lambda lep,j1,j2,j3,j4 : lep.p4+j1.p4+j2.p4+j3.p4+j4.p4\n        \n\n        # bReg corr 4 momenta of ak4-bTagged jet #\n        # (the LorentzVector template argument is an assumption: PtEtaPhiM4D<float> matches the (pt, eta, phi, mass) constructor arguments)\n        self.bJetCorrP4 = lambda j : op._to.Construct(\"ROOT::Math::LorentzVector<ROOT::Math::PtEtaPhiM4D<float> >\", (j.pt*j.bRegCorr, j.eta, j.phi, j.mass)).result\n        \n        # Dilep-Met variables #\n        self.DilepMET_deltaPhi = lambda l1,l2,met : self.ll_p4(l1,l2).Phi()-met.phi\n        self.DilepMET_Pt = lambda l1,l2,met : op.sqrt(op.pow(met.pt*op.cos(met.phi)+self.ll_p4(l1,l2).Px(),2)+op.pow(met.pt*op.sin(met.phi)+self.ll_p4(l1,l2).Py(),2))\n        # SingleLep-Met variables\n        self.SinglepMet_Pt = lambda lep,met : op.sqrt(op.pow(met.pt*op.cos(met.phi)+lep.p4.Px(),2)+op.pow(met.pt*op.sin(met.phi)+lep.p4.Py(),2))\n        self.SinglepMet_dPhi = lambda lep, met : lep.p4.Phi()-met.phi\n        \n        # Transverse mass #\n        self.MT_ll = lambda l1,l2,met : op.sqrt(2*self.ll_p4(l1,l2).Pt()*met.pt*(1-op.cos(self.ll_p4(l1,l2).Phi()-met.phi)))\n        self.MT_lljj = lambda l1,l2,j1,j2,met : op.sqrt(2*self.lljj_p4(l1,l2,j1,j2).Pt()*met.pt*(1-op.cos(self.lljj_p4(l1,l2,j1,j2).Phi()-met.phi)))\n        self.MT = lambda lep,met : op.sqrt(2*lep.p4.Pt()*met.pt*(1-op.cos(lep.p4.Phi()-met.phi)))\n        self.MT_W1W2_ljj = lambda lep,j1,j2,met : op.sqrt(2*self.lep2j_p4(lep,j1,j2).Pt()*met.pt*(1-op.cos(self.lep2j_p4(lep,j1,j2).Phi()-met.phi)))\n        self.MT_W1W2_lj = lambda lep,j1,met : op.sqrt(2*self.lep1j_p4(lep,j1).Pt()*met.pt*(1-op.cos(self.lep1j_p4(lep,j1).Phi()-met.phi)))\n        # TODO : clean different versions (eg MT)\n        \n        # dilep + dijet #\n        self.M_lljj = lambda l1,l2,j1,j2 : op.invariant_mass(self.lljj_p4(l1,l2,j1,j2))\n        self.MinDR_lj = lambda l1,l2,j1,j2 : op.min(op.min(op.deltaR(l1.p4,j1.p4),op.deltaR(l1.p4,j2.p4)),\n                                                    op.min(op.deltaR(l2.p4,j1.p4),op.deltaR(l2.p4,j2.p4)))\n        \n        self.MinDR_lep3j = lambda lep,j1,j2,j3 : op.min(op.min(op.deltaR(lep.p4,j1.p4),op.deltaR(lep.p4,j2.p4)),op.deltaR(lep.p4,j3.p4))\n        \n        # Higgs related variables #\n        self.HT2 = lambda l1,l2,j1,j2,met : 
op.sqrt(op.pow(met.pt*op.cos(met.phi)+l1.p4.Px()+l2.p4.Px(),2)+op.pow(met.pt*op.sin(met.phi)+l1.p4.Py()+l2.p4.Py(),2)) + op.abs((j1.p4+j2.p4).Pt())\n        self.HT2R = lambda l1,l2,j1,j2,met : self.HT2(l1,l2,j1,j2,met)/(met.pt+l1.p4.Pt()+l2.p4.Pt()+j1.p4.Pt()+j2.p4.Pt())\n        self.HT2_l3jmet = lambda l,j1,j2,j3,met : op.sqrt(op.pow(met.pt*op.cos(met.phi)+l.p4.Px(),2)+op.pow(met.pt*op.sin(met.phi)+l.p4.Py(),2)) + op.abs((j1.p4+j2.p4+j3.p4).Pt())\n        self.HT2R_l3jmet = lambda l,j1,j2,j3,met : self.HT2_l3jmet(l,j1,j2,j3,met)/(met.pt+l.p4.Pt()+j1.p4.Pt()+j2.p4.Pt()+j3.p4.Pt())\n        self.HT2_l4jmet = lambda l,j1,j2,j3,j4,met : op.sqrt(op.pow(met.pt*op.cos(met.phi)+l.p4.Px(),2)+op.pow(met.pt*op.sin(met.phi)+l.p4.Py(),2)) + op.abs((j1.p4+j2.p4+j3.p4+j4.p4).Pt())\n        self.HT2R_l4jmet = lambda l,j1,j2,j3,j4,met : self.HT2_l4jmet(l,j1,j2,j3,j4,met)/(met.pt+l.p4.Pt()+j1.p4.Pt()+j2.p4.Pt()+j3.p4.Pt()+j4.p4.Pt())\n        \n        #min j1j2DR\n        self.MinDiJetDRLoose = lambda j1,j2,j3: op.min(op.min(op.deltaR(j1.p4,j2.p4), op.deltaR(j2.p4,j3.p4)), op.deltaR(j1.p4,j3.p4))\n        \n        # ------------------------------------ lambdas for BDT variables ------------------------------------ #\n        self.mindr_lep1_jet = lambda lep, jets : op.deltaR(lep.p4, op.sort(jets, lambda j : op.deltaR(lep.p4, j.p4))[0].p4)\n        self.HT = lambda jets : op.rng_sum(jets, lambda j : j.p4.Pt())\n        \n        # mT2\n        self.ET = lambda lep : op.sqrt(op.pow(lep.p4.M(),2) + op.pow(lep.p4.Pt(),2))\n        self.mT2 = lambda jet, lep, met : (op.pow(jet.p4.M(),2) + op.pow(lep.p4.M(),2) + op.pow(met.p4.M(),2) + \n                                           2*(self.ET(lep)*self.ET(jet) - (lep.p4.Px()*jet.p4.Px() + lep.p4.Py()*jet.p4.Py())) +\n                                           2*(self.ET(lep)*self.ET(met) - (lep.p4.Px()*met.p4.Px() + lep.p4.Py()*met.p4.Py())) +\n                                           2*(self.ET(jet)*self.ET(met) - (jet.p4.Px()*met.p4.Px() + jet.p4.Py()*met.p4.Py())))\n        \n        # pZ component of met\n        # https://github.com/HEP-KBFI/hh-bbww/blob/f4ab60f81a920268a3f2187b97a58ec449b26883/src/comp_metP4_B2G_18_008.cc\n        # some necessary constants (visP4 = lepP4 + Wjj_simple)\n        # - - - - - used to compute neuP4 - - - - - #\n        _a = lambda visP4, met, mH : (op.pow(mH, 2) - op.pow(visP4.M(), 2) + 2.*visP4.Px()*met.p4.Px() + 2.*visP4.Py()*met.p4.Py())\n        _A = lambda visP4 : 4.0 * op.pow(visP4.E(), 2) - op.pow(visP4.Pz(), 2)\n        _B = lambda visP4, met, mH : -4.0 * _a(visP4, met, mH) * visP4.Pz()\n        _C = lambda visP4, met, mH : 4.0*op.pow(visP4.E(), 2)*(op.pow(met.p4.Px(), 2) + op.pow(met.p4.Py(), 2)) - op.pow(_a(visP4, met, mH), 2)\n        _D = lambda visP4, met, mH : (op.pow(_B(visP4, met, mH), 2) - 4.0*_A(visP4)*_C(visP4, met, mH))\n        _pos = lambda visP4, met, mH : (-_B(visP4, met, mH) + op.sqrt(_D(visP4, met, mH)))/(2.*_A(visP4))\n        _neg = lambda visP4, met, mH : (-_B(visP4, met, mH) - op.sqrt(_D(visP4, met, mH)))/(2.*_A(visP4))\n        neuPz = lambda visP4, met, mH : (op.switch(_D(visP4, met, mH) < 0., - _B(visP4, met, mH)/(2.*_A(visP4)), \n                                                   op.switch(op.abs(_pos(visP4, met, mH)) < op.abs(_neg(visP4, met, mH)), \n                                                             _pos(visP4, met, mH), _neg(visP4, met, mH))))\n        # - - - - - - - - - - - - - - - - - - - - - #\n        # (the LorentzVector template argument is an assumption: PxPyPzE4D<float> matches the (px, py, pz, E) constructor arguments)\n        neuP4 = lambda visP4, met, mH : op._to.Construct(\"ROOT::Math::LorentzVector<ROOT::Math::PxPyPzE4D<float> >\", \n                                                         (met.p4.Px(), \n                                                          met.p4.Py(), \n                                                          neuPz(visP4, met, mH), \n                                                          op.sqrt(op.pow(met.p4.Px(),2)+op.pow(met.p4.Py(),2)+op.pow(neuPz(visP4, met, mH),2)))).result\n        \n        # P4 of W1 (l,neu)\n        self.Wlep_simple = lambda j1P4,j2P4,lepP4,met,mH : lepP4 + neuP4(j1P4+j2P4+lepP4, met, mH)\n        # P4 of W2 (j,j)\n        self.Wjj_simple = lambda j1P4,j2P4 : j1P4 + j2P4 \n        # P4 of HWW (W1 + W2)\n        self.HWW_simple = lambda j1P4,j2P4,lepP4,met,mH : self.Wjj_simple(j1P4,j2P4) + 
self.Wlep_simple(j1P4,j2P4,lepP4,met,mH)\n        # dR_HWW\n        self.dR_Hww = lambda j1P4,j2P4,lepP4,met,mH : op.deltaR(self.Wjj_simple(j1P4,j2P4), self.Wlep_simple(j1P4,j2P4,lepP4,met,mH))\n        # P4 of lep + met\n        self.Wlep_met_simple = lambda lepP4, metP4 : lepP4 + metP4\n        # SimpleP4 of HWW (W1 + W2)\n        self.HWW_met_simple = lambda j1P4,j2P4,lepP4,metP4 : self.Wjj_simple(j1P4, j2P4) + self.Wlep_met_simple(lepP4,metP4)\n        # Total P4\n        self.HHP4_simple_met = lambda HbbRegP4, j1P4, j2P4, lepP4,metP4 : HbbRegP4 + self.Wjj_simple(j1P4, j2P4) + self.Wlep_met_simple(lepP4,metP4)\n        \n        # CosThetaS calculation\n        #comp_cosThetaS = lambda ob1p4, ob2p4 : op.abs(ob1p4.Boost(-(ob1p4+ob2p4).BoostVector()).CosTheta())\n        motherPx = lambda ob1p4,ob2p4 : (ob1p4.Px()+ob2p4.Px())\n        motherPy = lambda ob1p4,ob2p4 : (ob1p4.Py()+ob2p4.Py())\n        motherPz = lambda ob1p4,ob2p4 : (ob1p4.Pz()+ob2p4.Pz())\n        motherE = lambda ob1p4,ob2p4 : (ob1p4.E()+ob2p4.E())\n        # (again assuming the PxPyPzE4D<float> template argument for the (px, py, pz, E) constructor)\n        BoostP4 = lambda ob1p4,ob2p4 : op._to.Construct(\"ROOT::Math::LorentzVector<ROOT::Math::PxPyPzE4D<float> >\", (motherPx(ob1p4,ob2p4), motherPy(ob1p4,ob2p4), motherPz(ob1p4,ob2p4), motherE(ob1p4,ob2p4))).result\n        self.comp_cosThetaS = lambda ob1p4,ob2p4 : op.abs(op.cos(op.deltaR(BoostP4(ob1p4,ob2p4), ob1p4)))\n        \n        # MET_LD\n        # Equation 3 (page 33) of AN-2019/111 v13\n        # Similar to MET, but more robust against pileup\n        jetSumPx = lambda jets : op.rng_sum(jets, lambda j : j.p4.Px())\n        jetSumPy = lambda jets : op.rng_sum(jets, lambda j : j.p4.Py())\n        lepSumPx = lambda leps : op.rng_sum(leps, lambda l : l.p4.Px())\n        lepSumPy = lambda leps : op.rng_sum(leps, lambda l : l.p4.Py())\n        self.MET_LD = lambda met, jets, leps : 0.6*met.pt + 0.4*op.sqrt(op.pow(jetSumPx(jets)+lepSumPx(leps),2)+op.pow(jetSumPy(jets)+lepSumPy(leps),2)) \n        \n        \n        \n#        # Lepton conePt\n#        # from Base\n#        lambda_conept_electron = lambda lep : op.multiSwitch((op.AND(op.abs(lep.pdgId)!=11 , op.abs(lep.pdgId)!=13) , op.static_cast(\"Float_t\",lep.pt)),\n#                                                     # if (abs(lep.pdgId)!=11 and abs(lep.pdgId)!=13): return lep.pt : anything that is not muon or electron\n#                                                     (op.AND(op.abs(lep.pdgId)==11 , lep.mvaTTH > 0.80) , op.static_cast(\"Float_t\",lep.pt)),\n#                                                     # if electron, check above MVA \n#                                                      op.static_cast(\"Float_t\",0.9*lep.pt*(1.+lep.jetRelIso)))\n#                                                     # else: return 0.90 * lep.pt / jetPtRatio where jetPtRatio = 1./(Electron_jetRelIso + 1.)\n#        # from Base\n#        lambda_conept_muon = lambda lep : op.multiSwitch((op.AND(op.abs(lep.pdgId)!=11 , op.abs(lep.pdgId)!=13) , op.static_cast(\"Float_t\",lep.pt)),\n#                                                     # if (abs(lep.pdgId)!=11 and abs(lep.pdgId)!=13): return lep.pt : anything that is not muon or electron\n#                                                     (op.AND(op.abs(lep.pdgId)==13 , lep.mediumId ,lep.mvaTTH > 0.85) , op.static_cast(\"Float_t\",lep.pt)),\n#                                                     # if muon, check that passes medium and above MVA\n#                                                     op.static_cast(\"Float_t\",0.9*lep.pt*(1.+lep.jetRelIso)))\n#                                                     # else: return 0.90 * lep.pt / lep.jetPtRatiov2\n#        \n#        lepConePt = lambda lep: op.switch(op.abs(lep.pdgId) == 11, lambda_conept_electron(lep), lambda_conept_muon(lep))\n        # TODO : remove above -> conept can be accessed as HHself.muon_conept(muon.idx) and same for electron\n        \n        \n","sub_path":"highlevelLambdas.py","file_name":"highlevelLambdas.py","file_ext":"py","file_size_in_byte":10917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}{"seq_id":"635352675","text":"# Given a string, return a new string made of every other char starting with the first, so \"Hello\" yields \"Hlo\".\n#\n# string_bits('Hello') → 'Hlo'\n# string_bits('Hi') → 'H'\n# string_bits('Heeololeo') → 'Hello'\n#\n# This exercise could also be 
done with a loop, but enumeration simplifies grabbing values\n# based on indices. This also makes use of the modulus operator, popular for \"FizzBuzzy\"\n# problems. -Luan\n\ndef string_bits(str):\n    modString = \"\"\n    for index, value in enumerate(str):\n        if index % 2 == 0:\n            modString += value\n    return modString\n","sub_path":"python/WU2.string_bits.py","file_name":"WU2.string_bits.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}{"seq_id":"554311908","text":"\nimport time\nimport easygui\nimport os\nfrom os import path\n\ncounter = 0\n# path = \"H:\\\\Ordnerstruktur Barto\\\\Schaltplaene\"\npathsp = easygui.diropenbox()\n# file names\nschaltplanUV = f\"08_TU_175G010AA000_2019_03_06_07_52_Schaltplan_UV_1_2.pdf\"\nprotok_unt = f\"08_TU_175G010AA000_2019_03_26_07_30_Protokoll_Unterschrieben.pdf\"\nlegende = f\"Stromkreislegende_UV_1_2.pdf\"\nzw1 = \"175G0101alt.zw1\"\n\nprint(\"-----------Add all missing files/folders!-----------\")\n'''Where possible, name folders with keywords such as „Lüftung\", „NSHV\", „Serverraum2\", „Halle\" and „Technikum\". /n Otherwise the new folders are not listed and the program has to be adapted./n\nprint('Name files only with „Schaltplan_\", „Protokoll_Unterschrieben_\", „Stromkreislegende_\" and the name of the folder that contains the file (UV_1_2 or UV-1_2, UV_Bezeichnung).'''\n\n\n#Schaltplan_UV_{beforedot}_{afterdot}.pdf\n\nsp = False\nprtk = False\nleg = False\nzw = False\n\n\nfor s, (root, dirs, files) in enumerate(os.walk(pathsp)):\n    if \"UV\" in root or \"Lüftung\" in root or \"NSHV\" in root or \"Serverraum 2\" in root or \"Halle\" in root or \"Technikum\" in root:\n        if \"Archiv\" in root:\n            pass\n        else:\n            print(f\"Folder {s -3} ({root})\")\n            lastpart = os.path.basename(os.path.normpath(root))\n            splitin2 = lastpart.split(\".\")\n            beforedot = splitin2[0].replace(\"UV \", \"\")\n            afterdot = splitin2[-1]\n            # archive format\n            seclast = os.path.split(os.path.split(root)[0])[1]\n            splitintwo = seclast.split(\".\")\n            befored = splitintwo[0].replace(\"UV \", \"\")\n            afterd = splitintwo[-1]\n\n            # for-loop over each file..\n            for file in files:\n                if \"Schaltplan\" in file and file.endswith(\".pdf\"):\n                    print(\" \".join((file, \"present!\")))\n                    counter += 1\n                    sp = True\n                elif \"Protokoll\" in file and file.endswith(\".pdf\"):\n                    print(\" \".join((file, \"present!\")))\n                    counter += 1\n                    prtk = True\n                elif \"legende\" in file and file.endswith(\".pdf\"):\n                    print(\" \".join((file, \"present!\")))\n                    counter += 1\n                    leg = True\n                elif file.endswith(\".zw1\"):\n                    print(\" \".join((file, \"present!\")))\n                    counter += 1\n                    zw = True\n            for t in files:\n                if sp == False:\n                    print(\" \".join(f\"08_TU_175G010AA000_Datum_Uhrzeit_Schaltplan_UV_{beforedot}_{afterdot}.pdf\").replace(\" \",\"\"), \" is missing!\")\n                    sp = True\n                elif prtk == False:\n                    print(\" \".join(f\"08_TU_175G010AA000_Datum_Uhrzeit_Protokoll_Unterschrieben_UV_{beforedot}_{afterdot}.pdf\").replace(\" \",\"\"), \" is missing!\")\n                    prtk = True\n                elif leg == False:\n                    print(\" \".join(f\"Stromkreislegende_UV_{beforedot}_{afterdot}.pdf\").replace(\" \",\"\"), \" is missing!\")\n                    leg = True\n                elif zw == False:\n                    print(\" \".join(f\"175G0101_UV_{beforedot}_{afterdot}.zw1\").replace(\" \",\"\"), \" is missing!\")\n                    # print(\" \".join((\"Other file: \", file)))\n                    zw = True\n        # branch: no file exists at all! 
(to be improved)\n            if sp == False and prtk == False and leg == False and zw == False:\n                print(\" \".join(\n                    f\"Schaltplan_UV_{beforedot}_{afterdot}.pdf, \\n Protokoll_Unterschrieben_UV_{beforedot}_{afterdot}.pdf, \\n Stromkreislegende_UV_{beforedot}_{afterdot}.pdf, \\n \"\n                    f\"175G0101_UV_{beforedot}_{afterdot}.zw1\").replace(\" \", \"\"), \"are missing!\")\n\n            print(counter, \" files are present!\")\n            counter = 0\n            sp = False\n            prtk = False\n            leg = False\n            zw = False\n\n    # TODOs: archive output, warn about imprecise file names, adjust the handling of other files\nprint(\"*************************************************************\")\nfor s, (root, dirs, files) in enumerate(os.walk(pathsp)):\n    if \"UV\" in root or \"Lüftung\" in root or \"NSHV\" in root or \"Serverraum 2\" in root:\n\n        # for-loop over each file..\n        for file in files:\n            if \"Schaltplan\" in file and file.endswith(\".pdf\"):\n                # print(\" \".join((file, \"present!\")))\n                counter += 1\n                sp = True\n            elif \"Protokoll\" in file and file.endswith(\".pdf\"):\n                # print(\" \".join((file, \"present!\")))\n                counter += 1\n                prtk = True\n            elif \"legende\" in file and file.endswith(\".pdf\"):\n                # print(\" \".join((file, \"present!\")))\n                counter += 1\n                leg = True\n            elif file.endswith(\".zw1\"):\n                # print(\" \".join((file, \"present!\")))\n                counter += 1\n                zw = True\n        print(f\"Folder {s -3} - {counter} files are present!\")\n\n        counter = 0\n        sp = False\n        prtk = False\n        leg = False\n        zw = False\n\nt = input(\"Press Enter to exit.\")\n\n#FIXME:\n#1. Exclude the archives\n#2. For missing files, insert „before and afterdot\" only for UV folders\n#3. Include additional folders in the search\n#4. Remove case sensitivity\n#5. Strip the numbers at the beginning","sub_path":"schaltplaene2try - aenderung1.py","file_name":"schaltplaene2try - aenderung1.py","file_ext":"py","file_size_in_byte":5491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}{"seq_id":"375268628","text":"# importing all the important libraries\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom pprint import pprint\r\nfrom sklearn.utils import shuffle \r\n\r\ndataset = pd.read_csv('tic-tac-toe.data',sep=',',names=['f1','f2','f3','f4','f5','f6','f7','f8','f9','class'])\r\n\r\n# creating function of entropy\r\ndef entropy(target_col):\r\n    elements,counts = np.unique(target_col,return_counts = True) # get different elements and counts of the target_column\r\n    entropy = np.sum([(-counts[i]/np.sum(counts))*np.log2(counts[i]/np.sum(counts)) for i in range(len(elements))]) # calculate the entropy\r\n    return entropy # return the entropy\r\n\r\n# calculating information gain of the entire dataset\r\ndef InfoGain(data,split_attribute_name,target_name=\"class\"): \r\n    total_entropy = entropy(data[target_name]) # calculate the total entropy of the target_column\r\n    vals,counts= np.unique(data[split_attribute_name],return_counts=True) # get different elements and counts of the target_column\r\n    Weighted_Entropy = np.sum([(counts[i]/np.sum(counts))*entropy(data.where(data[split_attribute_name]==vals[i]).dropna()[target_name]) for i in range(len(vals))]) #calculating the weighted entropy\r\n    Information_Gain = total_entropy - Weighted_Entropy # calculate information gain\r\n    return Information_Gain # return information gain\r\n\r\n# calculating the gain ratio\r\ndef split_information(data,split_attribute_name,target_name=\"class\"):\r\n    vals,counts = 
np.unique(data[split_attribute_name],return_counts=True) # get different elements and counts of the target_column\r\n split_info = np.sum([(counts[i]/np.sum(counts))*(np.log2(counts[i]/np.sum(counts))) for i in range(len(vals))]) # calculate the split information \r\n split_info = -(split_info) \r\n IG = InfoGain(data,split_attribute_name,target_name='class') # calculating the information gain for the gain ratio\r\n if(split_info == 0):\r\n split_info = 0.000000000000000000000000000000000000000001\r\n Gain_ratio = IG / split_info # calculating the final gain ratio \r\n return Gain_ratio # returning the final gain ratio\r\n\r\n# creating ID3 decision tree\r\ndef ID3(data,originaldata,features,target_attribute_name=\"class\",parent_node_class = None,approach = \"IG\"):\r\n # First stopping Condition: If all target_values have the same value, return this value\r\n if len(np.unique(data[target_attribute_name])) <= 1:\r\n return np.unique(data[target_attribute_name])[0]\r\n \r\n # second stopping condition: If length of the data is zero then, return the mode target feature value\r\n elif len(data) == 0:\r\n return np.unique(originaldata[target_attribute_name])[np.argmax(np.unique(originaldata[target_attribute_name],return_counts=True)[1])]\r\n\r\n # if the feature is empty then return the parent node class\r\n elif len(features) == 0:\r\n return parent_node_class\r\n \r\n # if none of the above stopping condition are true then grow the tree.\r\n else:\r\n # parent node class is the default value for the node, which is the mode target feature value of the current node\r\n parent_node_class = np.unique(data[target_attribute_name])[np.argmax(np.unique(data[target_attribute_name],return_counts=True)[1])]\r\n\r\n #find and select the feature which finds the best split for the dataset\r\n if (approach == \"IG\"):\r\n item_values = [InfoGain(data,feature,target_attribute_name) for feature in features] #Return the information gain values for the features in the dataset\r\n elif (approach == \"GR\"):\r\n item_values = [split_information(data,feature,target_attribute_name) for feature in features] #Return the information gain values for the features in the dataset\r\n \r\n # find the index of the best split\r\n best_feature_index = np.argmax(item_values)\r\n # best feature at the best feature index for the split \r\n best_feature = features[best_feature_index]\r\n \r\n # create a tree structure in the dictionary and assign the best feature as the root node.\r\n tree = {best_feature:{}}\r\n \r\n # remove the feature which was selected, means remove best features\r\n features = [i for i in features if i != best_feature]\r\n \r\n # now grow the node under the best feature\r\n for value in np.unique(data[best_feature]):\r\n value = value\r\n \r\n # split the dataset where we found the best value of the information gain to create a sub_data\r\n sub_data = data.where(data[best_feature] == value).dropna()\r\n\r\n # now apply recursion on the ID3 function using sub_data\r\n subtree = ID3(sub_data,dataset,features,target_attribute_name,parent_node_class,approach=approach)\r\n \r\n # then on the best feature apply the subtree\r\n tree[best_feature][value] = subtree\r\n return(tree) \r\n\r\n# the function to predict the output of the test data using the tree that we have created\r\ndef predict(query,tree,default = 'positive'):\r\n # we will check for every feature if it exists in the query. 
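The tree is a nested dict of the form {feature: {value: subtree}}, so prediction is a walk down from the root, one matching key at a time.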
\r\n    # If we find a feature name that exists in the dictionary, go inside the dictionary; otherwise return the default value \r\n    for key in list(query.keys()):\r\n        if key in list(tree.keys()):\r\n            # here we handle the situation where we come across an unseen query: in that case we return the default value\r\n            try:\r\n                result = tree[key][query[key]] \r\n            except:\r\n                return default\r\n            # save the result of the key which fits the value for the key\r\n            result = tree[key][query[key]]\r\n            # we use recursion here because we have to go through the tree until we find the matching key and value in our tree dictionary\r\n            if isinstance(result,dict):\r\n                return predict(query,result)\r\n            else:\r\n                # if we found the result then return the result.\r\n                return result\r\n\r\ndef test(data,tree):\r\n    # create queries simply by removing the feature value column from the test dataset\r\n    queries = data.iloc[:,:-1].to_dict(orient = \"records\")\r\n    \r\n    # creating an empty dataframe where the predictions made by the tree will be stored\r\n    # we will use this to calculate the accuracy of the dataset\r\n    predicted = pd.DataFrame(columns=[\"predicted\"]) \r\n    \r\n    #Calculate the prediction accuracy\r\n    for i in range(len(data)):\r\n        predicted.loc[i,\"predicted\"] = predict(queries[i],tree,1.0) \r\n    prediction_accuracy = (np.sum(predicted[\"predicted\"] == data[\"class\"])/len(data))*100\r\n    # returning the accuracy and the predicted dataset\r\n    return prediction_accuracy,predicted\r\n\r\n# Confusion matrix function\r\ndef confusionmatrix(actual, predicted, normalize = False):\r\n    unique = sorted(set(actual)) # find the unique values from the actual dataset\r\n    matrix = [[0 for _ in unique] for _ in unique] # create a matrix\r\n    imap = {key: i for i, key in enumerate(unique)}\r\n    # Now let's generate the confusion matrix\r\n    for p, a in zip(predicted, actual): \r\n        matrix[imap[p]][imap[a]] += 1 # incrementing the confusion matrix entry for each index pair\r\n    # perform matrix normalization\r\n    if normalize:\r\n        sigma = sum([sum(matrix[imap[i]]) for i in unique])\r\n        matrix = [row for row in map(lambda i: list(map(lambda j: j / sigma, i)), matrix)]\r\n    # return the final confusion matrix\r\n    return matrix\r\n\r\n# create a function for performing 10 folds cross validation 10 times\r\ndef cross_validation_score(dataset, num_of_folds = 10,IGorGR = 'IG'):\r\n    confusion_expected = [] # an empty list for the actual values for the confusion matrix\r\n    confusion_predicted = [] # an empty list for the predicted values for the confusion matrix\r\n    num_of_rows = dataset.shape[0] # number of rows of the dataset\r\n    fold_size = num_of_rows / num_of_folds # calculating the fold size of the dataset\r\n    fold_size = int(fold_size) # converting it to int\r\n    accuracy = [] # empty list for the accuracy\r\n    tree_number_for_print = 0\r\n    for z in range(10): # 10 times\r\n        m = 0 # m and n are used for performing 10 fold cross validation\r\n        n = 0 \r\n        dataset_copy = dataset\r\n        dataset_copy = shuffle(dataset_copy).reset_index(drop=True) # shuffle the dataset\r\n        for i in range(10): # 10 fold cross validation\r\n            tree_number_for_print = tree_number_for_print + 1\r\n            #print('-----------------------------------------------------------------------Tree Number',tree_number_for_print,'------------------------------------------------------------')\r\n            m = m + fold_size # back limit for the cross validation split\r\n            training_data = dataset_copy.drop(dataset_copy.index[n:m]).reset_index(drop=True) # training data\r\n            testing_data = 
dataset_copy.iloc[n:m].reset_index(drop=True) # testing/ validation data\r\n confusion_expected.append(testing_data) # for the confusion matrix\r\n n = n + fold_size # front limit for the cross validation split\r\n tree = ID3(training_data,training_data,training_data.columns[:-1],approach=IGorGR) # find the tree using ID3 function\r\n #pprint(tree) # print the final tree\r\n prediction_accuracy,predicted = test(testing_data,tree) #getting the prediction accuracy\r\n #print(\"The accuracy of the tree number: \",tree_number_for_print,\" is: \",prediction_accuracy)\r\n confusion_predicted.append(predicted) # predicted dataset for the confusion matrix\r\n accuracy.append(prediction_accuracy) # store the accuracy value\r\n mean = np.mean(accuracy) # calculate the mean of all the accuracy\r\n variance = np.var(accuracy) # calculate the variance of all the accuracy\r\n confusion_mat_index = accuracy.index(max(accuracy)) # get the index for the confusion matrix\r\n con_fun_actual = list(confusion_expected[confusion_mat_index]['class']) # actual list for the confusion matrix\r\n con_fun_predicted = list(confusion_predicted[confusion_mat_index]['predicted']) # predicted list for the confusion matrix\r\n confusion_matrix = confusionmatrix(con_fun_actual,con_fun_predicted) # creating the confusion matrix based on the confusion matrix function\r\n confusion_matrix = np.matrix(confusion_matrix) # convert the confusion matrix to a matrix \r\n return mean,variance,confusion_matrix # return the value of the mean, varaince and confusion matrix\r\n\r\n# printing the final mean, variance and confusion matrix.\r\nprint(\"The decision tree of tic-tac-toe dataset using Information Gain Approach\") \r\nfinal_mean_IG,final_variance_IG,final_confusion_matrix_IG = cross_validation_score(dataset,10,'IG') \r\nprint(\"Final Mean of Information Gain Tree: \",final_mean_IG)\r\nprint(\"Final variance of Information Gain Tree: \", final_variance_IG)\r\nprint(\"Confusion matrix of Information Gain Tree: \\n\",final_confusion_matrix_IG)\r\n\r\nprint(\"The decision tree of tic-tac-toe dataset using Gain Ratio Approach\") \r\nfinal_mean_GR,final_variance_GR,final_confusion_matrix_GR = cross_validation_score(dataset,10,'GR')\r\nprint(\"Final Mean of Gain Ratio Tree: \",final_mean_GR)\r\nprint(\"Final variance of Gain Ratio Tree: \", final_variance_GR)\r\nprint(\"Confusion matrix of Gain Ratio Tree: \\n\",final_confusion_matrix_GR)\r\n\r\n\r\n############################################################# WINE #########################################################\r\n# take the wine dataset and pre-process it.\r\ndataset = pd.read_csv('wine.data',sep=',',names=['class','alcohol','malic acid','ash','Alcalinity of ash','magnesium','total phenols','flavanoids','Nonflavanoid phenols','proanthocyanins','color intensity','hue','OD280/OD315 of diluted wines','proline'])\r\na = dataset['class']\r\ndataset = dataset.drop([\"class\"],axis=1)\r\ndataset = ((dataset-dataset.min())/(dataset.max()-dataset.min()) * (1 + 1)) - 1\r\ndataset['class'] = a # dataset with class at the end\r\n# feature names of the list.\r\nfeature_names = ['alcohol','malic acid','ash','Alcalinity of ash','magnesium','total phenols','flavanoids','Nonflavanoid phenols','proanthocyanins','color intensity','hue','OD280/OD315 of diluted wines','proline']\r\n\r\n# entropy calculation function of the wine dataset\r\ndef entropy_wine(target_col):\r\n elements, counts = np.unique(target_col, return_counts=True)\r\n # entropy calculation of the target_column dataset\r\n 
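# i.e. H = -sum_i p_i * log2(p_i), with p_i = counts[i] / counts.sum()\r\n    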
entropy = np.sum(\r\n        [\r\n            (-counts[i] / np.sum(counts)) * np.log2(counts[i] / np.sum(counts))\r\n            for i in range(len(elements))\r\n        ]\r\n    )\r\n    # return the entropy\r\n    return entropy\r\n\r\n# calculating information gain\r\ndef infogain_wine(dataset,leftsplit,rightsplit,total_entropy,split_values):\r\n    total_elements = dataset.shape[0] # total elements of the dataset\r\n    entropy_leftsplit = entropy_wine(leftsplit) # entropy of the leftsplit\r\n    entropy_rightsplit = entropy_wine(rightsplit) # entropy of the rightsplit\r\n    weighted_entropy = (((leftsplit.shape[0]/total_elements)*entropy_leftsplit) + ((rightsplit.shape[0]/total_elements)*entropy_rightsplit)) # calculating the weighted entropy\r\n    final_infogain = total_entropy - weighted_entropy # final information gain\r\n    return final_infogain # returning final information gain\r\n\r\n# calculating the gain ratio\r\ndef split_information_wine(dataset,leftsplit,rightsplit,total_entropy,split_values):\r\n    total_elements = dataset.shape[0] # length of the dataset\r\n    leftsplit_size = leftsplit.shape[0] # length of the leftsplit of the dataset\r\n    rightsplit_size = rightsplit.shape[0] # length of the rightsplit of the dataset\r\n    if (rightsplit.shape[0] == 0):\r\n        rightsplit_size = 0.000000000000000000000000000000000000000001\r\n    split_information = (((leftsplit_size/total_elements)*(np.log2(leftsplit_size/total_elements)))+((rightsplit_size/total_elements)*(np.log2(rightsplit_size/total_elements)))) # calculating the split information \r\n    split_information = -split_information\r\n    IG = infogain_wine(dataset,leftsplit,rightsplit,total_entropy,split_values) # calculating the information gain\r\n    if (split_information == 0):\r\n        split_information = 0.000000000000000000000000000000000000000001\r\n    Gain_Ratio = IG / split_information # calculating the gain ratio \r\n    return Gain_Ratio # returning the gain ratio\r\n\r\n# calculating the best split index\r\ndef best_split_index(dataset,feature_to_split,feature_names,target_attribute_name = \"class\",IGGR = 'IG'):\r\n    # finding the total entropy \r\n    total_entropy = entropy_wine(dataset[target_attribute_name])\r\n\r\n    # sorting the dataset for a particular feature\r\n    sorted_dataset = dataset.sort_values([feature_to_split],axis = 0,ascending = True)\r\n\r\n    # find the split_values\r\n    split_feature_val = sorted_dataset[feature_to_split].values\r\n\r\n    # finding information gain\r\n    best_info_gain = []\r\n    for i in range(len(split_feature_val)):\r\n        # finding different splits\r\n        leftsplit = sorted_dataset.loc[sorted_dataset[feature_to_split] <= split_feature_val[i]]\r\n        rightsplit = sorted_dataset.loc[sorted_dataset[feature_to_split] > split_feature_val[i]]\r\n        if (IGGR == 'IG'): # for information gain \r\n            gain_information = infogain_wine(sorted_dataset,leftsplit['class'],rightsplit['class'],total_entropy,split_feature_val)\r\n        elif (IGGR == 'GR'): # for gain ratio\r\n            gain_information = split_information_wine(sorted_dataset,leftsplit['class'],rightsplit['class'],total_entropy,split_feature_val)\r\n        # storing the best information gain\r\n        best_info_gain.append(gain_information)\r\n    # storing the index of the best split value\r\n    index_of_best_split_val = best_info_gain.index(max(best_info_gain))\r\n    # finding the best information gain \r\n    max_info_gain = max(best_info_gain)\r\n    return max_info_gain\r\n\r\n# creating the left and right split of the input dataset based on the Information Gain\r\ndef left_right_split(dataset,best_feature,target_attribute_name = 'class',IGGR = 'IG'):\r\n    # finding the 
total entropy \r\n    total_entropy = entropy_wine(dataset[target_attribute_name])\r\n    # sorting the dataset for a particular feature\r\n    sorted_dataset = dataset.sort_values([best_feature],axis = 0,ascending = True)\r\n    # find the split_values\r\n    split_feature_val = sorted_dataset[best_feature].values\r\n    # finding information gain\r\n    best_info_gain = []\r\n    for i in range(len(split_feature_val)):\r\n        # finding different splits\r\n        leftsplit = sorted_dataset.loc[sorted_dataset[best_feature] <= split_feature_val[i]]\r\n        rightsplit = sorted_dataset.loc[sorted_dataset[best_feature] > split_feature_val[i]]\r\n        if (IGGR == 'IG'):\r\n            gain_information = infogain_wine(sorted_dataset,leftsplit['class'],rightsplit['class'],total_entropy,split_feature_val)\r\n        elif (IGGR == 'GR'):\r\n            gain_information = split_information_wine(sorted_dataset,leftsplit['class'],rightsplit['class'],total_entropy,split_feature_val)\r\n        best_info_gain.append(gain_information)\r\n    # finding the best split value index\r\n    index_of_best_split_val = best_info_gain.index(max(best_info_gain))\r\n    leftsplit = sorted_dataset.loc[sorted_dataset[best_feature] <= split_feature_val[index_of_best_split_val]] # leftsplit of the dataset\r\n    rightsplit = sorted_dataset.loc[sorted_dataset[best_feature] > split_feature_val[index_of_best_split_val]] # rightsplit of the dataset\r\n    if (IGGR == 'IG'):\r\n        gain_information = infogain_wine(sorted_dataset,leftsplit['class'],rightsplit['class'],total_entropy,split_feature_val)\r\n    elif (IGGR == 'GR'):\r\n        gain_information = split_information_wine(sorted_dataset,leftsplit['class'],rightsplit['class'],total_entropy,split_feature_val)\r\n    return leftsplit,rightsplit,split_feature_val[index_of_best_split_val]\r\n\r\n# creating the ID3 decision tree\r\ndef ID3_wine(data,originaldata,features,target_attribute_name=\"class\",parent_node_class = None,IGGR = 'IG'):\r\n    # First stopping condition: If all target_values have the same value, return this value\r\n    if len(np.unique(data[target_attribute_name])) <= 1:\r\n        return np.unique(data[target_attribute_name])[0]\r\n    \r\n    # second stopping condition: If the length of the data is zero, then return the mode target feature value\r\n    elif len(data) == 0:\r\n        return np.unique(originaldata[target_attribute_name])[np.argmax(np.unique(originaldata[target_attribute_name],return_counts=True)[1])]\r\n\r\n    # if the feature is empty then return the parent node class\r\n    elif len(features) == 0:\r\n        return parent_node_class\r\n\r\n    # if none of the above stopping conditions is true then grow the tree.\r\n    else:\r\n        # parent node class is the default value for the node, which is the mode target feature value of the current node\r\n        parent_node_class = np.unique(data[target_attribute_name])[np.argmax(np.unique(data[target_attribute_name],return_counts=True)[1])]\r\n        \r\n        #find and select the feature which finds the best split for the dataset\r\n        IG = []\r\n        for i in features:\r\n            IG.append(best_split_index(data,i,features,'class',IGGR=IGGR))\r\n        # find the index of the best split\r\n        index_best_feature = IG.index(max(IG))\r\n        # best feature at the best feature index for the split \r\n        best_feature = features[index_best_feature]\r\n        # create a tree structure in the dictionary and assign the best feature as the root node.\r\n        tree = {best_feature:{}}\r\n        # now grow the node under the best feature \r\n        # applying recursion on the ID3 tree function\r\n        for i in range(2):\r\n            data_copy = data\r\n            if (i == 0):\r\n                l,tp0,split_value_l = 
left_right_split(data_copy,best_feature,'class',IGGR=IGGR)\r\n subtree_l = ID3_wine(l,dataset,features,'class',parent_node_class,IGGR=IGGR)\r\n split_value_l = str(split_value_l)\r\n split_value_l = '<= ' + split_value_l\r\n tree[best_feature][split_value_l] = subtree_l\r\n elif (i == 1):\r\n tp0,r,split_value_r = left_right_split(data_copy,best_feature,'class',IGGR=IGGR)\r\n subtree_r = ID3_wine(r,dataset,features,'class',parent_node_class,IGGR=IGGR)\r\n split_value_r = str(split_value_r)\r\n split_value_r = '> ' + split_value_r\r\n tree[best_feature][split_value_r] = subtree_r\r\n return(tree)\r\n\r\n# the function to predict the output of the test data using the tree that we have created\r\ndef predict_wine(queries,tree,default=2):\r\n # we will check for every feature if it exists in the query. \r\n # If we find the feature name which exists in the dictionary then go inside the dictionary otherwise return the default value \r\n for key in list(queries.keys()):\r\n if (type(tree) == dict):\r\n if key in list(tree.keys()):\r\n b = tree[key].keys()\r\n b = list(b)\r\n z = float(b[0][2:])\r\n if (queries[key] <= z): \r\n copy_tree = tree\r\n copy_tree = copy_tree[key][b[0]]\r\n a = predict_wine(queries,copy_tree)\r\n return a\r\n # in here we are trying to predict the output based on the tree generated by us\r\n elif (queries[key] > z):\r\n copy_tree = tree\r\n copy_tree = copy_tree[key][b[1]]\r\n b = predict_wine(queries,copy_tree)\r\n return b\r\n else:\r\n return tree\r\n\r\n# getting the prediction accuracy\r\ndef test_wine(data,tree):\r\n # create queries simply by removing feature value column from the test dataset\r\n queries = data.iloc[:,:-1].to_dict(orient = \"records\")\r\n #Create a empty DataFrame in whose columns the prediction of the tree are stored\r\n predicted = pd.DataFrame(columns=[\"predicted\"])\r\n #Calculate the prediction accuracy\r\n for i in range(len(data)):\r\n predicted.loc[i,\"predicted\"] = predict_wine(queries[i],tree,1.0) \r\n prediction_accuracy = (np.sum(predicted[\"predicted\"] == data[\"class\"])/len(data))*100\r\n return prediction_accuracy,predicted\r\n\r\n# A Simple Confusion Matrix Implementation\r\ndef confusionmatrix_wine(actual, predicted, normalize = False):\r\n unique = sorted(set(actual))\r\n matrix = [[0 for _ in unique] for _ in unique]\r\n imap = {key: i for i, key in enumerate(unique)}\r\n # Generate Confusion Matrix\r\n for p, a in zip(predicted, actual):\r\n matrix[imap[p]][imap[a]] += 1\r\n # Matrix Normalization\r\n if normalize:\r\n sigma = sum([sum(matrix[imap[i]]) for i in unique])\r\n matrix = [row for row in map(lambda i: list(map(lambda j: j / sigma, i)), matrix)]\r\n return matrix\r\n \r\n# create a function for performing 10 folds cross validation 10 times\r\ndef cross_validation_score_wine(dataset, num_of_folds = 10,IGGR='IG'):\r\n confusion_expected = [] # an empty list for the actual values for the confusion matrix\r\n confusion_predicted = [] # an empty list for the predicted values for the confusion matrix\r\n num_of_rows = dataset.shape[0] # number of rows in the dataset\r\n fold_size = num_of_rows / num_of_folds # calculating the fold size\r\n fold_size = int(fold_size) # converting it to integer\r\n accuracy = [] # array for the accuracy\r\n tree_number_for_print = 0 # number of trees\r\n for z in range(10): # 10 times 10 fold cross validation\r\n m = 0\r\n n = 0 \r\n dataset_copy = dataset # copied dataset\r\n dataset_copy = shuffle(dataset_copy).reset_index(drop=True)\r\n for i in range(10): # 10 fold cross 
validation\r\n tree_number_for_print = tree_number_for_print + 1\r\n m = m + fold_size # back limit\r\n training_data = dataset_copy.drop(dataset_copy.index[n:m]).reset_index(drop=True)\r\n testing_data = dataset_copy.iloc[n:m].reset_index(drop=True)\r\n confusion_expected.append(testing_data)\r\n n = n + fold_size # front limit\r\n ## find tree and it's accuracy\r\n tree = ID3_wine(training_data,training_data,feature_names,IGGR=IGGR)\r\n #pprint(tree)\r\n prediction_accuracy,predicted = test_wine(testing_data,tree)\r\n #print(\"The accuracy of the tree number \",tree_number_for_print,\" is: \",prediction_accuracy)\r\n confusion_predicted.append(predicted)\r\n accuracy.append(prediction_accuracy)\r\n mean = np.mean(accuracy) # finding mean of the accuracy\r\n variance = np.var(accuracy) # finding variance of the accuracy\r\n confusion_mat_index = accuracy.index(max(accuracy)) \r\n con_fun_actual = list(confusion_expected[confusion_mat_index]['class'])\r\n con_fun_predicted = list(confusion_predicted[confusion_mat_index]['predicted'])\r\n confusion_matrix = confusionmatrix_wine(con_fun_actual,con_fun_predicted)\r\n confusion_matrix = np.matrix(confusion_matrix)\r\n return mean,variance,confusion_matrix\r\n\r\nprint(\"The decision tree of Wine dataset using information gain\")\r\nfinal_mean_IG,final_variance_IG,final_confusion_matrix_IG = cross_validation_score_wine(dataset,num_of_folds=10,IGGR='IG')\r\nprint(\"Final Mean: \",final_mean_IG)\r\nprint(\"Final variance: \", final_variance_IG)\r\nprint(\"Confusion matrix: \\n\",final_confusion_matrix_IG)\r\n\r\nprint(\"The decision tree of Wine dataset using Gain Ratio\")\r\nfinal_mean_GR,final_variance_GR,final_confusion_matrix_GR = cross_validation_score_wine(dataset,num_of_folds=10,IGGR='GR')\r\nprint(\"Final Mean: \",final_mean_GR)\r\nprint(\"Final variance: \", final_variance_GR)\r\nprint(\"Confusion matrix: \\n\",final_confusion_matrix_GR)","sub_path":"Decision Tree From Scratch Q2.py","file_name":"Decision Tree From Scratch Q2.py","file_ext":"py","file_size_in_byte":25796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"586679053","text":"\"\"\"\nDetermine if a 9x9 Sudoku board is valid. 
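The solution below encodes every constraint a filled cell participates in as a hashable tuple (row, column and 3x3 box) and simply checks for duplicates. 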
Only the filled cells need to be validated according to the following rules:\n\nEach row must contain the digits 1-9 without repetition.\nEach column must contain the digits 1-9 without repetition.\nEach of the 9 3x3 sub-boxes of the grid must contain the digits 1-9 without repetition.\nThe Sudoku board could be partially filled, where empty cells are filled with the character '.'.\n\nNote:\nA Sudoku board (partially filled) could be valid but is not necessarily solvable.\nOnly the filled cells need to be validated according to the mentioned rules.\nThe given board contain only digits 1-9 and the character '.'.\nThe given board size is always 9x9.\n\"\"\"\n\n\nclass Solution:\n def isValidSudoku(self, board):\n \"\"\"\n :type board: List[List[str]]\n :rtype: bool\n \"\"\"\n seen = []\n for i, row in enumerate(board):\n for j, c in enumerate(row):\n if c != '.':\n seen += [(c, j), (i, c), (i//3, j//3, c)]\n return len(seen) == len(set(seen))","sub_path":"Valid Sudoku.py","file_name":"Valid Sudoku.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"633808664","text":"expected_output = {\r\n \"load_time\":\"1%/0%\",\r\n \"one_minute\":\"0%\",\r\n \"five_minutes\":\"0%\",\r\n \"ntp_time\":\"15:09:55.102 UTC Wed Nov 10 2021\",\r\n \"smart_licensing_status\":{\r\n \"export_authorization_key\":{\r\n \"features_authorized\":\"\"\r\n },\r\n \"utility\":{\r\n \"status\":\"DISABLED\"\r\n },\r\n \"smart_licensing_using_policy\":{\r\n \"status\":\"ENABLED\"\r\n },\r\n \"account_information\":{\r\n \"smart_account\":\"\",\r\n \"virtual_account\":\"\"\r\n },\r\n \"data_privacy\":{\r\n \"sending_hostname\":\"yes\",\r\n \"callhome_hostname_privacy\":\"DISABLED\",\r\n \"smart_licensing_hostname_privacy\":\"DISABLED\",\r\n \"version_privacy\":\"DISABLED\"\r\n },\r\n \"transport\":{\r\n \"type\":\"Callhome\"\r\n },\r\n \"miscellaneous\":{\r\n \"custom_id\":\"\"\r\n },\r\n \"policy\":{\r\n \"policy_in_use\":\"Merged from multiple sources.\",\r\n \"reporting_ack_required\":\"yes (CISCO default)\",\r\n \"unenforced_non_export_perpetual_attributes\":{\r\n \"first_report_requirement_days\":\"365 (CISCO default)\",\r\n \"reporting_frequency_days\":\"0 (CISCO default)\",\r\n \"report_on_change_days\":\"90 (CISCO default)\"\r\n },\r\n \"unenforced_non_export_subscription_attributes\":{\r\n \"first_report_requirement_days\":\"90 (CISCO default)\",\r\n \"reporting_frequency_days\":\"90 (CISCO default)\",\r\n \"report_on_change_days\":\"90 (CISCO default)\"\r\n },\r\n \"enforced_perpetual_subscription_license_attributes\":{\r\n \"first_report_requirement_days\":\"0 (CISCO default)\",\r\n \"reporting_frequency_days\":\"0 (CISCO default)\",\r\n \"report_on_change_days\":\"0 (CISCO default)\"\r\n },\r\n \"export_perpetual_subscription_license_attributes\":{\r\n \"first_report_requirement_days\":\"0 (CISCO default)\",\r\n \"reporting_frequency_days\":\"0 (CISCO default)\",\r\n \"report_on_change_days\":\"0 (CISCO default)\"\r\n }\r\n },\r\n \"usage_reporting\":{\r\n \"last_ack_received\":\"\",\r\n \"next_ack_deadline\":\"Feb 07 10:32:11 2022 UTC\",\r\n \"reporting_push_interval\":\"30 days\",\r\n \"next_ack_push_check\":\"\",\r\n \"next_report_push\":\"Nov 10 11:32:02 2021 UTC\",\r\n \"last_report_push\":\"\",\r\n \"last_report_file_write\":\"\"\r\n }\r\n },\r\n \"license_usage\":{\r\n \"license_name\":{\r\n \"network-advantage (C9300-24 Network Advantage):\":{\r\n \"description\":\"C9300-24 Network Advantage\",\r\n 
\"count\":2,\r\n \"version\":\"1.0\",\r\n \"status\":\"IN USE\",\r\n \"export_status\":\"NOT RESTRICTED\",\r\n \"feature_name\":\"network-advantage\",\r\n \"feature_description\":\"C9300-24 Network Advantage\",\r\n \"enforcement_type\":\"NOT ENFORCED\",\r\n \"license_type\":\"Perpetual\"\r\n },\r\n \"dna-advantage (C9300-24 DNA Advantage):\":{\r\n \"description\":\"C9300-24 DNA Advantage\",\r\n \"count\":2,\r\n \"version\":\"1.0\",\r\n \"status\":\"IN USE\",\r\n \"export_status\":\"NOT RESTRICTED\",\r\n \"feature_name\":\"dna-advantage\",\r\n \"feature_description\":\"C9300-24 DNA Advantage\",\r\n \"enforcement_type\":\"NOT ENFORCED\",\r\n \"license_type\":\"Subscription\"\r\n },\r\n \"network-advantage (C9300-48 Network Advantage):\":{\r\n \"description\":\"C9300-48 Network Advantage\",\r\n \"count\":1,\r\n \"version\":\"1.0\",\r\n \"status\":\"IN USE\",\r\n \"export_status\":\"NOT RESTRICTED\",\r\n \"feature_name\":\"network-advantage\",\r\n \"feature_description\":\"C9300-48 Network Advantage\",\r\n \"enforcement_type\":\"NOT ENFORCED\",\r\n \"license_type\":\"Perpetual\"\r\n },\r\n \"dna-advantage (C9300-48 DNA Advantage):\":{\r\n \"description\":\"C9300-48 DNA Advantage\",\r\n \"count\":1,\r\n \"version\":\"1.0\",\r\n \"status\":\"IN USE\",\r\n \"export_status\":\"NOT RESTRICTED\",\r\n \"feature_name\":\"dna-advantage\",\r\n \"feature_description\":\"C9300-48 DNA Advantage\",\r\n \"enforcement_type\":\"NOT ENFORCED\",\r\n \"license_type\":\"Subscription\"\r\n }\r\n }\r\n },\r\n \"product_information\":{\r\n \"udi\":{\r\n \"pid\":\"C9300-24UX\",\r\n \"sn\":\"FCW2303D16Y\"\r\n },\r\n \"ha_udi_list\":{\r\n \"active\":{\r\n \"pid\":\"C9300-24UX\",\r\n \"sn\":\"FCW2303D16Y\"\r\n },\r\n \"standby\":{\r\n \"pid\":\"C9300-24U\",\r\n \"sn\":\"FHH2043P09E\"\r\n },\r\n \"member\":{\r\n \"pid\":\"C9300-48T\",\r\n \"sn\":\"FCW2139L056\"\r\n }\r\n }\r\n },\r\n \"agent_version\":{\r\n \"smart_agent_for_licensing\":\"5.3.10_rel/25\"\r\n },\r\n \"license_authorizations\":{\r\n \"overall_status\":{\r\n \"active\":{\r\n \"pid\":\"C9300-24UX\",\r\n \"sn\":\"FCW2303D16Y\",\r\n \"status\":\"NOT INSTALLED\"\r\n },\r\n \"standby\":{\r\n \"pid\":\"C9300-24U\",\r\n \"sn\":\"FHH2043P09E\",\r\n \"status\":\"NOT INSTALLED\"\r\n },\r\n \"member\":{\r\n \"pid\":\"C9300-48T\",\r\n \"sn\":\"FCW2139L056\",\r\n \"status\":\"NOT INSTALLED\"\r\n }\r\n },\r\n \"purchased_licenses\":\"No Purchase Information Available\"\r\n },\r\n \"usage_report_summary\":{\r\n \"total\":\"12\",\r\n \"purged\":\"0\",\r\n \"total_acknowledged_received\":\"0\",\r\n \"waiting_for_ack\":\"0\",\r\n \"available_to_report\":\"12\",\r\n \"collecting_data\":\"4\"\r\n }\r\n}","sub_path":"src/genie/libs/parser/iosxe/tests/ShowLicenseAll/cli/equal/golden_output_expected.py","file_name":"golden_output_expected.py","file_ext":"py","file_size_in_byte":5596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"81921259","text":"# https://programmers.co.kr/learn/courses/30/lessons/42578\n\nfrom collections import defaultdict\ndef solution(clothes):\n answer = 0\n int_dict=defaultdict(int)\n for i in clothes:\n int_dict[i[1]]+=1\n multiple=1\n for k in int_dict.values():\n multiple*=(k+1)\n return multiple-1","sub_path":"etc/위장.py","file_name":"위장.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"645949017","text":"#!/usr/bin/env python\n\nimport socket\nfrom serverSidePublisher import 
publishingProcessor\nfrom acquisitionProcessor import acquisitionProcessor\nimport math\nimport numpy as np\nimport rospy\nimport rosbag\nfrom sensor_msgs.msg import CompressedImage\n\nimport sys\nimport os\nimport time\n\nimport multiprocessing\nimport logging\nimport traceback\nlogger = multiprocessing.log_to_stderr()\nlogger.setLevel(logging.INFO)\n\n\ndef get_ip():\n    # from https://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib\n\n    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n    try:\n        # doesn't even have to be reachable\n        s.connect(('10.255.255.255', 1))\n        IP = s.getsockname()[0]\n    except Exception:\n        IP = '127.0.0.1'\n    finally:\n        s.close()\n    return IP\n\n\n# IMPORT THE ENVIRONMENT VARIABLES (DEPENDING ON THE MODE)\nACQ_ROS_MASTER_URI_DEVICE_IP = get_ip()\nACQ_ROS_MASTER_URI_DEVICE_PORT = \"11311\"\n\nLAB_ROS_MASTER_IP = os.getenv('LAB_ROS_MASTER_IP', \"\")\nLAB_ROS_MASTER_PORT = \"11311\"\n\n\n# Define the two concurrent processes:\ndef runDeviceSideProcess(ROS_MASTER_URI, quitEvent, is_autobot):\n    \"\"\"\n    Receive and process data from the remote device (Duckiebot or watchtower).\n    \"\"\"\n    logger.info('Device side processor starting')\n\n    os.environ['ROS_MASTER_URI'] = ROS_MASTER_URI\n    ap = acquisitionProcessor(logger, is_autobot)\n    ap.liveUpdate(outputDictQueue, inputDictQueue, quitEvent)\n\n\ndef runServerSideProcess(ROS_MASTER_URI, quitEvent, is_autobot):\n    \"\"\"\n    Publish the processed data to the ROS Master that the graph optimizer uses.\n    \"\"\"\n    logger.info('Server side processor starting')\n    os.environ['ROS_MASTER_URI'] = ROS_MASTER_URI\n    pp = publishingProcessor(logger, is_autobot)\n    pp.publishOnServer(outputDictQueue, inputDictQueue,\n                       quitEvent)\n\n\nif __name__ == '__main__':\n    \"\"\"\n    Starts the two processes and sets up their termination.\n    \"\"\"\n\n    print(\"Image and odometry acquisition, processing and publishing setting up\")\n\n    # Event to terminate the two processes\n    quitEvent = multiprocessing.Event()\n\n    ros_master_uri_server = \"http://\"+LAB_ROS_MASTER_IP + \\\n        \":\"+LAB_ROS_MASTER_PORT\n    ros_master_uri_device = \"http://\"+ACQ_ROS_MASTER_URI_DEVICE_IP + \\\n        \":\"+ACQ_ROS_MASTER_URI_DEVICE_PORT\n\n    node_name = rospy.get_name()\n    veh_name = node_name.split(\"/\")[1]\n\n    is_autobot = bool(rospy.get_param(\n        \"/\"+veh_name+\"/acquisition_bridge/is_autobot\", default=True))\n\n    # outputDictQueue is used to pass data between the two processes\n    outputDictQueue = multiprocessing.Queue(maxsize=100)\n    inputDictQueue = multiprocessing.Queue(maxsize=10)\n\n    # Start the processes\n\n    deviceSideProcess = multiprocessing.Process(target=runDeviceSideProcess,\n                                                args=(\n                                                    ros_master_uri_device, quitEvent, is_autobot),\n                                                name=\"deviceSideProcess\")\n\n    serverSideProcess = multiprocessing.Process(target=runServerSideProcess,\n                                                args=(\n                                                    ros_master_uri_server, quitEvent, is_autobot),\n                                                name=\"serverSideProcess\")\n    deviceSideProcess.start()\n    serverSideProcess.start()\n\n    # Exit if any of the two processes exits:\n    while True:\n        # print(\"Current approx. size %d \" % outputDictQueue.qsize())\n\n        if not deviceSideProcess.is_alive():\n            quitEvent.set()\n            deviceSideProcess.terminate()\n            # Wait until the queue is processed\n            while not outputDictQueue.empty() and serverSideProcess.is_alive():\n                time.sleep(1)\n            outputDictQueue.close()\n            # Give time for submitting the last message to the server\n            serverSideProcess.join()\n            time.sleep(0.5)\n            serverSideProcess.terminate()\n            print(\"The device side process exited. 
Stopping everything.\")\n sys.exit()\n\n if not serverSideProcess.is_alive():\n quitEvent.set()\n deviceSideProcess.terminate()\n serverSideProcess.terminate()\n outputDictQueue.close()\n print(\"The server side process exited. Stopping everything.\")\n sys.exit()\n\n time.sleep(1)\n","sub_path":"rescue-acquisition-bridge/packages/acquisition_bridge/src/acquire_and_publish.py","file_name":"acquire_and_publish.py","file_ext":"py","file_size_in_byte":4406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"306162349","text":"from django import forms\r\nfrom tool.models import Statistics\r\nfrom tool.models import AccountInfo\r\nfrom material import Layout, Row, Fieldset\r\n\r\n# Create your models here.\r\nINFORMATION_TYPE = (\r\n ('basic','basic'),\r\n ('clusters','clusters'),\r\n ('geography','geography'),\r\n ('analysis','analysis'),\r\n ('advanced','advanced')\r\n)\r\n\r\nclass showStatisticsForm(forms.ModelForm):\r\n# name = forms.charField(max_length=200)\r\n# accountType = forms.MultipleChoiceField(choices=['text1','text1'], widget=forms.CheckboxSelectMultiple())\r\n# relationSearch = forms.ModelMultipleChoiceField(\r\n# widget = forms.CheckboxSelectMultiple,\r\n# required = True\r\n# )\r\n informationType = forms.ChoiceField(choices=INFORMATION_TYPE)\r\n account = forms.ModelChoiceField(queryset=AccountInfo.objects.all())\r\n \r\n class Meta:\r\n model = Statistics\r\n \r\n fields = ['informationType','test','account']\r\n# widgets = {\r\n## 'relationSearch':forms.CheckboxSelectMultiple(),\r\n# 'account' : forms.ModelChoiceField(queryset=AccountInfo.objects.all())\r\n# }\r\n \r\n layout = Layout(\r\n Fieldset(\"Select your account\",\r\n 'informationType','test','account'))\r\n \r\n# def save(self, commit=True):\r\n# # do something with self.cleaned_data['temp_id']\r\n# return super(Statistics, self).save(commit=commit)","sub_path":"tool/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"311878473","text":"#009 - IMC\n\npeso = float(input('Seu Peso: '))\naltura = float(input('Sua Altura: '))\nimc = peso/(altura*altura)\nresultado = ''\n\nif imc < 20:\n resultado = 'Abaixo do Peso'\nelif imc >= 20 and imc < 25:\n resultado = 'Peso Ideal'\nelif imc >= 25 and imc < 30:\n resultado = 'Sobrepeso'\nelif imc >= 30 and imc < 35:\n resultado = 'Obesidade Moderada'\nelif imc >= 35 and imc < 40:\n resultado = 'Obesidade Severa'\nelif imc >= 40 and imc < 50:\n resultado = 'Obesidade Mórbida'\nelse:\n resultado = 'Super Obesidade' \n\nprint(f'Seu IMC: {imc:.2f}\\nResultado: {resultado}')\n","sub_path":"Revisões - UFRPE/#1: Condicionais - Slides/009 - IMC.py","file_name":"009 - IMC.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"558110213","text":"import cv2 \nimport numpy as np\nfrom math import pi, cos, sin\n\ncolor = cv2.imread('circles.png')\n#color = cv2.medianBlur(color,5)\n\ncv2.imshow('Orig',color)\n\ngray = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)\n\ncanny = cv2.Canny(gray,200,20)\n#print(cv2.HOUGH_GRADIENT)\n#print(gray)\ncircles = cv2.HoughCircles(gray, cv2.HOUGH_GRADIENT, 1, 60, param1=200, param2=20, minRadius=0, maxRadius=0)\n#circles = cv.HoughCircles(img,cv.HOUGH_GRADIENT,1,20,param1=50,param2=30,minRadius=0,maxRadius=0)\nprint(circles)\ncircles = np.uint16(np.around(circles))\nprint(circles)\nfor i in 
circles[0,:]:\n cv2.circle(color,(i[0],i[1]),i[2],(0,255,0),1)\n # draw the center of the circle\n cv2.circle(color,(i[0],i[1]),2,(0,0,255),-1)\n\ncv2.imshow('Circles',color)\n\n\n#dt = cv2.distanceTransform(255, cv2.DIST_L2 ,3)\ndt= cv2.distanceTransform(255-canny,cv2.DIST_L2,3) \n\ncv2.imshow('Hu',dt)\n#cv2.waitKey(0)\n\nprint(dt)\nprint(len(dt))\nprint(len(dt[0]))\nprint(len(dt[1]))\nminInlierDist = 2.0\n\n\nfor i in circles[0,:]:\n print(i[0])\n print(i[1])\n counter = 0\n inlier = 0\n maxInlierDist = i[2]/25\n #print(\"Radius_b: \", i[2])\n #print(maxInlierDist)\n \n if maxInlierDist 500):\n\t\t\t\t\treturn distances[goal]\n\n\t\tunvisited[visited_index] = -1\n\t\toutline = \"\"\n\t\td_outline = \"\"\n\t\tfor unv in unvisited:\n\t\t\toutline += \"{} \".format(unv)\n\t\tfor d in distances:\n\t\t\td_outline += \"{} \".format(distances[d])\n\n\t\tif (closest == goal):\t\t\t\t\t\t\t\t# determine if the goal has been visited\n\t\t\tgoal_found = True\n\t\t\treturn distances[goal]\n\t\tneighbors = atom_table[closest]\n\t\tfor n in neighbors:\t\t\t\t\t\t\t\t\t# update shortest distance to current atom's neighbors\n\t\t\tcurrent_path_length = distances[n]\n\t\t\tif (closest_dist + 1 < current_path_length):\n\t\t\t\tdistances[n] = closest_dist + 1\n\t\tclosest_dist = 99999\t\t\t\t\t\t\t\t# visited atoms are removed from the unvisited list, so this value is reset\n\t\tfailsafe += 1\n\ndef countNitrogens(mol):\n\tsmile = Chem.MolToSmiles(mol)\n\tnitrogenCount = 0\n\tfor ch in smile:\n\t\tif ch == 'N':\n\t\t\tnitrogenCount += 1\n\treturn nitrogenCount\n\ndef countOxygens(mol):\n\tsmile = Chem.MolToSmiles(mol)\n\toxygenCount = 0\n\tfor ch in smile:\n\t\tif ch == 'O':\n\t\t\toxygenCount += 1\n\treturn oxygenCount\n\ndef countCarbons(mol):\n\tsmile = Chem.MolToSmiles(mol)\n\tcarbonCount = 0\n\tfor ch in smile:\n\t\tif ch == 'C':\n\t\t\tcarbonCount += 1\n\treturn carbonCount\n\ndef countBorons(mol):\n\tsmile = Chem.MolToSmiles(mol)\n\tboronCount = 0\n\tfor ch in smile:\n\t\tif ch == 'B':\n\t\t\tboronCount += 1\n\treturn boronCount\n\ndef countIodine(mol):\n\tsmile = Chem.MolToSmiles(mol)\n\tiodCount = 0\n\tfor ch in smile:\n\t\tif ch == 'I':\n\t\t\tiodCount += 1\n\treturn iodCount\n\ndef countPhos(mol):\n\tsmile = Chem.MolToSmiles(mol)\n\tphosCount = 0\n\tfor ch in smile:\n\t\tif ch == 'P':\n\t\t\tphosCount += 1\n\treturn phosCount\n\ndef countFluorine(mol):\n\tsmile = Chem.MolToSmiles(mol)\n\tfluorCount = 0\n\tfor ch in smile:\n\t\tif ch == 'F':\n\t\t\tfluorCount += 1\n\treturn fluorCount\n\ndef countSulfurs(mol):\n\tsmile = Chem.MolToSmiles(mol)\n\tsulfurCount = 0\n\tfor ch in smile:\n\t\tif ch == 'S':\n\t\t\tsulfurCount += 1\n\treturn sulfurCount\n\ndef countDoubleBonds(mol):\n\tsmile = Chem.MolToSmiles(mol)\n\tdoubleBonds = 0\n\tfor ch in smile:\n\t\tif ch == '=':\n\t\t\tdoubleBonds += 1\n\treturn doubleBonds\n\n\nmain()\n","sub_path":"old_files_archive/dbdock.py","file_name":"dbdock.py","file_ext":"py","file_size_in_byte":8629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"66673434","text":"\n\nfrom xai.brain.wordbase.adjectives._private import _PRIVATE\n\n#calss header\nclass _PRIVATER(_PRIVATE, ):\n\tdef __init__(self,): \n\t\t_PRIVATE.__init__(self)\n\t\tself.name = \"PRIVATER\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"private\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/adjectives/_privater.py","file_name":"_privater.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"469936","text":"\n\nfrom xai.brain.wordbase.adjectives._snazzy import _SNAZZY\n\n#class header\nclass _SNAZZIER(_SNAZZY, ):\n\tdef __init__(self,): \n\t\t_SNAZZY.__init__(self)\n\t\tself.name = \"SNAZZIER\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"snazzy\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_snazzier.py","file_name":"_snazzier.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"216354641","text":"import socket\n\n\ndef client():\n    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    s.connect((socket.gethostname(), 1024))\n\n    print(s.recv(1024).decode('utf-8'))\n\n    s.close()\nif __name__ == '__main__':\n    client()\n\n\n\n\n\n\n","sub_path":"Socket/simple_server-client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"478026616","text":"import numpy as np\nfrom keras.datasets import imdb\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LSTM\nfrom keras.layers.embeddings import Embedding\nfrom keras.preprocessing import sequence\nfrom collections import Counter\nimport os\nimport getEmbeddings2\nimport matplotlib.pyplot as plt\nfrom sklearn.metrics import confusion_matrix, classification_report\nimport seaborn as sn\nimport pandas as pd\n\n\ntop_words = 5000\nepoch_num = 8\nbatch_size = 25\n\ndef plot_cmat(yte, ypred):\n    '''Plotting confusion matrix'''\n    #cm = confusion_matrix(yte, ypred, labels=[0,1])\n    #print(cm)\n    report = classification_report(yte, ypred, labels=[0,1])\n    print(report)\n    cm = confusion_matrix(yte, ypred, labels=[0,1])\n    print(cm)\n    df_cm = pd.DataFrame(cm)\n    sn.set(font_scale=1.4)  # for label size\n    sn.heatmap(df_cm, annot=True, annot_kws={\"size\": 12})  # font size\n    plt.savefig('baseline-cm-lstm-custom-8e-25b-nojacquard-anmol.png')\n    # svm = sns.heatmap(df_cm, annot=True, cmap='Blues', linecolor='white', linewidths=1)\n    # figure = svm.get_figure()\n    # figure.savefig('matrix.png')\n\n\n#if not os.path.isfile('xtr_shuffled.npy') or \\\n#        not os.path.isfile('xte_shuffled.npy') or \\\n#        not os.path.isfile('ytr_shuffled.npy') or \\\n#        not os.path.isfile('yte_shuffled.npy'):\n\ngetEmbeddings2.clean_data()\n\nxtr = np.load('xtr_shuffled.npy', allow_pickle=True)\nxte = np.load('xte_shuffled.npy', allow_pickle=True)\ny_train = np.load('ytr_shuffled.npy', allow_pickle=True)\ny_test = np.load('yte_shuffled.npy', allow_pickle=True)\n\ncnt = Counter()\nx_train = []\nfor x in xtr:\n    print(x)\n    x_train.append(str(x).split())\n    for word in x_train[-1]:\n        cnt[word] += 1\n\n# Storing most common words\nmost_common = cnt.most_common(top_words + 1)\nword_bank = {}\nid_num = 1\nfor word, freq in most_common:\n    word_bank[word] = id_num\n    id_num += 1\n\n# Encode the sentences\nfor news in x_train:\n    i = 0\n    while i < len(news):\n        if news[i] in word_bank:\n            news[i] = word_bank[news[i]]\n            i += 1\n        else:\n            del news[i]\n\ny_train = list(y_train)\ny_test = list(y_test)\n\n# Delete the short news\ni = 0\nwhile i < len(x_train):\n    if len(x_train[i]) > 10:\n        i += 1\n    else:\n        del x_train[i]\n        del y_train[i]\n\n# Generating test data\nx_test = []\nfor x 
in xte:\n x_test.append(str(x).split())\n\n# Encode the sentences\nfor news in x_test:\n i = 0\n while i < len(news):\n if news[i] in word_bank:\n news[i] = word_bank[news[i]]\n i += 1\n else:\n del news[i]\n\n\n# Truncate and pad input sequences\nmax_review_length = 500\nX_train = sequence.pad_sequences(x_train, maxlen=max_review_length)\nX_test = sequence.pad_sequences(x_test, maxlen=max_review_length)\n\n# Convert to numpy arrays\ny_train = np.array(y_train)\ny_test = np.array(y_test)\n\n# Create the model\nembedding_vecor_length = 64\nmodel = Sequential()\nmodel.add(Embedding(top_words+2, embedding_vecor_length, input_length=max_review_length))\nmodel.add(LSTM(100))\nmodel.add(Dense(1, activation='sigmoid'))\nmodel.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])\nprint(model.summary())\nmodel.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epoch_num, batch_size=batch_size)\n\n# Final evaluation of the model\nscores = model.evaluate(X_test, y_test, verbose=0)\nprint(\"Accuracy= %.2f%%\" % (scores[1]*100))\n\n# Draw the confusion matrix\ny_pred = model.predict_classes(X_test)\nplot_cmat(y_test, y_pred)\n","sub_path":"230 Covid-19 Fake News Detector/LSTM-custom.py","file_name":"LSTM-custom.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"18321647","text":"import http\nimport cmd\nimport requests\n\nclass CommandLine(cmd.Cmd):\n def __init__(self):\n cmd.Cmd.__init__(self)\n\n def nothing():\n pass\n\n def do_hello(self, extra):\n print(\"Hello World\")\n print(self)\n print(extra)\n return\n\n def emptyline(self):\n print(\"Arg: \")\n print(self)\n return\n\n def do_query(self, query):\n print(query)\n r = requests.post(\"http://127.0.0.1:5000/query/\", data={\"query\":query})\n\n print(r.status_code)\n print(r.headers)\n print(r.text)\n\n return\n\n\n\n\nhistory = []\n\nprint(\"Hello, welcome to the SQL query program. Any input is terminated by a newline character. To view previous queries, simply use the up arrow key. 
Happy searching!\")\n\ncom = CommandLine()\ncom.cmdloop()\n","sub_path":"command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"103264651","text":"# I HAVE CREATED THIS FILE - ASHISH\r\n\r\nfrom django.http import HttpResponse\r\nfrom django.shortcuts import render\r\n\r\n\r\ndef index(request):\r\n    return render(request, 'index.html')\r\n    # return HttpResponse(\"Home\")\r\n\r\n\r\ndef analyze(request):\r\n    # text written in the text area whose name is text\r\n    djtext = (request.POST.get('text', 'default'))\r\n\r\n    # checkbox for removing punctuations whose name is removepunc\r\n    removepunc = (request.POST.get('removepunc', 'off'))\r\n\r\n    # checkbox for capitalizing the whole text\r\n    fullcaps = (request.POST.get('fullcaps', 'off'))\r\n\r\n    # checkbox for counting the number of characters\r\n    charcount = (request.POST.get('charcount', 'off'))\r\n\r\n    # checkbox for the new line remover\r\n    newlineremover = (request.POST.get('newlineremover', 'off'))\r\n\r\n    print(removepunc)\r\n    print(djtext)\r\n    print(fullcaps)\r\n    print(newlineremover)\r\n\r\n    # giving the default args\r\n    analyzed_text = []\r\n    purpose = []\r\n\r\n    # Checking for the tasks to be done.\r\n    if removepunc == \"on\":\r\n        analyzed = ''\r\n        purpose.append('remove punctuations')\r\n        punctuations = '''!@'.<>,/?{}[]-'''\r\n        for char in djtext:\r\n            if char not in punctuations:\r\n                analyzed = analyzed + char\r\n        analyzed_text.append(analyzed)\r\n\r\n    if fullcaps == \"on\":\r\n        purpose.append('Capitalize word')\r\n        analyzed_text.append(djtext.upper())\r\n\r\n    if charcount == 'on':\r\n        purpose.append(\"Count the number of characters\")\r\n        analyzed_text.append(str(len(djtext)))\r\n\r\n    if newlineremover == 'on':\r\n        purpose.append(\"Removes new line \")\r\n        a = ''\r\n        for ch in djtext:\r\n            if ch != '\\n' and ch != '\\r':\r\n                a = a + ch\r\n        print(a)\r\n        analyzed_text.append(a)\r\n\r\n\r\n    # Fall back to defaults if nothing was selected.\r\n    if len(purpose) == 0:\r\n        purpose.append(\"select something to do.\")\r\n    if len(analyzed_text) == 0:\r\n        analyzed_text.append(f'''the default text was \"{djtext}\" ''')\r\n\r\n    # Make the final dictionary of data to be sent.\r\n    params = {'purpose': ', '.join(purpose), 'analyzed_text': ' \\n'.join(analyzed_text)}\r\n    return render(request, 'analyze.html', params)\r\n","sub_path":"textutils/textutils/textutils/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"465112452","text":"import sqlite3\r\n\r\ndef CreateTable(databaseName, table_name, sql):\r\n    with sqlite3.connect(databaseName) as db:\r\n        cursor = db.cursor()\r\n        cursor.execute(\"select name from sqlite_master where name=?\", (table_name,))\r\n        result = cursor.fetchall()\r\n        keep_table = True\r\n        if len(result) == 1:\r\n            response = input(\"This table already exists! 
Recreate table?: \")\r\n if response == \"y\":\r\n keep_table = False\r\n cursor.execute(\"drop table if exists {}\".format(table_name))\r\n db.commit()\r\n else:\r\n print(\"No changes made.\")\r\n else:\r\n keep_table = False\r\n if not keep_table:\r\n cursor.execute(sql)\r\n db.commit()\r\n\r\ndef CreateStaffTable():\r\n sql = \"\"\"create table Staff\r\n (StaffID text,\r\n StaffFirstName text,\r\n StaffLastName text,\r\n primary key(StaffID))\"\"\"\r\n CreateTable(databaseName, \"Staff\", sql)\r\n\r\ndef CreateItemTable():\r\n sql = \"\"\"create table Item\r\n (ItemCode text,\r\n ItemDescription text,\r\n ItemPrice real,\r\n ItemCategory text,\r\n ItemQualityCheck integer,\r\n ItemStatus text,\r\n primary key(ItemCode)\r\n foreign key(ItemCategory) references Category(ItemCategory))\"\"\"\r\n CreateTable(databaseName, \"Item\", sql)\r\n \r\ndef CreateCategoryTable():\r\n sql = \"\"\"create table Category\r\n (ItemCategory text,\r\n ItemCategoryDescription text,\r\n primary key(ItemCategory))\"\"\"\r\n CreateTable(databaseName, \"Category\", sql)\r\n \r\ndef CreateDonationTable():\r\n sql = \"\"\"create table Donation\r\n (DonationCode text,\r\n ItemCode text,\r\n DonatorID text,\r\n StaffID text,\r\n Date text,\r\n primary key(DonationCode)\r\n foreign key(ItemCode) references Item(ItemCode)\r\n foreign key(DonatorID) references Donator(DonatorID)\r\n foreign key (StaffID) references Staff(StaffID))\"\"\"\r\n CreateTable(databaseName, \"Donation\", sql)\r\n\r\ndef CreateDonatorTable():\r\n sql = \"\"\"create table Donator\r\n (DonatorID text,\r\n DonatorFirstName text,\r\n DonatorLastName text,\r\n DonatorAddress1 text,\r\n DonatorAddress2 text,\r\n DonatorCity text,\r\n DonatorCounty text,\r\n DonatorPostCode text,\r\n DonatorContact text,\r\n primary key(DonatorID))\"\"\"\r\n CreateTable(databaseName, \"Donator\", sql)\r\n\r\n\r\n \r\n\r\nif __name__ == \"__main__\":\r\n databaseName = \"charityShop.db\"\r\n CreateStaffTable()\r\n CreateCategoryTable()\r\n CreateItemTable()\r\n CreateDonatorTable()\r\n CreateDonationTable()\r\n","sub_path":"Implementation/FUDGE/Database/Creation (!!!).py","file_name":"Creation (!!!).py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"188577401","text":"import os\nimport pathlib\n\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nimport dash_table\nimport plotly.graph_objs as go\nimport plotly.offline as pyo\nimport dash_daq as daq\n\nimport pandas as pd\nimport numpy as np\nimport time\nimport datetime\n\nfrom polling_manager import PollingManager\n\nfrom threading import Thread\n\nfrom constants import SOLANA_TOKEN_CODE, ETHERIUM_TOKEN_CODE, UNISWAP_EXCHANGE_CODE, SERUM_EXCHANGE_CODE, \\\n POLLING_DELAY_SECONDS, DASHBOARD_REFRESH_SECONDS\nfrom database import SQLLiteDatabase\nfrom postgres_database import PostgresDatabase\nfrom token_metrics_service import TokenMetricsService\nfrom exchange_metrics_service import ExchangeMetricsService\n\nimport plotly.express as px\nfrom apscheduler.schedulers.blocking import BlockingScheduler\n\napp = dash.Dash(\n __name__,\n meta_tags=[{\"name\": \"viewport\", \"content\": \"width=device-width, initial-scale=1\"}],\n)\n\nserver = app.server\napp.config[\"suppress_callback_exceptions\"] = True\napp.title = 'DeFi Compare'\n\n## Pull new API call data\ntry:\n use_postgres = os.environ['USE_POSTGRES']\n if bool(use_postgres):\n db = 
PostgresDatabase() #comment out for local testing\n #db = SQLLiteDatabase() #enable for local testing\n else:\n db = SQLLiteDatabase()\nexcept:\n db = SQLLiteDatabase()\n\ntoken_metrics_service = TokenMetricsService(db=db)\nexchange_metrics_service = ExchangeMetricsService(db=db)\n\n##== PAGE HEADER =========================================================================================================\n\ndef build_banner():\n return html.Div(\n id=\"banner\",\n className=\"banner\",\n children=[\n html.Div(\n id=\"banner-text\",\n children=[\n html.H5(\"DeFi Compare (ALPHA)\"),\n html.H6(\"An open and fair DeFi comparison tool across all blockchains and decentralised finance applications\"),\n ],\n ),\n html.Div(\n id=\"banner-logo\",\n children=[\n html.Label(['DeFi Hackathon Project > ', html.A(' VOTE NOW ', href='https://airtable.com/shrsx1ltpQfTt9wT6')]),\n html.Label([' | ', html.A(' Twitter ', href='https://twitter.com/solanablog/')]),\n html.Label([' | ', html.A(' Discord ', href='https://discord.gg/mrpPmnJJ')]), \n html.Label([' | ', html.A(' Contribute ', href='https://github.com/solanax/deficompare')]), \n html.Img(id=\"logo\", src=app.get_asset_url(\"defi-compare-logo_wide.png\")),\n ],\n ),\n ],\n )\n\ndef generate_section_banner(title):\n return html.Div(className=\"section-banner\", children=title)\n\n##== PAGE HEADER =========================================================================================================\n\ndef build_dapp_table():\n df_symbol_1 = \"SRM\"\n df_symbol_2 = \"UNI\"\n\n df_exch_1_data = exchange_metrics_service.get_df_by_exchange(SERUM_EXCHANGE_CODE)\n df_exch_2_data = exchange_metrics_service.get_df_by_exchange(UNISWAP_EXCHANGE_CODE)\n\n print(df_exch_1_data)\n print(df_exch_2_data)\n\n df_exch_1_data_tailed = df_exch_1_data.tail(1)\n df_exch_2_data_tailed = df_exch_2_data.tail(1)\n\n df_exch_1_data_tailed.loc[:,\"id\"] = \"SRM\"\n df_exch_2_data_tailed.loc[:,\"id\"] = \"UNI\"\n\n df_exch_data_tailed = df_exch_1_data_tailed.append(df_exch_2_data_tailed)\n print(df_exch_data_tailed)\n\n return html.Div([\n dash_table.DataTable(\n id='table',\n columns=[{\"name\": i, \"id\": i} \n for i in df_exch_data_tailed.columns],\n data=df_exch_data_tailed.to_dict('records'),\n style_cell=dict(textAlign='left'),\n style_header=dict(backgroundColor=\"black\"),\n style_data=dict(backgroundColor=\"gray\")\n )\n ])\n\n##== FEE GRAPH =========================================================================================================\n\n@app.callback(Output('fee-graph-live', 'figure'),\n [Input('fee-graph-update', 'n_intervals')])\ndef build_fee_graph(n):\n df_token_1 = \"SOL\"\n df_token_2 = \"ETH\"\n\n df_token_1_data = token_metrics_service.get_df_by_token(SOLANA_TOKEN_CODE)\n df_token_2_data = token_metrics_service.get_df_by_token(ETHERIUM_TOKEN_CODE)\n\n # Create graph plots\n token_1_fee_plot = go.Scatter(\n x=df_token_1_data['datetime'],\n y=df_token_1_data['avg_tx_price'],\n mode='lines+markers',\n name='SOL Fees'\n )\n token_2_fee_plot = go.Scatter(\n x=df_token_2_data['datetime'],\n y=df_token_2_data['avg_tx_price'],\n mode='lines+markers',\n name='ETH Fees'\n )\n fee_graph_df = [token_1_fee_plot, token_2_fee_plot]\n\n fee_graph_title = 'Fee Comparison of ' + df_token_1 + ' and ' + df_token_2 + \" in USD\"\n\n return {\n \"data\": fee_graph_df,\n \"layout\": {\n \"paper_bgcolor\": \"rgba(0,0,0,0)\",\n \"plot_bgcolor\": \"rgba(0,0,0,0)\",\n \"xaxis\": dict(\n showline=False, showgrid=False, zeroline=False\n ),\n \"yaxis\": dict(\n showgrid=False, 
showline=False, zeroline=False\n ),\n \"legend\": dict(\n orientation=\"h\"\n ),\n \"autosize\": True,\n \"title\": fee_graph_title,\n },\n }\n\n##== LATENCY GRAPH =========================================================================================================\n\n@app.callback(Output('time-graph-live', 'figure'),\n [Input('time-graph-update', 'n_intervals')])\ndef build_time_graph(n):\n df_token_1 = \"SOL\"\n df_token_2 = \"ETH\"\n\n df_token_1_data = token_metrics_service.get_df_by_token(SOLANA_TOKEN_CODE)\n df_token_2_data = token_metrics_service.get_df_by_token(ETHERIUM_TOKEN_CODE)\n\n # Create graph plots\n token_1_time_plot = go.Scatter(\n x=df_token_1_data['datetime'],\n y=df_token_1_data['avg_tx_time'],\n mode='lines+markers',\n name='SOL Latency'\n )\n token_2_time_plot = go.Scatter(\n x=df_token_2_data['datetime'],\n y=df_token_2_data['avg_tx_time'],\n mode='lines+markers',\n name='ETH Latency'\n )\n time_graph_df = [token_1_time_plot, token_2_time_plot]\n\n time_graph_title = 'Latency Comparison of ' + df_token_1 + ' and ' + df_token_2 + \" in Seconds\"\n\n return {\n \"data\": time_graph_df,\n \"layout\": {\n \"paper_bgcolor\": \"rgba(0,0,0,0)\",\n \"plot_bgcolor\": \"rgba(0,0,0,0)\",\n \"xaxis\": dict(\n showline=False, showgrid=False, zeroline=False\n ),\n \"yaxis\": dict(\n showgrid=False, showline=False, zeroline=False\n ),\n \"legend\": dict(\n orientation=\"h\"\n ),\n \"autosize\": True,\n \"title\": time_graph_title,\n },\n }\n\n##== MAIN PAGE =========================================================================================================\n\ndef serve_layout():\n df_token_1 = \"SOL\"\n df_token_2 = \"ETH\"\n\n # Mock-up blockchain selector\n all_tokens = [\"Solana\", \"Ethereum\", \"Cardano\", \"Binance Smart Chain\"]\n\n fee_graph_title = 'Fee Comparison of ' + df_token_1 + ' and ' + df_token_2 + \" in USD\"\n time_graph_title = 'Latency Comparison of ' + df_token_1 + ' and ' + df_token_2 + \" in Seconds\"\n\n return html.Div(\n id=\"big-app-container\",\n children=[\n build_banner(),\n html.Div(\n id=\"app-container\",\n children=[\n # Main app\n html.Div(\n id=\"app-content\",\n children=[\n html.Div(\n id=\"select-blockchain\", #top-section-container\n #className=\"twelve columns\",\n children=[\n generate_section_banner(\"Select Blockchain to Compare\"),\n html.Div([\n dcc.Checklist(\n id=\"blockchain-checklist\",\n options=[\n {\"label\": \"Solana\", \"value\": \"SOL\", \"disabled\": False},\n {\"label\": \"Ethereum\", \"value\": \"ETH\", \"disabled\": False},\n {\"label\": \"Cardano\", \"value\": \"ADA\", \"disabled\": True},\n {\"label\": \"Binance Smart Chain\", \"value\": \"BSC\", \"disabled\": True}\n ],\n value=all_tokens[:2],\n labelStyle={'display': 'inline-block'}\n ),\n ]),\n ]\n ),\n html.Div(\n id=\"tabs\", \n className=\"tabs\",\n children=[\n dcc.Tabs(\n id=\"app-tabs\",\n className=\"custom-tabs\",\n children=[\n dcc.Tab(\n label='Blockchain Fees', \n className=\"custom-tab\", \n selected_className=\"custom-tab--selected\", \n children=[\n html.Div(\n id=\"fee-graph-container\",\n #className=\"twelve columns\",\n children=[\n generate_section_banner(fee_graph_title),\n dcc.Graph(\n id=\"fee-graph-live\",\n animate=True\n ),\n dcc.Interval(\n id='fee-graph-update',\n interval=1000*DASHBOARD_REFRESH_SECONDS,\n n_intervals=0\n ),\n ],\n ),\n ],\n ), \n dcc.Tab(\n label='Blockchain Latency',\n className=\"custom-tab\", \n selected_className=\"custom-tab--selected\", \n children=[\n html.Div(\n id=\"time-graph-container\",\n 
#className=\"twelve columns\",\n children=[\n generate_section_banner(time_graph_title),\n dcc.Graph(\n id=\"time-graph-live\",\n animate=True\n ),\n dcc.Interval(\n id='time-graph-update',\n interval=1000*DASHBOARD_REFRESH_SECONDS,\n n_intervals=0\n ),\n ],\n ),\n ],\n ),\n ],\n ),\n ],\n ),\n ],\n ),\n generate_section_banner(\"DeFi Application Comparison Table\"),\n build_dapp_table(),\n ],\n ),\n ],\n )\n \n\n\napp.layout = serve_layout\n\n\n# Running the server\nif __name__ == \"__main__\":\n app.run_server(debug=False, port=8050)\n\n\n\n\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":13382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"644203744","text":"from funciones_xml import *\n\nopcion=int(input('''Elige una opción del siguiente menú:\n\n1. Lista de libros\n2. Número de libros por autor\n3. Buscar libros por subcadena del argumento\n4. Buscar libros por género\n5. Comprar libro\n6. Salir\n\nOpción: '''))\nwhile opcion<1 or opcion>6:\n\tprint(\"Error. Introduce el número de la opción correcta.\")\n\topcion=int(input('''Elige una opción del siguiente menú:\n\n1. Lista de libros\n2. Número de libros por autor\n3. Buscar libros por subcadena del argumento\n4. Buscar libros por género\n5. Comprar libro\n6. Salir\n\nOpción: '''))\nwhile opcion!=6:\n\tif opcion<1 or opcion>6:\n\t\tprint(\"Error. Introduce el número de la opción correcta.\")\n\t\tprint()\n\telif opcion==1:\n\t\tfor libro in ListaLibros():\n\t\t\tprint(\"Título: \"+libro.get(\"titulo\"))\n\t\t\tprint(\"Año: \"+libro.get(\"anio\"))\n\t\t\tprint(\"Género(s): \")\n\t\t\tfor gen in libro.get(\"genero\"):\n\t\t\t\tprint(gen)\n\t\t\tprint()\n\telif opcion==2:\n\t\tfor autor in ListaAutores():\n\t\t\tprint(\"Nombre: \"+autor.get(\"nombre\")+\" \"+autor.get(\"apellido\"))\n\t\t\tprint(\"Sexo: \"+autor.get(\"sexo\"))\n\t\t\tprint(\"Número de libros registrados: \"+str(LibrosPorAutor()[ListaAutores().index(autor)]))\n\t\t\tprint()\n\telif opcion==3:\n\t\tsub=input(\"Introduce una subcadena: \")\n\t\tprint()\n\t\tFiltroArgumento(sub)\n\telif opcion==4:\n\t\tprint(\"Lista de géneros registrados:\")\n\t\tprint()\n\t\tfor gen in ListaGeneros():\n\t\t\tprint(gen)\n\t\tprint()\n\t\tgenero=input(\"Introduce el nombre de un género de la lista anterior: \")\n\t\tFiltroGenero(genero)\n\telif opcion==5:\n\t\ttitulo=input(\"Introduce el título de un libro: \")\n\t\tprint()\n\t\tInfoLibro(titulo)\n\t\tprint()\n\t\tcompra=input(\"¿Quieres comprar el libro? (s/n): \")\n\t\tprint()\n\t\tif compra==\"s\":\n\t\t\tEnlaceCompra(titulo)\n\t\tprint()\n\topcion=int(input('''Elige una opción del siguiente menú:\n\n1. Lista de libros\n2. Número de libros por autor\n3. Buscar libros por subcadena del argumento\n4. Buscar libros por género\n5. Comprar libro\n6. 
Salir\n\nOpción: '''))\n\nprint(\"Fin del programa\")","sub_path":"programa_xml.py","file_name":"programa_xml.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"107636783","text":"from __future__ import print_function, division\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.nn import functional as F\nfrom torch.autograd import Variable\nfrom torch.utils.data import Dataset, DataLoader\nimport numpy as np\nimport logging\nfrom torchvision import transforms, datasets, models\nimport os\nimport time\n# from model.residual_attention_network_pre import ResidualAttentionModel\n# based https://github.com/liudaizong/Residual-Attention-Network\nfrom model.residual_attention_network import ResidualAttentionModel_92_32input_update as ResidualAttentionModel\nfrom art.attacks import DeepFool, BasicIterativeMethod, CarliniL2Method, CarliniLInfMethod\nfrom art.attacks import FastGradientMethod, SaliencyMapMethod, ProjectedGradientDescent\nfrom art.classifiers import PyTorchClassifier\n\nfrom model.AttnVGG import CBAM_VGG16, LTPA_VGG16, RAM_VGG16, RAM_VGG16_v2\nfrom model.AttnVGG import CBAM_VGG19, LTPA_VGG19, RAM_VGG19, RAM_VGG19_v2\nfrom model.AttnResNet_v2 import CBAM_ResidualNet, RAM_ResNet18, LTPA_ResNet18, RAM_ResNet18_v2\n\nimport argparse\nimport math\nimport random\nfrom PIL import Image\n\n# Global Variables Area\n\nmodel_file = 'model_92_sgd.pkl'\n_classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')\n\n# Image Preprocessing\n_transform = transforms.Compose([\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop((32, 32), padding=4),\n transforms.ToTensor()\n])\n\n_test_transform = transforms.Compose([\n transforms.ToTensor()\n])\n\n# Normalization param\nmean = np.array([0.4914, 0.4822, 0.4465]).reshape((3, 1, 1))\nstd = np.array([0.2023, 0.1994, 0.2010]).reshape((3, 1, 1))\n\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n_globe_train= False\n\nclass CIFAR10_Dataset(Dataset):\n def __init__(self, config, train=True, target_transform=None):\n self.target_transform = target_transform\n self.train = train\n\n if self.train:\n self.train_data, self.train_labels = get_data(config, train)\n self.train_data = self.train_data.reshape((self.train_data.shape[0], 3, 32, 32))\n self.train_data = self.train_data.transpose((0, 2, 3, 1))\n else:\n self.test_data, self.test_labels = get_data(config)\n self.test_data = self.test_data.reshape((self.test_data.shape[0], 3, 32, 32))\n self.test_data = self.test_data.transpose((0, 2, 3, 1))\n\n def __getitem__(self, index):\n if self.train:\n img, label = self.train_data[index], self.train_labels[index]\n else:\n img, label = self.test_data[index], self.test_labels[index]\n\n img = Image.fromarray(img)\n\n if self.train:\n img = transform_train(img)\n else:\n img = transform_test(img)\n\n if self.target_transform is not None:\n label = self.target_transform(label)\n\n return img, label\n\n def __len__(self):\n if self.train:\n return len(self.train_data)\n else:\n return len(self.test_data)\n\n\nclass CIFAR10_PH(Dataset):\n def __init__(self, features, labels, feature_tfs, label_tfs):\n self.test_data, self.test_labels = features, labels\n self.ftfs = feature_tfs\n self.ltfs = label_tfs\n\n def __getitem__(self, index):\n img = self.test_data[index]\n label = self.test_labels[index]\n\n img = Image.fromarray(img)\n img = self.ftfs(img)\n\n if self.ltfs is not None:\n 
label = self.ltfs(label)\n\n return img, label\n\n def __len__(self):\n return len(self.test_data)\n\n\nclass UnNormalize(object):\n def __init__(self, mean, std):\n self.mean = mean\n self.std = std\n\n def __call__(self, tensor):\n \"\"\"\n Args:\n tensor (Tensor): Tensor image of size (C, H, W) to be normalized.\n Returns:\n Tensor: Normalized image.\n \"\"\"\n\n tensor[:, 0, :, :] = tensor[:, 0, :, :].mul(self.std[0]).add_(self.mean[0])\n tensor[:, 1, :, :] = tensor[:, 1, :, :].mul(self.std[1]).add_(self.mean[1])\n tensor[:, 2, :, :] = tensor[:, 2, :, :].mul(self.std[2]).add_(self.mean[2])\n\n return tensor\n\n\nclass usvt(torch.autograd.Function):\n \"\"\"ME-Net layer with universal singular value thresholding (USVT) approach.\n \"\"\"\n\n @staticmethod\n def forward(ctx, input, config):\n global _globe_train\n global mean, std, device\n batch_num, c, h, w = input.size()\n output = torch.zeros_like(input).cpu().numpy()\n\n for i in range(batch_num):\n img = (input[i] * 2 - 1).cpu().numpy()\n\n if config.me_channel == 'concat':\n img = np.concatenate((np.concatenate((img[0], img[1]), axis=1), img[2]), axis=1)\n if _globe_train:\n raise NotImplementedError('Training Mode is not supported in this script')\n else:\n mask = np.random.binomial(1, random.uniform(config.startp, config.endp), h * w * c).reshape(h, w * c)\n p_obs = len(mask[mask == 1]) / (h * w * c)\n u, sigma, v = np.linalg.svd(img * mask)\n S = np.zeros((h, w))\n for j in range(int(config.svdprob * h)):\n S[j][j] = sigma[j]\n S = np.concatenate((S, np.zeros((h, w * 2))), axis=1)\n W = np.dot(np.dot(u, S), v) / p_obs\n W[W < -1] = -1\n W[W > 1] = 1\n est_matrix = (W + 1) / 2\n for channel in range(c):\n output[i, channel] = est_matrix[:, channel * h:(channel + 1) * h]\n else:\n if _globe_train:\n raise NotImplementedError('Training Mode is not supported in this script')\n else:\n mask = np.random.binomial(1, random.uniform(config.startp, config.endp), h * w).reshape(h, w)\n p_obs = len(mask[mask == 1]) / (h * w)\n for channel in range(c):\n u, sigma, v = np.linalg.svd(img[channel] * mask)\n S = np.zeros((h, w))\n for j in range(int(config.svdprob * h)):\n S[j][j] = sigma[j]\n W = np.dot(np.dot(u, S), v) / p_obs\n W[W < -1] = -1\n W[W > 1] = 1\n output[i, channel] = (W + 1) / 2\n\n output = output - mean\n output /= std\n output = torch.from_numpy(output).float().to(device)\n return output\n\n @staticmethod\n def backward(ctx, grad_output):\n # BPDA, approximate gradients\n return grad_output\n\n\nclass RecosNet(nn.Module):\n \"\"\"Reconstruction layer.\n It is called by using the 'apply' method of different functions.\n \"\"\"\n def __init__(self, model):\n super(RecosNet, self).__init__()\n self.model = model\n\n def forward(self, input):\n global c_opt\n x = globals()['usvt'].apply(input, c_opt)\n return self.model(x)\n\n\nclass AttackPGD(nn.Module):\n def __init__(self, model, config):\n super(AttackPGD, self).__init__()\n self.model = model\n self.rand = config['random_start']\n self.step_size = config['step_size']\n self.epsilon = config['epsilon']\n self.num_steps = config['num_steps']\n assert config['loss_func'] == 'xent', 'Use cross-entropy as loss function.'\n\n def forward(self, inputs, targets, config):\n if not config.attack:\n return self.model(inputs), inputs\n\n x = inputs.detach()\n if self.rand:\n x = x + torch.zeros_like(x).uniform_(-self.epsilon, self.epsilon)\n for i in range(self.num_steps):\n x.requires_grad_()\n with torch.enable_grad():\n logits = self.model(x)\n loss = F.cross_entropy(logits, targets, 
reduction='sum')\n grad = torch.autograd.grad(loss, [x])[0]\n # print(grad)\n x = x.detach() + self.step_size * torch.sign(grad.detach())\n x = torch.min(torch.max(x, inputs - self.epsilon), inputs + self.epsilon)\n x = torch.clamp(x, 0, 1)\n\n return self.model(x), x\n\n# ------------------------------------------------------------------------------------------------\n\n\ndef unpickle(file):\n import pickle\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n return dict\n\n\ndef get_data(config, train=False):\n data = None\n labels = None\n if train:\n for i in range(1, 6):\n batch = unpickle(config.data_dir + 'cifar-10-batches-py/data_batch_' + str(i))\n if i == 1:\n data = batch[b'data']\n else:\n data = np.concatenate([data, batch[b'data']])\n if i == 1:\n labels = batch[b'labels']\n else:\n labels = np.concatenate([labels, batch[b'labels']])\n\n data_tmp = data\n labels_tmp = labels\n # repeat n times for different masks\n for i in range(config.mask_num - 1):\n data = np.concatenate([data, data_tmp])\n labels = np.concatenate([labels, labels_tmp])\n else:\n batch = unpickle(config.data_dir + 'cifar-10-batches-py/test_batch')\n data = batch[b'data']\n labels = batch[b'labels']\n return data, labels\n\n\ndef target_transform(label):\n label = np.array(label)\n target = torch.from_numpy(label).long()\n return target\n\n\ndef general_test(model, optimizer, input_shape, nb_classes, test_loader, method, btrain=False,\n model_file='last_model_92_sgd.pkl'):\n global _classes\n if not btrain:\n model.load_state_dict(torch.load(model_file))\n model.eval()\n\n loss = nn.CrossEntropyLoss()\n warped_model = PyTorchClassifier(model, loss, optimizer, input_shape, nb_classes, clip_values=(.0, 1.))\n if method == 'Deepfool':\n adv_crafter = DeepFool(warped_model)\n elif method == 'BIM':\n adv_crafter = BasicIterativeMethod(warped_model, batch_size=20)\n elif method == 'JSMA':\n adv_crafter = SaliencyMapMethod(warped_model, batch_size=20)\n elif method == 'CW2':\n adv_crafter = CarliniL2Method(warped_model, batch_size=20)\n elif method == 'CWI':\n adv_crafter = CarliniLInfMethod(warped_model, batch_size=20)\n\n correct, total = 0, 0\n class_correct = list(0. for _ in range(10))\n class_total = list(0. 
for _ in range(10))\n\n for images, labels in test_loader:\n images = adv_crafter.generate(images.numpy())\n\n images = Variable(torch.from_numpy(images).cuda())\n labels = Variable(labels.cuda())\n\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels.data).sum()\n c = (predicted == labels.data).squeeze()\n for i in range(20):\n label = labels.data[i]\n class_correct[label] += c[i]\n class_total[label] += 1\n\n print('Accuracy of the model on the test images: %d %%' % (100 * float(correct) / total))\n print('Accuracy of the model on the test images:', float(correct) / total)\n for i in range(10):\n print('Accuracy of %5s : %2d %%' % (\n _classes[i], 100 * class_correct[i] / class_total[i]))\n return correct / total\n\n\ndef general_test_v2(model, optimizer, input_shape, nb_classes, test_loader, method, conf, btrain=False,\n model_file='last_model_92_sgd.pkl'):\n global _classes\n if not btrain:\n checked_state = torch.load(model_file)['state_dict']\n model.load_state_dict(checked_state)\n assert isinstance(model, AttackPGD), 'Incorrect Model Configuration'\n model = model.model.eval()\n # model.eval()\n\n loss = nn.CrossEntropyLoss()\n warped_model = PyTorchClassifier(model, loss, optimizer, input_shape, nb_classes, clip_values=(.0, 1.))\n if method == 'Deepfool':\n adv_crafter = DeepFool(warped_model)\n elif method == 'BIM':\n adv_crafter = BasicIterativeMethod(warped_model, batch_size=32)\n elif method == 'JSMA':\n adv_crafter = SaliencyMapMethod(warped_model, batch_size=32)\n elif method == 'CW2':\n adv_crafter = CarliniL2Method(warped_model, batch_size=32)\n elif method == 'CWI':\n adv_crafter = CarliniLInfMethod(warped_model, batch_size=32)\n elif method == 'FGSM':\n adv_crafter = FastGradientMethod(warped_model, batch_size=32)\n elif method == 'PGD':\n adv_crafter = ProjectedGradientDescent(warped_model, batch_size=32)\n\n correct, total = 0, 0\n\n adv_dataset = adv_generalization(test_loader, adv_crafter, conf)\n temp_loader = DataLoader(dataset=adv_dataset, batch_size=32, shuffle=False, drop_last=True)\n # temp_loader = test_loader\n\n for images, labels in temp_loader:\n images = Variable(images.cuda())\n labels = Variable(labels.cuda())\n\n outputs = model(images, conf)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels.data).sum()\n\n print('Accuracy of the model on the test images: %d %%' % (100 * float(correct) / total))\n print('Accuracy of the model on the test images:', float(correct) / total)\n return correct / total\n\n\n# for test\ndef test(model, test_loader, btrain=False, model_file='last_model_92_sgd.pkl'):\n global _classes\n if not btrain:\n model.load_state_dict(torch.load(model_file)['state_dict'])\n model.eval()\n\n correct = 0\n total = 0\n #\n class_correct = list(0. for _ in range(10))\n class_total = list(0. 
for _ in range(10))\n\n for images, labels in test_loader:\n images = Variable(images.cuda())\n labels = Variable(labels.cuda())\n outputs = model(images)\n _, predicted = torch.max(outputs.data, 1)\n total += labels.size(0)\n correct += (predicted == labels.data).sum()\n #\n c = (predicted == labels.data).squeeze()\n for i in range(20):\n label = labels.data[i]\n class_correct[label] += c[i]\n class_total[label] += 1\n\n print('Accuracy of the model on the test images: %d %%' % (100 * float(correct) / total))\n print('Accuracy of the model on the test images:', float(correct)/total)\n for i in range(10):\n print('Accuracy of %5s : %2d %%' % (\n _classes[i], 100 * class_correct[i] / class_total[i]))\n\n model.train()\n return correct / total\n\n\ndef adv_generalization(tgt_dl, adv, conf):\n fout, lout = None, None\n unnorm = UnNormalize(mean=(0.4914, 0.4822, 0.4465), std=(0.2023, 0.1994, 0.2010))\n # c_mean = torch.tensor([0.4914, 0.4822, 0.4465], dtype=torch.float32)\n # c_std = torch.tensor([0.2023, 0.1994, 0.2010], dtype=torch.float32)\n # unnorm = transforms.Normalize((-c_mean / c_std).tolist(), (1.0 / c_std).tolist())\n\n for batch_idx, (images, labels) in enumerate(tgt_dl):\n images = adv.generate(images.numpy())\n images = torch.from_numpy(images) * 255.\n images = images.clamp(0, 255)\n # images = unnorm(images) * 255.0\n\n if fout is None:\n fout = images\n else:\n fout = torch.cat((fout, images), dim=0)\n\n if lout is None:\n lout = labels\n else:\n lout = torch.cat((lout, labels), dim=0)\n\n global _test_transform\n res_data = CIFAR10_PH(fout.numpy().astype('uint8').transpose(0, 2, 3, 1), lout.numpy(),\n _test_transform, target_transform)\n\n return res_data\n\n\ndef main(opt):\n global _transform, _test_transform\n global device\n # Load dataset\n train_dataset = datasets.CIFAR10(root='./data/', train=True, transform=_transform, download=True)\n test_dataset = datasets.CIFAR10(root='./data/', train=False, transform=_test_transform)\n # test_dataset = CIFAR10_Dataset(opt, False, target_transform)\n\n # Get Data Loader\n train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=64, shuffle=True, num_workers=8)\n test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=32, shuffle=False, drop_last=True)\n\n if opt.model == 'cbam-vgg16':\n model = CBAM_VGG16()\n elif opt.model == 'cbam-vgg19':\n model = CBAM_VGG19()\n elif opt.model == 'ram-vgg16':\n model = RAM_VGG16()\n elif opt.model == 'ram-vgg19':\n model = RAM_VGG19()\n elif opt.model == 'ram-vgg16_v2':\n model = RAM_VGG16_v2()\n elif opt.model == 'ram_vgg19_v2':\n model = RAM_VGG19_v2()\n elif opt.model == 'ltpa-vgg16':\n model = LTPA_VGG16()\n elif opt.model == 'ltpa-vgg19':\n model = LTPA_VGG19()\n elif opt.model == 'cbam-resnet':\n model = CBAM_ResidualNet(\"CIFAR10\", 18, 10)\n elif opt.model == 'ram-resnet':\n model = RAM_ResNet18()\n elif opt.model == 'ram-resnet-v2':\n model = RAM_ResNet18_v2()\n elif opt.model == 'ltpa-resnet':\n model = LTPA_ResNet18()\n elif opt.model == 'vgg16':\n model = models.vgg16(pretrained=False)\n elif opt.model == 'vgg19':\n model = models.vgg19(pretrained=False)\n elif opt.model == 'resnet':\n model = models.resnet18(pretrained=False)\n else:\n raise Exception('Unsupported Model')\n\n # print(model)\n temp_config = {\n 'epsilon': opt.epsilon / 255.,\n 'num_steps': opt.iter,\n 'step_size': 2. 
/ 255,\n 'random_start': True,\n 'loss_func': 'xent',\n }\n model = model.to(device)\n RecosNet_model = RecosNet(model)\n net = AttackPGD(RecosNet_model, temp_config)\n\n lr = opt.lr\n criterion = nn.CrossEntropyLoss()\n optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, nesterov=True, weight_decay=0.0001)\n\n is_train, is_pretrain = opt.train, opt.pretrain\n acc_best = 0\n total_epoch = 300\n\n if is_train is True:\n if is_pretrain:\n model.load_state_dict((torch.load(model_file)))\n # Training\n for epoch in range(total_epoch):\n model.train()\n tims = time.time()\n for i, (images, labels) in enumerate(train_loader):\n images = Variable(images.cuda())\n labels = Variable(labels.cuda())\n\n # Forward + Backward + Optimize\n optimizer.zero_grad()\n outputs = model(images)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n if (i + 1) % 100 == 0:\n print(\"Epoch [%d/%d], Iter [%d/%d] Loss: %.4f\" % (\n epoch + 1, total_epoch, i + 1, len(train_loader), loss.item()))\n\n print('the epoch takes time:', time.time() - tims)\n print('evaluate test set:')\n acc = test(model, test_loader, btrain=True)\n if acc > acc_best:\n acc_best = acc\n print('current best acc,', acc_best)\n torch.save(model.state_dict(), model_file)\n # Decaying Learning Rate\n if (epoch + 1) / float(total_epoch) == 0.3 or (epoch + 1) / float(total_epoch) == 0.6 or (\n epoch + 1) / float(total_epoch) == 0.9:\n lr /= 10\n print('reset learning rate to:', lr)\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n print(param_group['lr'])\n # optimizer = torch.optim.Adam(model.parameters(), lr=lr)\n # optim.SGD(model.parameters(), lr=lr, momentum=0.9, nesterov=True, weight_decay=0.0001)\n # Save the Model\n torch.save(model.state_dict(), 'last_model_92_sgd.pkl')\n\n else:\n if opt.method == 'None':\n test(model, test_loader, btrain=False, model_file=opt.model_path)\n elif opt.method == 'fgsm':\n general_test_v2(net, optimizer, (32, 3, 32, 32), 10, test_loader, 'FGSM',\n conf=opt, btrain=False, model_file=opt.model_path)\n elif opt.method == 'Deepfool':\n general_test_v2(model, optimizer, (32, 3, 32, 32), 10, test_loader, 'Deepfool',\n conf=opt, btrain=False, model_file=opt.model_path)\n elif opt.method == 'BIM':\n general_test_v2(model, optimizer, (32, 3, 32, 32), 10, test_loader, 'BIM',\n conf=opt, btrain=False, model_file=opt.model_path)\n elif opt.method == 'JSMA':\n general_test_v2(model, optimizer, (32, 3, 32, 32), 10, test_loader, 'JSMA',\n conf=opt, btrain=False, model_file=opt.model_path)\n elif opt.method == 'CW2':\n general_test_v2(model, optimizer, (32, 3, 32, 32), 10, test_loader, 'CW2',\n conf=opt, btrain=False, model_file=opt.model_path)\n elif opt.method == 'CWI':\n general_test_v2(model, optimizer, (32, 3, 32, 32), 10, test_loader, 'CWI',\n conf=opt, btrain=False, model_file=opt.model_path)\n elif opt.method == 'PGD':\n general_test_v2(net, optimizer, (32, 3, 32, 32), 10, test_loader, 'PGD',\n conf=opt, btrain=False, model_file=opt.model_path)\n else:\n raise Exception(\"Unsupported Attack Method\")\n pass\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--method', type=str, default='None', help='None|fgsm|BIM|Deepfool|JSMA|CW2|CWI')\n parser.add_argument('--model', type=str, required=True)\n parser.add_argument('--model_path', type=str, required=True)\n parser.add_argument('--data_dir', default='data/', help='data path')\n\n # Default Training Setting, placeholder for some parameters only\n 
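# Editorial note (assumption, for context): the argparse defaults below together with temp_config above (epsilon 8/255, step 2/255, 7 steps, random start) match the standard L-inf PGD evaluation recipe for CIFAR-10; the division by 255 converts pixel-value budgets into the [0, 1] input range, e.g.:\n    #     eps = 8. / 255    # ~0.0314 in normalized units\n    #     step = 2. / 255   # ~0.0078\n    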
parser.add_argument('--lr', type=float, default=.1)\n parser.add_argument('--train', type=bool, default=False)\n parser.add_argument('--pretrain', type=bool, default=False)\n\n # Default ME setting, borrow from train_ori\n parser.add_argument('--startp', type=float, default=0.8)\n parser.add_argument('--endp', type=float, default=1)\n parser.add_argument('--me-type', type=str, default='usvt')\n parser.add_argument('--me-channel', type=str, default='concat')\n parser.add_argument('--svdprob', type=float, default=0.8, help='USVT hyper-param (default: 0.8)')\n parser.add_argument('--epsilon', type=float, default=8.)\n parser.add_argument('--iter', type=int, default=7)\n parser.add_argument('--no-augment', dest='augment', action='store_false')\n\n c_opt = parser.parse_args()\n\n if c_opt.augment:\n transform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n # transforms.Normalize((0.4914, 0.4822, 0.4465),\n # (0.2023, 0.1994, 0.2010)),\n ])\n else:\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n # transforms.Normalize((0.4914, 0.4822, 0.4465),\n # (0.2023, 0.1994, 0.2010)),\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n # transforms.Normalize((0.4914, 0.4822, 0.4465),\n # (0.2023, 0.1994, 0.2010)),\n ])\n\n main(c_opt)\n","sub_path":"Residual-Attention-Network/New_trainer_adv.py","file_name":"New_trainer_adv.py","file_ext":"py","file_size_in_byte":23356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"100680184","text":"from colorama import Back, init\r\n\r\n\r\n\r\ninit(autoreset = True)\r\n\r\n\r\n\r\nclass Alien:\r\n\tdef __init__(self, x, y):\r\n\t\tself.x = x\r\n\t\tself.y = y\r\n\t\tself.got_shot = False\r\n\t\tself.got_shot_counter = 0\r\n\r\n\t\t#Animations. 
A zero represents a black character, and a one represents a white character.\r\n\t\tself.normal_sprite = [\r\n\t\t[0, 0, 1, 1, 1, 0, 0],\r\n\t\t[0, 1, 0, 1, 0, 1, 0],\r\n\t\t[1, 1, 1, 1, 1, 1, 1]]\r\n\t\tself.explosion_sprite = [\r\n\t\t[1, 1, 0, 0, 0, 1, 1],\r\n\t\t[0, 0, 1, 1, 1, 0, 0],\r\n\t\t[1, 1, 0, 0, 0, 1, 1]]\r\n\r\n\r\n\tdef pos(self, x, y):\r\n\t\treturn \"\\x1b[\" + str(y) + \";\" + str(x) + \"H\"\r\n\r\n\r\n\tdef print_alien(self):\r\n\t\tif self.got_shot_counter <= 10:\r\n\t\t\tif self.got_shot:\r\n\t\t\t\tsprite = self.explosion_sprite\r\n\t\t\t\tself.got_shot_counter += 1\r\n\t\t\telse:\r\n\t\t\t\tsprite = self.normal_sprite\r\n\r\n\t\t\ty_counter = self.y\r\n\t\t\tfor row in sprite:\r\n\t\t\t\tx_counter = self.x\r\n\t\t\t\tfor char in row:\r\n\t\t\t\t\tif char == 1:\r\n\t\t\t\t\t\tprint(self.pos(x_counter, y_counter) + Back.WHITE + \" \")\r\n\t\t\t\t\tx_counter += 1\r\n\t\t\t\ty_counter += 1\r\n\r\n\r\n\tdef ret_shot_pos(self):\r\n\t\treturn self.x + 4\r\n\r\n\r\n\tdef hit_detector(self, player_shot_x, player_shot_y):\r\n\t\tfor a in range(7):\r\n\t\t\tif player_shot_x == self.x + a:\r\n\t\t\t\tfor b in range(3):\r\n\t\t\t\t\tif player_shot_y == self.y + b:\r\n\t\t\t\t\t\tself.got_shot = True\r\n\t\t\t\t\t\treturn True","sub_path":"Alien.py","file_name":"Alien.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"302661775","text":"from datetime import datetime\nimport logging\nimport time\nimport urllib2\nfrom django.conf import settings\nfrom django.db import IntegrityError\nfrom .parser import NotValidReviewException\nfrom .parser.factoryparser import FactoryParser\nfrom .models import Webpage, Artist, Label, Album, Review\nfrom .utils.rating import RatingNormalizer\n\nlogger = logging.getLogger(__name__)\n\n\nclass Crawler(object):\n @staticmethod\n def crawl_websites():\n logger.info(\"Start crawling webpages\")\n webpages = Webpage.objects.filter(enabled=True)\n for webpage in webpages:\n parser = FactoryParser.create_parser(webpage.url)\n reviews_urls = Crawler._get_reviews_urls(parser,\n webpage.url_reviews)\n\n for review_url in reviews_urls:\n if review_url == webpage.last_url_review:\n logger.info(\"No more new reviews found\")\n break\n try:\n Review.objects.get(url=review_url)\n except Review.DoesNotExist:\n pass\n else:\n logger.debug(\"Review already stored\")\n continue\n\n time.sleep(settings.THROTTLE_TIME)\n try:\n review_info = Crawler._get_review_info(parser, review_url)\n except NotValidReviewException as error:\n logger.info(error)\n continue\n try:\n Crawler._store_review_info(webpage.url, review_info)\n except IntegrityError as error:\n logger.debug(error)\n\n if reviews_urls:\n webpage.last_url_review = reviews_urls[0]\n\n webpage.last_check = datetime.now()\n webpage.save()\n\n @staticmethod\n def _get_reviews_urls(parser, url_reviews):\n logger.debug(\"Getting all urls from review page: %s\", url_reviews)\n reviews_html = Crawler._get_source(url_reviews)\n if reviews_html:\n reviews_urls = parser.fetch_url_reviews(reviews_html)\n else:\n reviews_urls = []\n return reviews_urls\n\n @staticmethod\n def _get_review_info(parser, url_review):\n logger.debug(\"Getting review info: %s\", url_review)\n review_html = Crawler._get_source(url_review)\n review_info = parser.get_review_info(review_html)\n review_info[\"url\"] = url_review\n return review_info\n\n @staticmethod\n def _get_source(url):\n try:\n request = urllib2.Request(\n url, 
headers={\"User-Agent\": settings.USER_AGENT})\n webpage = urllib2.urlopen(request)\n return webpage.read()\n except urllib2.HTTPError as error:\n logger.error(\"Could not get '%s'. Reason: %s\", url, error)\n return None\n\n @staticmethod\n def _store_review_info(webpage_url, review_info):\n logger.info(\"Storing review info: %s\", review_info[\"url\"])\n webpage = Webpage.objects.get(url=webpage_url)\n artist, created = Artist.objects.get_or_create(\n name=review_info[\"artist\"])\n if \"label\" in review_info:\n label, created = Label.objects.get_or_create(\n name=review_info[\"label\"])\n else:\n label = None\n album = Crawler._get_or_create_album(review_info[\"album\"],\n review_info[\"artist\"], label)\n if \"rating\" in review_info:\n rating = RatingNormalizer.normalize_rating(\n review_info[\"rating\"], review_info[\"rating_max\"])\n else:\n rating = None\n if \"body_review\" in review_info:\n body_review = review_info[\"body_review\"]\n else:\n body_review = None\n review = Review(rating=rating, url=review_info[\"url\"],\n body_review=body_review, album=album,\n webpage=webpage)\n review.save()\n\n @staticmethod\n def _get_or_create_album(album_name, artist_name, label):\n \"\"\"\n Gets album or creates a new one.\n\n This method should create an album only if the artist and album name\n given don't correspond to another album already stored.\n It must take into consideration things like little differences on the\n album or the artist.\n \"\"\"\n albums = Album.objects.filter(name=album_name)\n for album in albums:\n if album.artists.all()[0].name == artist_name:\n if label and not album.label:\n album.label = label\n album.save()\n return album\n album = Album(name=album_name, label=label)\n album.save()\n artist, created = Artist.objects.get_or_create(name=artist_name)\n album.artists.add(artist)\n return album\n","sub_path":"website/app/crawler.py","file_name":"crawler.py","file_ext":"py","file_size_in_byte":4909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"472830741","text":"import time\n\nimport torch, torch.nn as nn, torch.nn.functional as F\n\nfrom lib.roi_align.roi_align import CropAndResize, RoIAlign\n\nfrom . import torch_rcnn_utils\n\n\n\ndef ROIAlign(feature_maps, rois, config, pool_size, mode='bilinear'):\n \"\"\"Implements ROI Align on the features.\n\n Params:\n - pool_shape: [height, width] of the output pooled regions. Usually [7, 7]\n - image_shape: [height, width, chanells]. Shape of input image in pixels\n\n Inputs:\n - boxes: [batch, num_boxes, (x1, y1, x2, y2)] in normalized\n coordinates. Possibly padded with zeros if not enough\n boxes to fill the array.\n - Feature maps: List of feature maps from different levels of the pyramid.\n Each is [batch, channels, height, width]\n\n Output:\n Pooled regions in the shape: [batch, num_boxes, height, width, channels].\n The width and height are those specific in the pool_shape in the layer\n constructor.\n \"\"\"\n \"\"\"\n [ x2-x1 x1 + x2 - W + 1 ]\n [ ----- 0 --------------- ]\n [ W - 1 W - 1 ]\n [ ]\n [ y2-y1 y1 + y2 - H + 1 ]\n [ 0 ----- --------------- ]\n [ H - 1 H - 1 ]\n \"\"\"\n # feature_maps= [P2, P3, P4, P5]\n rois = rois.detach()\n crop_resize = CropAndResize(pool_size, pool_size, 0)\n\n roi_number = rois.size()[1]\n\n pooled = rois.data.new(\n config.IMAGES_PER_GPU * rois.size(\n 1), 256, pool_size, pool_size).zero_()\n\n rois = rois.view(\n config.IMAGES_PER_GPU * rois.size(1),\n 4)\n\n # Loop through levels and apply ROI pooling to each. 
P2 to P5.\n x_1 = rois[:, 0]\n y_1 = rois[:, 1]\n x_2 = rois[:, 2]\n y_2 = rois[:, 3]\n\n roi_level = torch_rcnn_utils.log2_graph(\n torch.div(torch.sqrt((y_2 - y_1) * (x_2 - x_1)), 224.0))\n\n roi_level = torch.clamp(torch.clamp(\n torch.add(torch.round(roi_level), 4), min=2), max=5)\n\n # P2 is 256x256, P3 is 128x128, P4 is 64x64, P5 is 32x32\n # P2 is 4, P3 is 8, P4 is 16, P5 is 32\n for i, level in enumerate(range(2, 6)):\n\n scaling_ratio = 2 ** level\n\n height = float(config.IMAGE_MAX_DIM) / scaling_ratio\n width = float(config.IMAGE_MAX_DIM) / scaling_ratio\n\n ixx = torch.eq(roi_level, level)\n\n box_indices = ixx.view(-1).int() * 0\n ix = torch.unsqueeze(ixx, 1)\n level_boxes = torch.masked_select(rois, ix)\n if len(level_boxes) == 0 or level_boxes.size()[0] == 0:\n continue\n level_boxes = level_boxes.view(-1, 4)\n\n crops = crop_resize(feature_maps[i], torch.div(\n level_boxes, float(config.IMAGE_MAX_DIM)\n )[:, [1, 0, 3, 2]], box_indices)\n\n indices_pooled = ixx.nonzero()[:, 0]\n pooled[indices_pooled.data, :, :, :] = crops.data\n\n pooled = pooled.view(config.IMAGES_PER_GPU, roi_number,\n 256, pool_size, pool_size)\n pooled = torch.autograd.Variable(pooled).cuda()\n return pooled\n\n\n############################################################\n# Bbox Layer\n############################################################\nclass RCNNHead(nn.Module):\n def __init__(self, num_classes, config):\n\n super(RCNNHead, self).__init__()\n self.num_classes = num_classes\n self.config = config\n # Setup layers\n self.mrcnn_class_conv1 = nn.Conv2d(\n 256, 1024, kernel_size=self.config.POOL_SIZE, stride=1, padding=0)\n self.mrcnn_class_bn1 = nn.BatchNorm2d(1024, eps=0.001)\n\n# self.dropout = nn.Dropout(p=0.5, inplace=True)\n\n self.mrcnn_class_conv2 = nn.Conv2d(\n 1024, 1024, kernel_size=1, stride=1, padding=0)\n self.mrcnn_class_bn2 = nn.BatchNorm2d(1024, eps=0.001)\n\n # Classifier head\n self.mrcnn_class_logits = nn.Linear(1024, self.num_classes)\n self.mrcnn_bbox_fc = nn.Linear(1024, self.num_classes * 4)\n\n def forward(self, x, rpn_rois):\n start = time.time()\n x = ROIAlign(x, rpn_rois, self.config, self.config.POOL_SIZE)\n\n spend = time.time()-start\n print('first roalign', spend)\n roi_number = x.size()[1]\n\n x = x.view(self.config.IMAGES_PER_GPU * roi_number,\n 256, self.config.POOL_SIZE,\n self.config.POOL_SIZE)\n\n x = self.mrcnn_class_conv1(x)\n x = self.mrcnn_class_bn1(x)\n x = F.relu(x, inplace=True)\n# x = self.dropout(x)\n x = self.mrcnn_class_conv2(x)\n x = self.mrcnn_class_bn2(x)\n x = F.relu(x, inplace=True)\n\n shared = x.permute(0, 2, 3, 1).contiguous().view(x.size(0), -1)\n # Classifier head\n mrcnn_class_logits = self.mrcnn_class_logits(shared)\n mrcnn_probs = F.softmax(mrcnn_class_logits, dim=-1)\n\n x = self.mrcnn_bbox_fc(shared)\n mrcnn_bbox = x.view(x.size()[0], self.num_classes, 4)\n\n mrcnn_class_logits = mrcnn_class_logits.view(self.config.IMAGES_PER_GPU,\n roi_number,\n mrcnn_class_logits.size()[-1])\n mrcnn_probs = mrcnn_probs.view(self.config.IMAGES_PER_GPU,\n roi_number,\n mrcnn_probs.size()[-1])\n # BBox head\n # [batch, boxes, num_classes , (dy, dx, log(dh), log(dw))]\n mrcnn_bbox = mrcnn_bbox.view(self.config.IMAGES_PER_GPU,\n roi_number,\n self.config.NUM_CLASSES,\n 4)\n\n return mrcnn_class_logits, mrcnn_probs, mrcnn_bbox\n\n\n# rcnn head confidence loss\ndef rcnn_class_loss(target_class_ids, pred_class_logits, active_class_ids, config):\n \"\"\"Loss for the classifier head of Mask RCNN.\n target_class_ids: [batch, num_rois]. Integer class IDs. 
Uses zero\n padding to fill in the array.\n pred_class_logits: [batch, num_rois, num_classes]\n active_class_ids: [batch, num_classes]. Has a value of 1 for\n classes that are in the dataset of the image, and 0\n for classes that are not in the dataset.\n \"\"\"\n\n # Find predictions of classes that are not in the dataset.\n pred_class_logits = pred_class_logits.contiguous().view(-1, config.NUM_CLASSES)\n\n target_class_ids = target_class_ids.contiguous().view(-1).type(torch.cuda.LongTensor)\n # Loss\n loss = F.cross_entropy(\n pred_class_logits, target_class_ids, weight=None, size_average=True)\n\n # Erase losses of predictions of classes that are not in the active\n # classes of the image.\n # loss = loss * pred_active\n\n # Computer loss mean. Use only predictions that contribute\n # to the loss to get a correct mean.\n # loss = tf.reduce_sum(loss) / tf.reduce_sum(pred_active)\n return loss\n\n\n# rcnn head bbox loss\ndef rcnn_bbox_loss(target_bbox, target_class_ids, pred_bbox):\n \"\"\"Loss for Mask R-CNN bounding box refinement.\n\n target_bbox: [batch, num_rois, (dy, dx, log(dh), log(dw))]\n target_class_ids: [batch, num_rois]. Integer class IDs.\n pred_bbox: [batch, num_rois, num_classes, (dy, dx, log(dh), log(dw))]\n \"\"\"\n # Reshape to merge batch and roi dimensions for simplicity.\n target_class_ids = target_class_ids.contiguous().view(-1)\n target_bbox = target_bbox.contiguous().view(-1, 4)\n pred_bbox = pred_bbox.contiguous().view(-1, pred_bbox.size()[2], 4)\n # print(target_class_ids)\n\n # Only positive ROIs contribute to the loss. And only\n # the right class_id of each ROI. Get their indicies.\n positive_roi_ix = torch.gt(target_class_ids, 0)\n # print(positive_roi_ix)\n positive_roi_class_ids = torch.masked_select(target_class_ids, positive_roi_ix)\n\n indices = target_class_ids\n # indices = torch.stack([positive_roi_ix, positive_roi_class_ids], dim=1)\n # print(indices)\n # Gather the deltas (predicted and true) that contribute to loss\n # target_bbox = torch.gather(target_bbox, positive_roi_ix)\n # pred_bbox = torch.gather(pred_bbox, indices)\n\n loss = F.smooth_l1_loss(pred_bbox, target_bbox, size_average=True)\n return loss\n\n","sub_path":"network/rcnn_head.py","file_name":"rcnn_head.py","file_ext":"py","file_size_in_byte":8227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"318326445","text":"from sqlalchemy import create_engine, orm\nfrom sqlalchemy.ext.declarative import declarative_base\n\n__all__ = ('Base', 'engine', 'session', 'DB_FILENAME')\n\nBase = declarative_base()\nDB_FILENAME = 'database.sqlite'\nengine = create_engine(f'sqlite:///{DB_FILENAME}')\nSession = orm.sessionmaker(bind=engine)\nsession: orm.Session = Session()\n\n\ndef database_init():\n Base.metadata.create_all(engine)\n","sub_path":"twitchbot/database/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"171183967","text":"\"\"\"Publish message to slack\n\nShould be run as daemon\n\"\"\"\nimport json\nimport os\nimport time\n\nimport slack\nfrom dotenv import load_dotenv\nfrom redis import Redis\n\n\nload_dotenv()\n\nredis_client = Redis(host='redis_db', port=6379, db=0)\nps = redis_client.pubsub(ignore_subscribe_messages=True)\nps.subscribe('web-to-slack')\n\nslack_client = slack.WebClient(token=os.environ['SLACK_API_TOKEN'])\n\nwhile True:\n message = ps.get_message()\n if message:\n json_message = 
json.loads(message['data'].decode())\n slack_client.chat_postMessage(\n channel='#client-notifications',\n text=json_message['message']\n )\n\n time.sleep(0.01)\n","sub_path":"slack-publisher.py","file_name":"slack-publisher.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"203357482","text":"import time\r\n\r\nfrom selenium.common.exceptions import (\r\n TimeoutException as Timeout,\r\n StaleElementReferenceException as StaleReference,\r\n ElementClickInterceptedException as ClickIntercepted, TimeoutException,\r\n JavascriptException, NoSuchElementException, WebDriverException)\r\nfrom selenium.webdriver import ActionChains\r\nfrom selenium.webdriver.support.select import Select\r\n\r\nimport admin\r\nimport gen\r\nimport nav\r\nimport splash\r\nimport stats\r\n\r\nlast_boss = \"\"\r\n\r\n\r\n# TODO:\r\n# Print boss hp after initial encounter\r\n\r\ndef checks(raid=False, forced=False):\r\n global last_boss\r\n hp_bar_css = (\".hp_frame_top > div:nth-child(1) > div:nth-child(2) > \"\r\n \"div:nth-child(2) > div:nth-child(1)\")\r\n gen.print_temp(\"boss routine\")\r\n\r\n try:\r\n bn = admin.wd.find(\"css\", \".quest_boss_status_1\").text\r\n\r\n try:\r\n hp_bar = admin.wd.find(\"css\", hp_bar_css).get_attribute(\"style\")\r\n hp = int((hp_bar.split()[-1])[:-2])\r\n except TimeoutException:\r\n hp = 1\r\n\r\n if bn != last_boss:\r\n print(f\"{time.strftime('%H:%M:%S')} {bn}\")\r\n last_boss = bn\r\n time.sleep(1)\r\n # checks: boss, bp/cool-down, decides: fight, get assist, wait to fight\r\n if stats.bp() == 6:\r\n fight(bn)\r\n elif forced:\r\n fight(bn, True)\r\n elif hp < 30:\r\n req_assist()\r\n elif ('Red Oni' in bn) or ('Speed Demon' in bn):\r\n engage(hp, bn)\r\n else:\r\n req_assist()\r\n\r\n except (Timeout, ClickIntercepted, StaleReference, AttributeError):\r\n if nav.at_card_limit():\r\n gen.sell_cards()\r\n\r\n gen.print_temp(\"boss routine: Timeout\")\r\n pass\r\n\r\n\r\n# Boss is on my hit-list (speed demon or red oni).\r\n# Checks BP/CD and boss HP, then decides: ask for assist, wait to fight, or fight immediately\r\ndef engage(hp, bn):\r\n from _main import raid as event\r\n # print(f\"{time.strftime('%H:%M:%S')} engaging\")\r\n if hp < 35 and event is False: # 'and', not bitwise '&', which binds tighter than '<'\r\n req_assist()\r\n elif stats.bp() == 0:\r\n # print(f\"{time.strftime('%H:%M:%S')} bp == 0\")\r\n if 'Red Oni' in bn and hp == 100 and stats.bp_cd() > 5:\r\n fight(bn)\r\n splash.stall_for_bp()\r\n fight(bn)\r\n elif stats.bp_cd() is not None and stats.bp_cd() <= 10: # check for None before comparing\r\n # print(f\"{time.strftime('%H:%M:%S')} bp_cd < 10\")\r\n splash.stall_for_bp()\r\n fight(bn)\r\n else:\r\n fight(bn)\r\n\r\n\r\n# automated fight, picks 1st player for partner, weak attack\r\ndef fight(boss_name, forced=False):\r\n # print(f\"{time.strftime('%H:%M:%S')} boss_fight\")\r\n pick_deck(boss_name)\r\n try:\r\n\r\n bs = admin.wd.execute_script(\r\n \"return document.getElementById('battle_start_button');\")\r\n admin.wd.execute_script(\"arguments[0].click();\", bs)\r\n # time.sleep(1)\r\n admin.wd.find(\"class\", \"friend_frame\").click()\r\n # time.sleep(1)\r\n admin.wd.find(\"id\", \"quest_attack_1\").click()\r\n if (\"Red Oni\" in boss_name) or forced:\r\n try:\r\n do_bp()\r\n # time.sleep(1)\r\n admin.wd.find(\"id\", \"quest_attack_1\").click()\r\n # time.sleep(1)\r\n admin.wd.find(\"id\", \"quest_attack_1\").click()\r\n except AttributeError:\r\n admin.wd.find(\"css\", \".back_button_column_1\").click()\r\n admin.wd.find(\"id\", 
\"quest_attack_1\")\r\n\r\n skip_animation()\r\n admin.wd.find(\"css\", \"a.closePopup:nth-child(6) > div:nth-child(1)\").click()\r\n\r\n return\r\n\r\n except Timeout:\r\n try:\r\n admin.wd.find(\"css\", \"a.closePopup:nth-child(6) > div:nth-child(1)\").click()\r\n except AttributeError:\r\n raise admin.MyException()\r\n nav.main_page()\r\n # print(f\"{time.strftime('%H:%M:%S')} fight: Timeout\")\r\n return\r\n except StaleReference:\r\n gen.print_temp(\"boss_fight: StaleReference\")\r\n pass\r\n except JavascriptException:\r\n print(f\"{time.strftime('%H:%M:%S')} fight: Javascriptexception\")\r\n pass\r\n except AttributeError:\r\n # raise admin.MyException()\r\n pass\r\n except WebDriverException:\r\n gen.print_temp(\"web driver exception in boss fight function\")\r\n pass\r\n\r\n\r\ndef pick_deck(boss_name):\r\n anti_demon = \"button_deck_change_1\"\r\n anti_taimanin = \"button_deck_change_2\"\r\n anti_ufs = \"button_deck_change_3\"\r\n\r\n try:\r\n if \"Yatsu\" in boss_name:\r\n deck = admin.wd.find(\"id\", f\"{anti_taimanin}\")\r\n if deck is not None:\r\n deck.click()\r\n elif \"X\" in boss_name:\r\n deck = admin.wd.find(\"id\", f\"{anti_ufs}\")\r\n if deck is not None:\r\n deck.click()\r\n else:\r\n deck = admin.wd.find(\"id\", f\"{anti_demon}\")\r\n if deck is not None:\r\n deck.click()\r\n\r\n except NoSuchElementException:\r\n print(\"no such element 2.0\")\r\n\r\n\r\ndef req_assist():\r\n try:\r\n admin.wd.find(\"class\", \"raid_help_button\").click()\r\n admin.wd.find(\"class\", \"back_button_column_1\").click()\r\n gen.stall_for_assist(n00b=True)\r\n # print(f\"{time.strftime('%H:%M:%S')} requested assist\")\r\n\r\n except (Timeout, AttributeError):\r\n # print(f'{t} already requested assist')\r\n splash.stall_for_assist()\r\n except (ClickIntercepted, StaleReference):\r\n gen.print_temp(\"req_assist: ClickIntercepted or StaleReference\")\r\n pass\r\n\r\n\r\n# auto-clicks 'canvas' at coordinates of 'skip' button, times out afterwards\r\ndef skip_animation():\r\n admin.wd.wait_for(\"id\", \"canvas_box\")\r\n\r\n for i in range(1, 30):\r\n try:\r\n # gen.print_temp(\"trying action chain click\")\r\n canvas = admin.wd.find(\"id\", \"canvas_box\")\r\n # canvas = admin.wd.find(\"id\", \"canvas_box\")\r\n ac = ActionChains(admin.wd)\r\n ac.move_to_element_with_offset(canvas, 250, 75).click()\r\n ac.perform()\r\n time.sleep(0.1)\r\n\r\n except (Timeout, AttributeError):\r\n gen.print_temp(\"skip animation: Timeout\")\r\n break\r\n\r\n\r\ndef do_bp():\r\n Select(admin.wd.find(\"class\", \"selector\")).select_by_value(\"2\")\r\n admin.wd.find(\"class\", \"decision_button_column_2\").click()\r\n time.sleep(0.5)\r\n\r\n try:\r\n admin.wd.find(\"class\", \"decision_button_column_2\").click()\r\n time.sleep(0.5)\r\n\r\n except TimeoutException:\r\n use = admin.wd.find(\"class\", \"decision_button_column_2\")\r\n use.click()\r\n time.sleep(0.5)\r\n","sub_path":"f2/boss.py","file_name":"boss.py","file_ext":"py","file_size_in_byte":6572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"41345970","text":"#!/usr/bin/python3\n\n'''\n\nIn England the currency is made up of pound, £, and pence, p, and there are eight coins in general circulation:\n\n 1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) and £2 (200p).\n\nIt is possible to make £2 in the following way:\n\n 1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p\n\nHow many different ways can £2 be made using any number of coins?\n\n'''\n\n\nclass Main():\n\n coins = [200, 100, 50, 20, 10, 5, 2, 1]\n\n def 
__init__(self):\n self.answer = self.possibilities(200)\n\n def possibilities(self, amountToFill, coinPos=0):\n possibilities = 0\n coin = self.coins[coinPos]\n maxFittingCoins = amountToFill // coin\n for fittingCoins in range(0, maxFittingCoins + 1):\n amountLeft = amountToFill - fittingCoins * coin\n if amountLeft < 1:\n possibilities += 1\n elif coin == 1:\n continue\n else:\n possibilities += self.possibilities(amountLeft, coinPos + 1)\n\n return possibilities\n","sub_path":"problem0031.py","file_name":"problem0031.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"511385111","text":"class ImagePlaneScene(object):\n\n def __init__(self, master_model):\n self._master_model = master_model\n\n def create_graphics(self):\n image_plane_model = self._master_model.get_image_plane_model()\n region = image_plane_model.get_region()\n scene = region.getScene()\n coordinate_field = image_plane_model.get_coordinate_field()\n duration_field = image_plane_model.get_duration_field()\n\n scene.beginChange()\n scene.removeAllGraphics()\n field_module = region.getFieldmodule()\n xi = field_module.findFieldByName('xi')\n lines = scene.createGraphicsLines()\n lines.setExterior(True)\n lines.setName('plane-lines')\n lines.setCoordinateField(coordinate_field)\n surfaces = scene.createGraphicsSurfaces()\n surfaces.setName('plane-surfaces')\n surfaces.setCoordinateField(coordinate_field)\n temp1 = field_module.createFieldComponent(xi, [1, 2])\n temp2 = field_module.createFieldTimeValue(self._master_model.get_timekeeper())\n temp3 = field_module.createFieldDivide(temp2, duration_field)\n texture_field = field_module.createFieldConcatenate([temp1, temp3])\n surfaces.setTextureCoordinateField(texture_field)\n scene.endChange()\n\n def set_image_material(self):\n image_plane_model = self._master_model.get_image_plane_model()\n image_material = image_plane_model.get_material()\n scene = image_plane_model.get_region().getScene()\n surfaces = scene.findGraphicsByName('plane-surfaces')\n surfaces.setMaterial(image_material)\n","sub_path":"mapclientplugins/electrodearraydetectorstep/scene/imageplanescene.py","file_name":"imageplanescene.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"77559187","text":"# coding: utf-8\nimport argparse\nimport os\nimport sys\nfrom typing import Dict\n\nfrom . import DATA_DIR\nfrom .form import UtokyoHealthManagementReportForm\nfrom .utils import KwargsParamProcessor\n\nhere = os.path.abspath(os.path.dirname(__file__))\nARGUMENT_KEYS = [\"params\", \"browser\", \"quiet\"]\n\n\ndef UHMRF(argv: list = sys.argv[1:]):\n parser = argparse.ArgumentParser(\n prog=\"Answer UHMRF\",\n description=\"Auto fill in form about 'UTokyo Health Management Report Form'\",\n add_help=True,\n )\n parser.add_argument(\n \"--quiet\",\n action=\"store_true\",\n help=\"Whether you want to be quiet or not. (default=False)\",\n )\n parser.add_argument(\n \"--browser\",\n action=\"store_true\",\n help=\"Whether you want to run Chrome with GUI browser. (default=False)\",\n )\n parser.add_argument(\n \"-P\",\n \"--params\",\n action=KwargsParamProcessor,\n help=\"Specify the kwargs. 
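Each -P consumes one key=value pair and the flag may be repeated. 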
You can specify by -P username=USERNAME -P password=PASSWORD",\n )\n args = parser.parse_args(argv)\n\n path = os.path.join(DATA_DIR, \"UHMRF.json\")\n secrets_dict: Dict[str, str] = {\n \"\": os.getenv(\"UHMRF_PLACE\", \"\"),\n \"\": os.getenv(\"UTOKYO_ACCOUNT_MAIL_ADDRESS\", \"\"),\n \"\": os.getenv(\"UTOKYO_ACCOUNT_PASSWORD\", \"\"),\n }\n secrets_dict.update(\n {f\"<{k}>\": v for k, v in args.__dict__.items() if k not in ARGUMENT_KEYS}\n )\n model = UtokyoHealthManagementReportForm(\n path=path, secrets_dict=secrets_dict, verbose=not args.quiet\n )\n model.run(browser=args.browser)\n","sub_path":"form_auto_fill_in/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"238563648","text":"import pytest\n\nfrom lotus.factories import ArticleFactory\nfrom lotus.utils.tests import html_pyquery\n\n\n@pytest.mark.skip(reason=\"on hold until models are finished\")\ndef test_article_detail_404(db, client):\n \"\"\"\n Trying to reach a nonexistent article should return a 404 response.\n \"\"\"\n article = ArticleFactory(title=\"article1\")\n\n url = \"/{article_pk}/1/\".format(article_pk=article.id)\n\n response = client.get(url)\n\n assert response.status_code == 404\n\n\n@pytest.mark.skip(reason=\"on hold until models are finished\")\ndef test_article_detail_noblog(db, client):\n \"\"\"\n If the required article ID in the url does not exist, article detail should\n return a 404 response.\n \"\"\"\n url = \"/42/\"\n\n response = client.get(url)\n\n assert response.status_code == 404\n\n\n@pytest.mark.skip(reason=\"on hold until models are finished\")\ndef test_article_detail_content(db, client):\n \"\"\"\n Article content should be displayed correctly.\n \"\"\"\n article = ArticleFactory()\n\n url = \"/{article_pk}/\".format(\n article_pk=article.id,\n )\n\n response = client.get(url)\n\n assert response.status_code == 200\n\n dom = html_pyquery(response)\n article_title = dom.find(\".article-detail h2\")\n article_content = dom.find(\".article-detail div.content\")\n\n assert article_title.text() == article.title\n # Don't let the text() method squash white space, since the content may\n # contain line breaks\n assert article_content.text(squash_space=False) == article.content\n","sub_path":"tests/030_views/032_article.py","file_name":"032_article.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"450535499","text":"##\n## Python Programming\n## ===========================================================================\n##\n## For the file `data.csv`, print, for each row, column 1 and the sum\n## of the values in column 5.\n##\n## Answer:\n## E,22\n## A,14\n## B,14\n## ....\n## C,8\n## E,11\n## E,16\n##\n## >>> Write your code from this point onward <<<\n##\ndata = [x[:-1].split('\\t') for x in open('data.csv').readlines()]\nfor row in data:\n values = [int(x.split(':')[1]) for x in row[4].split(',')]\n print(row[0] + ',' + str(sum(values)))\n\n\n\n","sub_path":"03-python=1/q12=1/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"54587895","text":"import turtle\n# window\nablak=turtle.Screen()\nablak.setup(width=800,height=600)\nablak.bgcolor(\"black\")\nablak.title(\"PONG\")\nablak.tracer(0)\n# left paddle
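 -- shapesize(5, 1) stretches the default 20x20 px square turtle into a bar 20 px wide and 100 px tall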
\r\nbal_uto=turtle.Turtle()\r\nbal_uto.speed(0)\r\nbal_uto.shape(\"square\")\r\nbal_uto.shapesize(stretch_wid=5,stretch_len=1)\r\nbal_uto.color(\"blue\")\r\nbal_uto.penup()\r\nbal_uto.goto(-350,0)\r\n# right paddle\r\njobb_uto=turtle.Turtle()\r\njobb_uto.speed(0)\r\njobb_uto.shape(\"square\")\r\njobb_uto.shapesize(stretch_wid=5,stretch_len=1)\r\njobb_uto.color(\"red\")\r\njobb_uto.penup()\r\njobb_uto.goto(350,0)\r\n# ball\r\nlabda=turtle.Turtle()\r\nlabda.speed(1)\r\nlabda.shape(\"circle\")\r\nlabda.color(\"yellow\")\r\nlabda.penup()\r\nlabda.goto(0,0)\r\nlabda.változásX=1\r\nlabda.változásY=-1\r\n\r\n\r\n# score\r\njobb_pontszám=0\r\nbal_pontszám=0\r\npontszám=turtle.Turtle()\r\npontszám.speed(0)\r\npontszám.color(\"white\")\r\npontszám.penup()\r\npontszám.hideturtle()\r\npontszám.goto(0,260)\r\npontszám.write(f\"Right player: {jobb_pontszám} - Left player: {bal_pontszám}\", align=\"center\", font=(\"Courier\",24,\"normal\"))\r\n\r\n\r\n\r\ndef bal_uto_fel():\r\n y=bal_uto.ycor()\r\n y+=30\r\n bal_uto.sety(y)\r\n\r\ndef bal_uto_le():\r\n y=bal_uto.ycor()\r\n y-=30\r\n bal_uto.sety(y)\r\n\r\ndef jobb_uto_fel():\r\n y=jobb_uto.ycor()\r\n y+=60\r\n jobb_uto.sety(y)\r\n\r\ndef jobb_uto_le():\r\n y=jobb_uto.ycor()\r\n y-=60\r\n jobb_uto.sety(y)\r\n\r\n\r\nablak.onkey(bal_uto_fel, \"w\")\r\nablak.onkey(bal_uto_le, \"s\")\r\n\r\nablak.onkey(jobb_uto_fel, \"Up\")\r\nablak.onkey(jobb_uto_le, \"Down\")\r\n\r\nablak.listen()\r\n\r\n\r\nwhile True:\r\n # refresh the screen\r\n ablak.update()\r\n\r\n labda.setx(labda.xcor()+labda.változásX)\r\n labda.sety(labda.ycor()+labda.változásY)\r\n\r\n # bounce back from the top\r\n if labda.ycor()>288:\r\n labda.sety(288)\r\n labda.változásY*=-1\r\n\r\n # bounce back from the bottom\r\n if labda.ycor()<-288:\r\n labda.sety(-288)\r\n labda.változásY*=-1\r\n\r\n # ball reaches the right edge\r\n if labda.xcor()>388:\r\n labda.goto(0,0)\r\n labda.változásX*=-1\r\n bal_pontszám+=1\r\n pontszám.clear()\r\n pontszám.write(f\"Right player: {jobb_pontszám} - Left player: {bal_pontszám}\", align=\"center\", font=(\"Courier\",24,\"normal\"))\r\n\r\n\r\n # ball reaches the left edge\r\n if labda.xcor()<-388:\r\n labda.goto(0,0)\r\n labda.változásX*=-1\r\n jobb_pontszám+=1\r\n pontszám.clear()\r\n pontszám.write(f\"Right player: {jobb_pontszám} - Left player: {bal_pontszám}\", align=\"center\", font=(\"Courier\",24,\"normal\"))\r\n\r\n # bounce off the right paddle\r\n if jobb_uto.xcor()-20 < labda.xcor() < jobb_uto.xcor() and jobb_uto.ycor()-40 < labda.ycor() < jobb_uto.ycor()+40:\r\n labda.setx(jobb_uto.xcor()-20)\r\n labda.változásX*=-1\r\n\r\n # bounce off the left paddle\r\n if bal_uto.xcor()+20 > labda.xcor() > bal_uto.xcor() and bal_uto.ycor()-40 < labda.ycor() < bal_uto.ycor()+40:\r\n labda.setx(bal_uto.xcor()+20)\r\n labda.változásX*=-1","sub_path":"pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":2866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"396081085","text":"\"\"\"votes\n\nRevision ID: b0d3f8a7bbf2\nRevises: 2440d94fbc42\nCreate Date: 2019-04-22 22:17:12.904965\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 'b0d3f8a7bbf2'\ndown_revision = '2440d94fbc42'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('votes',\n sa.Column('upvote_id', sa.Integer(), nullable=True),\n sa.Column('downvote_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['downvote_id'], ['user.id'], ),\n sa.ForeignKeyConstraint(['upvote_id'], ['user.id'], )\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('votes')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/b0d3f8a7bbf2_votes.py","file_name":"b0d3f8a7bbf2_votes.py","file_ext":"py","file_size_in_byte":824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"189906952","text":"# -*- coding: utf-8 -*-\n\n# from datetime import datetime\nimport MetaTrader5 as mt5\nimport pandas as pd\n# import plotly.graph_objects as go\nimport matplotlib.pyplot as plt\nimport math\nfrom keras.layers import Dense, LSTM\nfrom sklearn.preprocessing import MinMaxScaler\nfrom keras.models import Sequential\nimport numpy as np\nfrom pandas import Series\nfrom numpy.random import randn\n\n# display data on the MetaTrader 5 package\nprint(\"MetaTrader5 package author: \", mt5.__author__)\nprint(\"MetaTrader5 package version: \", mt5.__version__)\n\n# import the 'pandas' module for displaying data obtained in the tabular form\npd.set_option('display.max_columns', 500) # number of columns to be displayed\npd.set_option('display.width', 1500) # max table width to display\n\n# establish connection to MetaTrader 5 terminal\nif not mt5.initialize():\n print(\"initialize() failed, error code =\", mt5.last_error())\n quit()\n\n# get 5000 EURUSD D1 bars, starting from yesterday\nrates = mt5.copy_rates_from_pos(\"EURUSD\", mt5.TIMEFRAME_D1, 1, 5000)\n# See the line above? The 1 next to the 5000 is the start offset in days in MetaTrader5:\n# 0 is today, 1 is yesterday, 2 is the day before, and so on, counting backwards.\n# The forecast is made for the day after the day you pick :3\n\n# shut down connection to the MetaTrader 5 terminal\nmt5.shutdown()\n# display each element of obtained data in a new line\n# print(\"Display obtained data 'as is'\")\n# for rate in rates:\n# print(rate)\n\n# create DataFrame out of the obtained data\nrates_frame = pd.DataFrame(rates)\n# convert time in seconds into the datetime format\nrates_frame['time'] = pd.to_datetime(rates_frame['time'], unit='s')\n\n# display data\nprint(\"\\nDisplay dataframe with data\")\nprint(rates_frame)\n\n# get close price and time\ndClose = rates_frame.filter(['close'])\ndTime = rates_frame.filter(['time'])\n# Show the columns of those two: dClose + dTime\nprint(rates_frame[['time', 'close']])\n\n# Visualize the closing price history in a chart\nplt.figure(figsize=(20, 8))\nplt.title('Close Price History from MT5')\nplt.plot(dTime, dClose) # (x, y)\nplt.xlabel('Date', fontsize=10)\nplt.ylabel('Close Price USD ($)', fontsize=10)\nplt.grid()\nplt.show()\n\n# EVERYTHING IS DONE WITH GETTING DATA FROM MT5\n# WE'RE GOING TO USE THE CLOSE PRICE TO TRAIN, THEN PREDICT THE PRICE\n\n\n# Converting the dataframe to a numpy array\ndataset = dClose.values\n# Get/compute the number of rows to train the model on\ntraining_data_len = math.ceil(len(dataset) * .8)\n\n# Scale all of the data to values between 0 and 1\nscaler = MinMaxScaler(feature_range=(0, 1))\nscaled_data = scaler.fit_transform(dataset)\n\n# Create the scaled training data set\ntrain_data = scaled_data[0:training_data_len, :]\n# Split the data into x_train and y_train data sets\nx_train = []\ny_train = []\nfor i in range(60, len(train_data)):\n x_train.append(train_data[i - 60:i, 0])\n y_train.append(train_data[i, 0])\n\n# Convert x_train and y_train to numpy arrays\nx_train, y_train = np.array(x_train), np.array(y_train)\n\n# Reshape the data into the shape accepted by the LSTM\nx_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))\n\n# Build the LSTM network model\nmodel = Sequential()\nmodel.add(LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1], 1)))\nmodel.add(LSTM(units=50, return_sequences=False))\nmodel.add(Dense(units=25))\nmodel.add(Dense(units=1))\n\n# Compile the model\nmodel.compile(optimizer='adam', loss='mean_squared_error')\n\n# Train the model\nmodel.fit(x_train, y_train, batch_size=10, epochs=1)\n\n# ERROR FROM HERE\n# Test data set\ntest_data = scaled_data[training_data_len - 60:, :]\n# Create the x_test and y_test data sets\nx_test = []\ny_test = dataset[training_data_len:,\n :] # Get all of the rows from training_data_len to the end, and all of the columns (here only 'close')\nfor i in range(60, len(test_data)):\n x_test.append(test_data[i - 60:i, 0])\n\n# Convert x_test to a numpy array\nx_test = np.array(x_test)\n\n# Reshape the data into the shape accepted by the LSTM\nx_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))\n\n# Getting the model's predicted price values\npredictions = model.predict(x_test)\npredictions = scaler.inverse_transform(predictions) # Undo scaling\n\n# Calculate/Get the value of RMSE\nrmse = np.sqrt(np.mean(((predictions - y_test) ** 2)))\nprint(\"RMSE:\", rmse)\n\n# Plot/Create the data for the graph\n\ntrain = dClose[:training_data_len]\nvalid = dClose[training_data_len:]\nvalid['Predictions'] = 
np.squeeze(predictions)\n# valid['Predictions'] = predictions\n\n\"\"\"\n#Visualize the data\nplt.figure(figsize=(16,8))\nplt.title('Model')\nplt.xlabel('Date', fontsize=18)\nplt.ylabel('Close Price USD ($)', fontsize=18)\nplt.plot(dTime,train[['close']])\n#plt.plot(dTime,dClose) \nplt.legend(['Train', 'Val', 'Predictions'], loc='lower right')\nplt.show()\n\"\"\"\n\nprint(\"DATA after TRAINING\")\nprint(valid)\n\n# SHOW THE VALUE OF PREDICTION\nplt.figure(figsize=(16, 8))\nplt.title('Model')\nplt.xlabel('Date or DATA', fontsize=18)\nplt.ylabel('Close Price USD ($)', fontsize=18)\nplt.plot(valid)\nplt.legend(['Train', 'Val', 'Predictions'], loc='lower right')\nplt.show()\n\n# THIS IS THE PREDICTION, WHERE THE MODEL DOES ITS JOB\n\n\n# Get the quote\napple_quote = rates_frame\nprint(apple_quote)\n# Create a new dataframe\nnew_df = apple_quote.filter(['close'])\nprint(new_df)\n# Get the last 60 day closing price\nlast_60_days = new_df[-60:].values\n# Scale the data to be values between 0 and 1\nlast_60_days_scaled = scaler.transform(last_60_days)\n# Create an empty list\nX_test = []\n# Append the past 60 days\nX_test.append(last_60_days_scaled)\n# Convert the X_test data set to a numpy array\nX_test = np.array(X_test)\n# Reshape the data\nX_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))\n# Get the predicted scaled price\npred_price = model.predict(X_test)\n# undo the scaling\npred_price = scaler.inverse_transform(pred_price)\nprint(\"Forecast for the next day: \", pred_price)","sub_path":"ai/lstm/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":6060,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"74625629","text":"import sys\nimport re\n\n\nclass IdentifyExport:\n\n DROIDTYPE = \"droid\" # backward compatibility for now...\n\n # specific hashes\n DROIDMD5TYPE = \"droid_md5\"\n DROIDSHA1TYPE = \"droid_sha1\"\n DROIDSHA256TYPE = \"droid_sha256\"\n DROIDNOHASH = \"droid_nohash\"\n DROIDTYPEBOM = \"droid_BOM\"\n FIDOTYPE = \"fido\"\n UNKTYPE = \"unknown\"\n\n SFTYPE = \"siegfried\"\n SFCSVTYPE = \"siegfried csv\"\n\n droid_md5 = (\n '\"ID\",\"PARENT_ID\",\"URI\",\"FILE_PATH\",\"NAME\",\"METHOD\",\"STATUS\"'\n + ',\"SIZE\",\"TYPE\",\"EXT\",\"LAST_MODIFIED\",\"EXTENSION_MISMATCH\",'\n + '\"MD5_HASH\",\"FORMAT_COUNT\",\"PUID\",\"MIME_TYPE\",\"FORMAT_NAME\",'\n + '\"FORMAT_VERSION\"'\n )\n\n droid_sha1 = (\n '\"ID\",\"PARENT_ID\",\"URI\",\"FILE_PATH\",\"NAME\",\"METHOD\",\"STATUS\"'\n + ',\"SIZE\",\"TYPE\",\"EXT\",\"LAST_MODIFIED\",\"EXTENSION_MISMATCH\",'\n + '\"SHA1_HASH\",\"FORMAT_COUNT\",\"PUID\",\"MIME_TYPE\",\"FORMAT_NAME\",'\n + '\"FORMAT_VERSION\"'\n )\n\n droid_sha256 = (\n '\"ID\",\"PARENT_ID\",\"URI\",\"FILE_PATH\",\"NAME\",\"METHOD\",\"STATUS\"'\n + ',\"SIZE\",\"TYPE\",\"EXT\",\"LAST_MODIFIED\",\"EXTENSION_MISMATCH\",'\n + '\"SHA256_HASH\",\"FORMAT_COUNT\",\"PUID\",\"MIME_TYPE\",\"FORMAT_NAME\",'\n + '\"FORMAT_VERSION\"'\n )\n\n droid_nohash = (\n '\"ID\",\"PARENT_ID\",\"URI\",\"FILE_PATH\",\"NAME\",\"METHOD\",\"STATUS\"'\n + ',\"SIZE\",\"TYPE\",\"EXT\",\"LAST_MODIFIED\",\"EXTENSION_MISMATCH\",'\n + '\"HASH\",\"FORMAT_COUNT\",\"PUID\",\"MIME_TYPE\",\"FORMAT_NAME\",'\n + '\"FORMAT_VERSION\"'\n )\n\n fido_re = \"^(OK|KO),[0-9]+,(fmt|x-fmt)\\/[0-9]{1,4},\"\n\n sf_orig = \"---\" + \"\\x0A\" + \"siegfried :\"\n\n sfcsv_re = \"^filename,filesize,modified,errors,md5,namespace,id,format,version,mime,basis,warning$\"\n\n # UTF8 with BOM\n droid_utf8 = \"\\xEF\\xBB\\xBF\"\n droid_utf8_md5 = droid_utf8 + droid_md5\n droid_utf8_sha1 = droid_utf8 + droid_sha1\n droid_utf8_sha256 = droid_utf8 + droid_sha256\n droid_utf8_nohash = droid_utf8 + droid_nohash\n\n def exportid(self, export):\n\n f = open(export, \"rb\")\n droid_magic = f.readline()\n sf_magic = droid_magic + f.readline()\n f.close()\n\n if '\"' not in droid_magic.strip():\n magic = ['\"{}\"'.format(field) for field in droid_magic.strip().split(\",\")]\n droid_magic = \",\".join(magic)\n\n if (\n droid_magic.strip() == self.droid_md5\n or droid_magic.strip() == self.droid_sha1\n or droid_magic.strip() == self.droid_sha256\n or droid_magic.strip() == self.droid_nohash\n ):\n return self.DROIDTYPE\n elif (\n droid_magic.strip() == self.droid_utf8_md5\n or droid_magic.strip() == self.droid_utf8_sha1\n or droid_magic.strip() == self.droid_utf8_sha256\n or droid_magic.strip() == self.droid_utf8_nohash\n ):\n return self.DROIDTYPEBOM\n elif self.sf_orig in sf_magic.strip():\n return self.SFTYPE\n elif re.search(re.compile(self.fido_re), droid_magic) is not None:\n return self.FIDOTYPE\n elif re.search(re.compile(self.sfcsv_re), droid_magic) is not None:\n return self.SFCSVTYPE\n else:\n return self.UNKTYPE\n","sub_path":"sqlitefid/libs/IdentifyExportClass.py","file_name":"IdentifyExportClass.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"597039371","text":"from janis_core import WorkflowBuilder, String\n\n# Import bioinformatics types\nfrom janis_bioinformatics.data_types import FastqGzPairedEnd, FastaWithIndexes\n\n# Import bioinformatics tools\nfrom janis_bioinformatics.tools.bwa import BwaMemLatest\nfrom janis_bioinformatics.tools.samtools import SamToolsView_1_9\nfrom janis_bioinformatics.tools.gatk4 import (\n Gatk4MarkDuplicates_4_1_4,\n Gatk4SortSam_4_1_4,\n Gatk4SetNmMdAndUqTags_4_1_4,\n)\n\n# Construct the workflow here\nw = WorkflowBuilder(\"preprocessingWorkflow\")\n\n# inputs\nw.input(\"sample_name\", String)\nw.input(\"read_group\", String)\nw.input(\"fastq\", FastqGzPairedEnd)\nw.input(\"reference\", FastaWithIndexes)\n\n# Use `bwa mem` to align our fastq paired ends to the reference genome\nw.step(\n \"bwamem\", # step identifier\n BwaMemLatest(\n reads=w.fastq,\n readGroupHeaderLine=w.read_group,\n reference=w.reference,\n markShorterSplits=True, # required for MarkDuplicates\n ),\n)\n\n# Use `samtools view` to convert the aligned SAM to a BAM\n# - Use the output `out` of the bwamem step\nw.step(\n \"samtoolsview\", SamToolsView_1_9(sam=w.bwamem.out),\n)\n\n# Use `gatk4 MarkDuplicates` on the output of samtoolsview\n# - The output of BWA is query-grouped, providing \"queryname\" is good enough\nw.step(\n \"markduplicates\",\n Gatk4MarkDuplicates_4_1_4(bam=w.samtoolsview.out, assumeSortOrder=\"queryname\"),\n)\n# Use `gatk4 SortSam` on the output of markduplicates\n# - Use the \"coordinate\" sortOrder\nw.step(\"sortsam\", Gatk4SortSam_4_1_4(bam=w.markduplicates.out, sortOrder=\"coordinate\",))\n\n# Use `gatk4 SetNmMdAndUqTags` to calculate standard tags for BAM\nw.step(\n \"fix_tags\", Gatk4SetNmMdAndUqTags_4_1_4(bam=w.sortsam.out, reference=w.reference,),\n)\n\n# Output our final bam\nw.output(\"out_bam\", 
source=w.fix_tags.out)\n","sub_path":"resources/part1/preprocessing_solution.py","file_name":"preprocessing_solution.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"392934981","text":"import sys\nimport re\n\n\nclass IdentifyExport:\n\n DROIDTYPE = \"droid\" # backward compatibility for now...\n\n # specific hashes\n DROIDMD5TYPE = \"droid_md5\"\n DROIDSHA1TYPE = \"droid_md5\"\n DROIDSHA256TYPE = \"droid_md5\"\n DROIDNOHASH = \"droid_nohash\"\n DROIDTYPEBOM = \"droid_BOM\"\n FIDOTYPE = \"fido\"\n UNKTYPE = \"unknown\"\n\n SFTYPE = \"siegfried\"\n SFCSVTYPE = \"siegfried csv\"\n\n droid_md5 = (\n '\"ID\",\"PARENT_ID\",\"URI\",\"FILE_PATH\",\"NAME\",\"METHOD\",\"STATUS\"'\n + ',\"SIZE\",\"TYPE\",\"EXT\",\"LAST_MODIFIED\",\"EXTENSION_MISMATCH\",'\n + '\"MD5_HASH\",\"FORMAT_COUNT\",\"PUID\",\"MIME_TYPE\",\"FORMAT_NAME\",'\n + '\"FORMAT_VERSION\"'\n )\n\n droid_sha1 = (\n '\"ID\",\"PARENT_ID\",\"URI\",\"FILE_PATH\",\"NAME\",\"METHOD\",\"STATUS\"'\n + ',\"SIZE\",\"TYPE\",\"EXT\",\"LAST_MODIFIED\",\"EXTENSION_MISMATCH\",'\n + '\"SHA1_HASH\",\"FORMAT_COUNT\",\"PUID\",\"MIME_TYPE\",\"FORMAT_NAME\",'\n + '\"FORMAT_VERSION\"'\n )\n\n droid_sha256 = (\n '\"ID\",\"PARENT_ID\",\"URI\",\"FILE_PATH\",\"NAME\",\"METHOD\",\"STATUS\"'\n + ',\"SIZE\",\"TYPE\",\"EXT\",\"LAST_MODIFIED\",\"EXTENSION_MISMATCH\",'\n + '\"SHA256_HASH\",\"FORMAT_COUNT\",\"PUID\",\"MIME_TYPE\",\"FORMAT_NAME\",'\n + '\"FORMAT_VERSION\"'\n )\n\n droid_nohash = (\n '\"ID\",\"PARENT_ID\",\"URI\",\"FILE_PATH\",\"NAME\",\"METHOD\",\"STATUS\"'\n + ',\"SIZE\",\"TYPE\",\"EXT\",\"LAST_MODIFIED\",\"EXTENSION_MISMATCH\",'\n + '\"HASH\",\"FORMAT_COUNT\",\"PUID\",\"MIME_TYPE\",\"FORMAT_NAME\",'\n + '\"FORMAT_VERSION\"'\n )\n\n fido_re = \"^(OK|KO),[0-9]+,(fmt|x-fmt)\\/[0-9]{1,4},\"\n\n sf_orig = \"---\" + \"\\x0A\" + \"siegfried :\"\n\n sfcsv_re = \"^filename,filesize,modified,errors,md5,namespace,id,format,version,mime,basis,warning$\"\n\n # UTF8 with BOM\n droid_utf8 = \"\\xEF\\xBB\\xBF\"\n droid_utf8_md5 = droid_utf8 + droid_md5\n droid_utf8_sha1 = droid_utf8 + droid_sha1\n droid_utf8_sha256 = droid_utf8 + droid_sha256\n droid_utf8_nohash = droid_utf8 + droid_nohash\n\n def exportid(self, export):\n\n f = open(export, \"rb\")\n droid_magic = f.readline()\n sf_magic = droid_magic + f.readline()\n f.close()\n\n if '\"' not in droid_magic.strip():\n magic = ['\"{}\"'.format(field) for field in droid_magic.strip().split(\",\")]\n droid_magic = \",\".join(magic)\n\n if (\n droid_magic.strip() == self.droid_md5\n or droid_magic.strip() == self.droid_sha1\n or droid_magic.strip() == self.droid_sha256\n or droid_magic.strip() == self.droid_nohash\n ):\n return self.DROIDTYPE\n elif (\n droid_magic.strip() == self.droid_utf8_md5\n or droid_magic.strip() == self.droid_utf8_sha1\n or droid_magic.strip() == self.droid_utf8_sha256\n or droid_magic.strip() == self.droid_utf8_nohash\n ):\n return self.DROIDTYPEBOM\n elif self.sf_orig in sf_magic.strip():\n return self.SFTYPE\n elif re.search(re.compile(self.fido_re), droid_magic) is not None:\n return self.FIDOTYPE\n elif re.search(re.compile(self.sfcsv_re), droid_magic) is not None:\n return self.SFCSVTYPE\n else:\n return self.UNKTYPE\n","sub_path":"sqlitefid/libs/IdentifyExportClass.py","file_name":"IdentifyExportClass.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"597039371","text":"\"\"\"\n\nUtility for 
importing all of the information in a FGD file into a format that this package understands.\n\nIt is assumed that the input FGD uses the typical convention of newline placement: Blank lines are okay, but anything\nsquished onto one line or broken into two lines is not.\n\nAuthor: Eli Zupke\n\nFurther reading: https://developer.valvesoftware.com/wiki/FGD\n\n\"\"\"\nimport re\n#import vmflib2\nimport datetime\nimport os.path\nimport keyword\n\n\nDOCSTRING = \"\\\"\\\"\\\"{0}\\\"\\\"\\\"\"\nSTART_DOCSTRING_TEXT = \"\\nHelper classes for creating maps in any Source Engine game that uses {0}.\\n\" \\\n \"This file was auto-generated by import_fgd.py on {1}.\\n\"\n\nAUTOGENERATED_TODO_TEXT = \"TODO: This class was automatically generated, and may need correction or expansion.\"\n\nCLASS_TEXT = \"class {0}({1}):\"\nINIT_TEXT = \"def __init(self, {0}):\"\n#IMPORT_TEXT = \"import {0}\"\n\n\ndef to_camel_case(string: str):\n \"Convert an underscore_string to a CamelCaseString.\"\n out = \"\"\n\n upper_next = True\n\n for c in string:\n if c == \"_\":\n upper_next = True\n else:\n if upper_next:\n out += c.upper()\n else:\n out += c.lower()\n upper_next = False\n return out\n\nclass propertyType:\n \"\"\"Represents a type of property (such as string, integer, choices, etc.) that can be found in FGD files.\"\"\"\n def __init__(self, base_type):\n # This is the type of object that this property type represents\n self.base_type = base_type\n\n \"\"\"Returns the string we should use to represent this in a type hint in a python source file.\"\"\"\n def get_type_hint(self) -> str:\n # If we had to represent this as a string, then the python file we are generating will likely also have to\n # represent this as a string, so enclose it with quotes. We could likely encase everything in quotes safely, but\n # checking just feels right.\n if self.base_type is None:\n return \"\"\n if type(self.base_type) == str:\n return ': \"{0}\"'.format(self.base_type)\n elif type(self.base_type) == type:\n self.base_type: type\n return ': {0}'.format(self.base_type.__name__)\n else:\n return ': {0}'.format(self.base_type)\n\n\nclass propertyInstance:\n \"\"\"Represents a single property in a single class\"\"\"\n def __init__(self, name: str, type: propertyType, short_description: str, default, long_description: str):\n\n if keyword.iskeyword(name):\n print(\"Warning: property '{0}' is reserved keyword!\".format(name))\n name += \"_\"\n\n self.name = name\n self.type = type\n self.short_description = short_description\n self.default = default\n self.long_description = long_description\n\n def represent(self):\n \"\"\"Represent this property as an assignment in the output python file.\"\"\"\n out = \"\"\n ind2 = \"\\n\" + indent_level(2)\n out += \"{0}# {1} : {2}\" \\\n \"{0}self.{3}{5} = {3}\".format(ind2, self.short_description, self.long_description, self.name, self.default,\n self.type.get_type_hint())\n\n return out\n\n def init_line_represent(self):\n \"\"\"Represent this property as an arg in the init function line\"\"\"\n out = \", {0}{2}={1}\".format(self.name, self.default, self.type.get_type_hint())\n return out\n\n\nclass propertyFiller(propertyInstance):\n \"\"\"Prints a blank line in the property list\"\"\"\n def __init__(self):\n propertyInstance.__init__(self, \"\", None, \"\", \"\", \"\")\n\n def represent(self):\n return \"\\n\"\n\n def init_line_represent(self):\n return \"\"\n\n\n\n\nclass fgdClass:\n \"\"\"Represents a single class found in the FGD file (usually an entity)\"\"\"\n def __init__(self, 
class_name: str, argument_string: str, line_number: int, fgd_name: str, class_lookup: dict):\n\n # Whether this is a base class that can be safely ignored.\n self.is_base = (class_name == \"BaseClass\")\n self.is_solid = (class_name == \"SolidClass\")\n self.is_filter = (class_name == \"FilterClass\")\n self.is_point_class = (class_name not in (\"SolidClass\", \"BaseClass\"))\n self.properties = []\n\n if self.is_point_class:\n self.add_property(propertyInstance(\"origin\", property_types[\"origin\"], \"Origin\", \"\\\"0 0 0\\\"\", \"This entity's location in 3D space.\"))\n self.ent_name = \"name_not_found\"\n\n m = re.match(r\"([^=]*)=\\s*([^\\s:]*)(?: ?: ?\\\"([^\\n]*)\\\")?\", argument_string)\n if m is None:\n raise IOError(\"Can't match '{0}'\\nFile: {1}, Line {2}\".format(argument_string, fgd_name, line_number))\n\n args, self.ent_name, self.description = m.group(1, 2, 3)\n\n if self.ent_name == \"worldspawn\":\n # Kind of a hack, as we don't want this one entity to be auto-generated.\n self.is_base = True\n\n if not self.description:\n self.description = \"\"\n\n # Note: The only thing we need to know in the args section is the 'base' property.\n base_match = re.search(\"base\\(([^()]*)\\)\", args)\n if base_match:\n parents = base_match.group(1).split(\",\")\n for parent in [class_lookup[p.strip().lower()] for p in parents]:\n for prop in parent.properties:\n self.add_property(prop)\n self.add_property(propertyFiller())\n\n self.name = to_camel_case(self.ent_name)\n self.parent = \"Entity\"\n\n self.fgd_loc = \"{0}, line {1}\".format(fgd_name, line_number)\n\n def add_property(self, prop: propertyInstance):\n\n if not isinstance(prop, type(propertyFiller())):\n # Remove duplicate properties\n self.properties = list(filter(lambda x: x.name != prop.name, self.properties))\n\n self.properties.append(prop)\n\n def represent(self):\n \"\"\"Represent this class as a string in the output python file.\"\"\"\n # Three levels of indentations, for future reference\n ind1 = \"\\n\" + indent_level(1)\n ind2 = \"\\n\" + indent_level(2)\n ind3 = \"\\n\" + indent_level(3)\n\n docstring = \"{1}Auto-generated from {0}.{1}{2}{1}\".format(self.fgd_loc, ind1, self.description)\n\n out = CLASS_TEXT.format(self.name, self.parent)\n out += ind1\n out += DOCSTRING.format(docstring)\n\n # Create the header for the constructor method for this FGD object\n out += ind1\n out += \"def __init__(self, vmf_map: \\\"ValveMap\\\"\"\n # Add all of the properties as arugments in our header\n for p in self.properties:\n p: propertyInstance\n out += p.init_line_represent()\n out += \"):\"\n\n out += \"{0}{1}.__init__(self, \\\"{2}\\\", vmf_map)\".format(ind2, self.parent, self.ent_name)\n\n out += \"\\n\"\n for p in self.properties:\n p: propertyInstance\n out += p.represent()\n\n if len(self.properties) > 0:\n out += \"\\n{0}self.auto_properties.extend([\".format(ind2)\n for p in self.properties:\n p: propertyInstance\n if type(p) == type(propertyFiller()):\n continue\n out += \"\\\"{0}\\\", \".format(p.name)\n out = out[:-2]\n out += \"])\\n\"\n\n\n # TODO: ensure nothing else needs to be added here\n\n return out\n\n\n\nstring_type = propertyType(str)\ninteger_type = propertyType(int)\nfloat_type = propertyType(float)\n\n# This means we have to type to represent this class. 
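(get_type_hint() returns an empty string when base_type is None, so these properties are emitted without a type hint). 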
We should fix that at some point@\nno_type = propertyType(None)\n\ncolor255_type = propertyType(\"RGB\")\norigin_type = propertyType(\"Origin\")\n\n# Which property type to use for each type listed in the file.\nproperty_types = {\n \"choices\": no_type,\n \"flags\": no_type,\n \"axis\": no_type,\n \"angle\": origin_type,\n \"angle_negative_pitch\": origin_type,\n \"color255\": color255_type,\n \"color1\": no_type,\n \"origin\": origin_type,\n \"sidelist\": no_type,\n \"vecline\": no_type,\n \"vector\": origin_type,\n \"integer\": integer_type,\n \"node_dest\": integer_type,\n \"float\": float_type,\n \"string\": string_type,\n \"target_source\": string_type,\n \"sound\": string_type,\n \"sprite\": string_type,\n \"studio\": string_type,\n \"target_destination\": string_type,\n \"target_name_or_class\": string_type,\n \"scene\": string_type,\n \"npcclass\": string_type,\n \"filterclass\": string_type,\n \"material\": string_type,\n \"decal\": string_type,\n \"instance_file\": string_type,\n \"instance_variable\": string_type,\n \"instance_parm\": string_type,\n \"pointentityclass\": string_type,\n\n}\n\nignore_classes = [\"MaterialExclusion\", \"AutoVisGroup\", \"mapsize\"]\nentity_classes = [\"BaseClass\", \"PointClass\", \"NPCClass\", \"SolidClass\", \"KeyFrameClass\", \"MoveClass\", \"FilterClass\"]\n\ndef fgd_name_to_py_name(fgd_path:str):\n \"\"\"Converts the name of a given FGD file or FGD file path to the name of its corresponding vmflib2 game file.\"\"\"\n\n # Ignore file directories\n fgd_name = os.path.split(fgd_path)[1]\n m = re.match(r\"([^/\\\\]*)\\.[fF][gG][dD]\", fgd_name)\n\n if m and m.group(1):\n return \"{0}.py\".format(m.group(1))\n else:\n raise ValueError(\"FGD path/name {0} does not look like a valid FGD path/name!\".format(fgd_name))\n\ndef indent_level(level:int):\n \"\"\"Returns a string containing indentation to the given level, in spaces\"\"\"\n return \" \" * level\n\ndef create_python_file(output_path:str, fgd_name:str, imports:list, classes:list):\n\n classes.sort(key=lambda x : x.ent_name)\n\n with open(output_path, \"w\") as output_file:\n\n date = str(datetime.datetime.now())\n\n # Write the starting docstring\n output_file.write(DOCSTRING.format(START_DOCSTRING_TEXT.format(fgd_name, date)))\n output_file.write(\"\\n\\n\")\n\n # Write the import statements\n for i in imports:\n output_file.write(i)\n output_file.write(\"\\n\")\n output_file.write(\"\\n\\n\")\n\n for c in classes:\n c: fgdClass\n output_file.write(c.represent())\n output_file.write(\"\\n\\n\")\n\n\n\ndef import_fgd(fgd_path:str):\n\n output_name = fgd_name_to_py_name(fgd_path)\n output_dir, _ = os.path.split(fgd_path)\n\n output_path = os.path.join(output_dir, output_name)\n\n # A list of all of the files our output file will need to import\n imports = [\"from vmflib2.vmf import *\"]\n\n\n # Just so that we can find other classes, here's all the classes, keyed by their names (what appears in the FGD file)\n class_lookup = dict()\n\n classes = read_fgd(fgd_path, class_lookup)\n\n fgd_name = os.path.split(fgd_path)[1]\n\n create_python_file(output_path, fgd_name, imports, classes)\n\n\ndef read_fgd(fgd_path, class_lookup):\n\n # A list of all of the classes that we need to add to the file\n classes = []\n\n loc_prefix = os.path.split(fgd_path)[0]\n\n with open(fgd_path, \"r\") as input_file:\n\n # How many open brackets we've seen minus how many close brackets we've seen\n bracket_level = 0\n\n # Whether we're currently ignoring the class that we're reading\n ignoring_class = False\n\n 
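# the fgdClass currently being populated, or None between class definitions\n 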
current_class = None\n\n fgd_name = os.path.split(fgd_path)[1]\n\n for line_number, line in enumerate(input_file, start=1):\n\n clean_line = line.strip()\n # Remove comments\n if clean_line.find(\"//\") > -1:\n clean_line = clean_line[:clean_line.find(\"//\")]\n\n string_match = re.match(\"\\\"([^\\\"]*)\\\"+?\", clean_line)\n property_match = re.match(\"([^()]*)\\(([^()]*)\\)(\\s*:\\s*[^\\n]*)?\", clean_line)\n input_match = re.match(\"input\\s*([^()]*)\\(([^()]*)\\)(\\s*:\\s*[^\\n]*)?\", clean_line)\n output_match = re.match(\"output\\s*([^()]*)\\(([^()]*)\\)(\\s*:\\s*[^\\n]*)?\", clean_line)\n\n if clean_line.find(\"@\") == 0:\n # We've hit a new class definition\n if bracket_level > 0:\n raise IOError(\"FGD file has a class definition inside another other class definition! \"\n \"\\nLine: {0}, '{1}'\".format(line_number, line))\n class_name, argument_string = re.match(\"@([\\S]*)(?:\\s*([^\\n]*))?\", clean_line).group(1, 2)\n argument_string: str\n\n ignoring_class = class_name in ignore_classes\n if class_name == \"include\":\n new_path = os.path.join(loc_prefix, argument_string.replace(\"\\\"\", \"\"))\n read_fgd(new_path, class_lookup)\n else:\n if not class_name in entity_classes:\n print(\"Warning: class name {0} not in list of entity class names. tentatively ignoring \"\n \"line {1}.\".format(class_name, line_number))\n ignoring_class = True\n if not ignoring_class:\n current_class = fgdClass(class_name, argument_string, line_number, fgd_name, class_lookup)\n # Add current_class to the class list if not a base class\n if not current_class.is_base:\n classes.append(current_class)\n # And to the lookup\n class_lookup[current_class.ent_name.lower()] = current_class\n\n elif clean_line.find(\"[\") == 0:\n # TODO: handle open brackets at end of line, like dod.fgd\n bracket_level += 1\n elif clean_line.find(\"]\") == 0:\n bracket_level -= 1\n if bracket_level < 0:\n raise IOError(\"FGD file has an errant close bracket! \"\n \"\\nFile: {2}, Line: {0}, '{1}'\".format(line_number, line, fgd_name))\n elif bracket_level == 0:\n current_class = None\n elif string_match and current_class and bracket_level == 0:\n # We've got part of the description!\n current_class.description += string_match.group(1)\n else:\n if current_class and bracket_level == 1 and not ignoring_class:\n if string_match:\n # TODO add onto last description\n pass\n elif input_match:\n # TODO add inputs\n pass\n elif output_match:\n # TODO add output\n pass\n elif property_match:\n prop_name = property_match.group(1)\n prop_type = property_types[property_match.group(2).lower().strip()]\n args = property_match.group(3)\n prop_short_desc = \"TODO: Replace this filler.\"\n prop_long_desc = \"\"\n prop_default = \"\\\"\\\"\"\n if not args:\n # print(clean_line)\n pass\n else:\n property_args_match = re.match(\"\\s*:\\s*\\\"([^\\\"]*)\\\"(?:\\s*:\\s*([^:=]*))?(?:\\s*:\\s*\\\"([^\\\"]*)\\\")?\", property_match.group(3))\n if property_args_match.group(1):\n prop_short_desc = property_args_match.group(1)\n if property_args_match.group(2):\n prop_default = property_args_match.group(2)\n # Hack to fix strings cut off by found ':' char. 
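When a quoted default value contains a colon, the match above truncates it at that colon, leaving an unbalanced quote which the line below re-closes. 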
A better fix would be a regex that ignored ':''s in strings.\n if prop_default.count(\"\\\"\") == 1:\n prop_default += \"\\\"\"\n prop_default = prop_default.strip()\n if property_args_match.group(3):\n prop_long_desc = property_args_match.group(3)\n prop = propertyInstance(prop_name, prop_type, prop_short_desc, prop_default, prop_long_desc)\n current_class.add_property(prop)\n return classes\n\ndef import_all():\n for f in os.listdir(os.path.join(\"vmflib2\", \"games\")):\n _, ext = os.path.splitext(f)\n if ext.lower() != \".fgd\":\n continue\n print(\"Importing {0}\".format(f))\n import_fgd(os.path.join(\"vmflib2\", \"games\", f))\n\ndef main():\n import sys\n\n\n if len(sys.argv) < 2:\n print(\"The location of the FGD file to import is a required argument!\")\n exit(2)\n elif sys.argv[1].lower() in (\"-h\", \"--help\"):\n print(\"Usage: import_fgd.py [Path to the FGD file to import]\")\n exit(0)\n elif len(sys.argv) > 2:\n print(\"This script can only handle one argument. If you don't like that, feel free to contribute to the project on Github.\")\n exit(2)\n file = sys.argv[1]\n if not os.path.exists(file):\n print(\"File '{0}' was not found.\".format(file))\n exit(2)\n print(\"Importing {0}\".format(file))\n import_fgd(file)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tools/import_fgd.py","file_name":"import_fgd.py","file_ext":"py","file_size_in_byte":17100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"254391700","text":"from gloria.service.decorator import task, Property\nimport time\nimport datetime\nfrom datetime import datetime\nimport calendar\nimport threading\nimport logging\nimport read_ini\nimport sqlite3\n\n\n@task(autostart=True)\nclass GarbageDay:\n \"\"\"Notifies you what type of garbage to throw: Recycling or Garbage\"\"\" \n\n def __init__(self):\n self.sig_stop = threading.Event()\n self._sleep_time = 3600\n self._db_file = read_ini.g_config['db_file']\n self._garbage_day = read_ini.g_config['garbage_day']\n self._email = read_ini.g_config['email']\n self._this_week_garbage_type = None\n self._next_week_garbage_type = None\n\n def start(self):\n logging.info('Starting {0}'.format(self.__class__.__name__))\n logging.info('db_file: {0}'.format(self._db_file))\n logging.info('Garbage day: {0}'.format(self._garbage_day))\n logging.info('email: {0}'.format(self._email))\n\n def run(self):\n while not self.sig_stop.is_set():\n if self.is_garbage_day_tomorrow():\n self._this_week_garbage_type = self.get_garbage_type()\n logging.info('This week garbage type: {0}'.format(self._this_week_garbage_type)) \n self.set_next_garbage_type()\n self._next_week_garbage_type = self.get_garbage_type()\n logging.info('Next week garbage type: {0}'.format(self._next_week_garbage_type)) \n self.notify()\n time.sleep(self._sleep_time)\n\n def is_garbage_day_tomorrow(self):\n cur_dow = calendar.day_name[(datetime.today().weekday() + 1) % 7]\n logging.info('Today + 1: {0}, GD: {1}'.format(cur_dow, self._garbage_day))\n return True if cur_dow == self._garbage_day else False\n\n def get_garbage_type(self):\n dbCon = sqlite3.connect(self._db_file, timeout=30)\n dbCur = dbCon.cursor()\n dbCur.execute('SELECT type FROM garbage WHERE next = 1')\n return dbCur.fetchone()[0]\n\n def set_next_garbage_type(self):\n dbCon = sqlite3.connect(self._db_file, timeout=30)\n dbCur = dbCon.cursor()\n dbCur.execute('UPDATE garbage SET next = 2 where next = 1')\n dbCur.execute('UPDATE garbage SET next = 1 where next = 0')\n dbCur.execute('UPDATE 
garbage SET next = 0 where next = 2')\n        dbCon.commit()\n\n    def notify(self):\n        import smtplib\n        from email.mime.text import MIMEText\n\n        msg = MIMEText('Next week: ' + self._next_week_garbage_type)\n        msg['Subject'] = 'This week garbage type: ' + self._this_week_garbage_type\n        msg['To'] = self._email\n        s = smtplib.SMTP('localhost')\n        s.sendmail(self._email, [self._email], msg.as_string())\n        logging.info('Notification sent')\n\n    def stop(self):\n        logging.info('Stopping {0} (tid {1})'.format(self.__class__.__name__, threading.current_thread().name))\n        self.sig_stop.set()\n","sub_path":"test/services/garbage-day/task/gd.py","file_name":"gd.py","file_ext":"py","file_size_in_byte":2914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"293903511","text":"from decimal import Decimal, InvalidOperation, ROUND_HALF_UP\nimport argparse\nimport re\n\n\ndef parse_arguments():\n    parser = argparse.ArgumentParser()\n    parser.add_argument('price', help='unformatted price')\n    return parser.parse_args()\n\n\ndef format_price(price):\n    if not isinstance(price, str):\n        price = str(price)\n    price = price.strip()\n    if not re.fullmatch(r'[-+]?\\d*\\.?\\d*', price):\n        return None\n\n    try:\n        decimal_price = Decimal(price).quantize(\n            Decimal('.01'),\n            rounding=ROUND_HALF_UP\n        )\n        precision = 0 if decimal_price % 1 == 0 else 2\n    except InvalidOperation:\n        return None\n    return '{0:,.{1:}f}'.format(decimal_price, precision).replace(',', ' ')\n\n\nif __name__ == '__main__':\n    arguments = parse_arguments()\n    print(format_price(arguments.price))\n","sub_path":"format_price.py","file_name":"format_price.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"644082842","text":"import numpy as np\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras.applications.inception_resnet_v2 import InceptionResNetV2\nfrom tensorflow.keras.applications.inception_resnet_v2 import preprocess_input\nfrom tensorflow.keras.optimizers import Adam\nimport matplotlib\nmatplotlib.use('agg')\nimport matplotlib.pyplot as plt\n\nclass TrainingPlot(keras.callbacks.Callback):\n    #source: https://github.com/kapil-varshney/utilities/blob/master/training_plot/trainingplot.py\n    def __init__(self, filename='/nfs/home/xwang/Keras_Transfer_Learning_June/output/training_plot_keras.png'):\n        self.filename = filename\n\n    # This function is called when the training begins\n    def on_train_begin(self, logs={}):\n        # Initialize the lists for holding the logs, losses and accuracies\n        self.losses = []\n        self.acc = []\n        self.val_losses = []\n        self.val_acc = []\n        self.logs = []\n\n    # This function is called at the end of each epoch\n    def on_epoch_end(self, epoch, logs={}):\n\n        # Append the logs, losses and accuracies to the lists\n        self.logs.append(logs)\n        self.losses.append(logs.get('loss'))\n        self.acc.append(logs.get('accuracy'))\n        self.val_losses.append(logs.get('val_loss'))\n        self.val_acc.append(logs.get('val_accuracy'))\n        #loss: 7.6934 - accuracy: 0.7840 - val_loss: 7.6934 - val_accuracy: 0.7837\n\n        # Before plotting ensure at least 2 epochs have passed\n        if len(self.losses) > 1:\n\n            N = np.arange(0, len(self.losses))\n\n            # You can choose the style of your preference\n            # print(plt.style.available) to see the available options\n            plt.style.use(\"seaborn\")\n\n            # Plot train loss, train acc, val loss and val acc against epochs passed\n            plt.figure()\n            plt.plot(N, self.losses, label = \"train_loss\")\n            
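# Editor's note (added; not in the original file): the four history lists each grow by one entry per epoch, so N = arange(len(losses)) keeps the curves below aligned; the 'accuracy'/'val_accuracy' log keys exist because the model is compiled with metrics=['accuracy'] further down (older Keras releases logged 'acc'/'val_acc' instead).\n            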
plt.plot(N, self.acc, label = \"train_acc\")\n            plt.plot(N, self.val_losses, label = \"val_loss\")\n            plt.plot(N, self.val_acc, label = \"val_acc\")\n            plt.title(\"Training Loss and Accuracy [Epoch {}]\".format(epoch))\n            plt.xlabel(\"Epoch #\")\n            plt.ylabel(\"Loss/Accuracy\")\n            plt.legend()\n            # Make sure there exists a folder called output in the current directory\n            # or replace 'output' with whatever directory you want to put in the plots\n            plt.savefig(self.filename)\n            plt.close()\n\n\nif __name__ == \"__main__\":\n    # Data loading \n    dataset_Keras_PATH = \"/nfs/home/xwang/Keras_Transfer_Learning_June/Dataset_Keras_Folder/\"\n    kras_class_folder_path = \"/nfs/home/xwang/Keras_Transfer_Learning_June/Dataset_Keras_Folder/class_kras/\"\n    nokras_class_folder_path = \"/nfs/home/xwang/Keras_Transfer_Learning_June/Dataset_Keras_Folder/class_nokras/\"\n    \n    # Create training dataset.\n    train_dataset = tf.keras.preprocessing.image_dataset_from_directory(dataset_Keras_PATH, validation_split=0.3, subset=\"training\", seed=2020, batch_size=200, image_size=(512, 512))\n    # Create validation dataset. \n    validation_dataset = tf.keras.preprocessing.image_dataset_from_directory(dataset_Keras_PATH, validation_split=0.3, subset=\"validation\", seed=2020, batch_size=200,image_size=(512, 512))\n\n    # Instantiate a base model and load pre-trained weights into it\n    base_model = InceptionResNetV2(\n        include_top=False,\n        weights='imagenet',\n        input_shape=(512, 512, 3)\n    )\n\n    # Freeze base model\n    base_model.trainable = False\n\n    # - Create a new model on top of the output of one (or several) layers from the base model.\n    \n    inputs = keras.Input(shape=(512, 512, 3))\n    x = base_model(inputs, training=False)\n    \n    x = keras.layers.GlobalAveragePooling2D()(x)\n    x = keras.layers.Dropout(0.5)(x)\n    \n    outputs = keras.layers.Dense(2, activation='softmax', name='softmax')(x)\n    current_model = keras.Model(inputs, outputs)\n    print(current_model.summary())\n\n    #Cross-entropy is the default loss function to use for binary classification problems.\n    #It is intended for use with binary classification where the target values are in the set {0, 1}.\n    #loss_fn = keras.losses.BinaryCrossentropy()\n    optimizer_adam = keras.optimizers.Adam(1e-3)#learning rate is default to 0.001\n    \n    # Create an instance of the TrainingPlot class with the filename.\n    plot_losses = TrainingPlot()\n    \n    epochs = 50\n    \n    callbacks_plotloss = [\n        plot_losses\n        #keras.callbacks.ModelCheckpoint(\"save_at_{epoch}.h5\"),\n    ]\n    \n    current_model.compile(\n        optimizer=optimizer_adam,\n        loss=\"binary_crossentropy\",\n        metrics=[\"accuracy\"],\n    )\n    \n    #Configure the dataset for performance\n    train_dataset = train_dataset.prefetch(buffer_size=200)\n    validation_dataset = validation_dataset.prefetch(buffer_size=200)\n\n    #Train the model using callback to the TrainingPlot class object\n    current_model.fit(\n        train_dataset, epochs=epochs, callbacks=callbacks_plotloss, validation_data=validation_dataset,\n    )","sub_path":"Stage 3 Transfer Learning with Keras Application /output_plot/train_50_epoch_keras.py","file_name":"train_50_epoch_keras.py","file_ext":"py","file_size_in_byte":5088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
+{"seq_id":"586363817","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n'''\n @Author : RuiJia Li \n @Time : 2022/4/8 23:34\n @File : Longest Common Subsequence\n @Desc :\n'''\n\n\"\"\"\nGiven two strings text1 and text2, return the length of their longest common subsequence. If there is no common subsequence, return 0.\n\nA subsequence of a string is a new string composed from the original string by deleting some characters (possibly none) without changing the relative order of the remaining characters.\n\nFor example, \"ace\" is a subsequence of \"abcde\", but \"aec\" is not a subsequence of \"abcde\".\nA common subsequence of two strings is a subsequence shared by both strings.\n\nInput: text1 = \"abcde\", text2 = \"ace\" \nOutput: 3 \nExplanation: the longest common subsequence is \"ace\", and its length is 3.\n\nTraverse starting from the smaller sequence: the first text is the short one, the second text is the long one.\nTwo pointers? One points into the first text, one into the second; when text1[i] == text2[j], length +1.\nWhen text1[i] != text2[j], advance j and compare; when equal, advance i.\n\nContiguity is not required -- just use the two pointers and traverse.\nUsing while:\n while ilen(text2):\n            text1,text2 = text2,text1\n        n1,n2 = len(text1),len(text2)\n        i ,j = 0,0\n        sim_length = 0\n        while i bool:\n    if -1 < x < image.shape[0] and -1 < y < image.shape[1]:\n        return False\n    return True\n\n\ndef point_on_line(x0: int, y0: int, x1: int, y1: int, x_coord: int) -> int:\n    return int((y1 - y0) * (x_coord - x0) / (x1 - x0) + y0)\n\n\ndef measure_width(skeleton: np.ndarray, shell: np.ndarray, x: int, y: int, prev_x: int, prev_y: int) -> float:\n    next_x = -1\n    next_y = -1\n\n    for i in range(-1, 2, 2):\n        for j in range(-1, 2):\n            if not out_of_bounds(skeleton, x + i, y + j) and skeleton[x + i][y + j] == 1 and shell[x + i][y + j] != 0:\n                next_x, next_y = x + i, y + j\n                break\n\n    if next_x == -1:\n        for j in range(-1, 2, 2):\n            if not out_of_bounds(skeleton, x, y + j) and skeleton[x][y + j] == 1 and shell[x][y + j] != 0:\n                next_x, next_y = x, y + j\n                break\n\n    if next_x == -1:\n        next_x, next_y = prev_x, prev_y\n    prev_x, prev_y = x, y\n\n    support_vector = [[-1 * (next_y - y) + x, (next_x - x) + y], [-1 * (prev_y - y) + x, (prev_x - x) + y]]\n    check_points = [[x, y], [x, y]]\n    result_points = [[0, 0], [0, 0]]\n\n    while True:\n        check_points[0][0] += 1\n        check_points[0][1] += 1\n        check_points[1][0] -= 1\n        check_points[1][1] -= 1\n\n        if support_vector[0][0] == support_vector[1][0]:\n            check_points[0][0] = support_vector[0][0]\n            check_points[1][0] = support_vector[0][0]\n        elif support_vector[0][1] == support_vector[1][1]:\n            check_points[0][1] = support_vector[0][1]\n            check_points[1][1] = support_vector[0][1]\n        else:\n            check_points[0][1] = point_on_line(support_vector[0][0], support_vector[0][1],\n                                               support_vector[1][0], support_vector[1][1],\n                                               check_points[0][0])\n            check_points[1][1] = point_on_line(support_vector[0][0], support_vector[0][1],\n                                               support_vector[1][0], support_vector[1][1],\n                                               check_points[1][0])\n\n        if out_of_bounds(skeleton, check_points[0][0], check_points[0][1]) or \\\n                not shell[check_points[0][0]][check_points[0][1]]:\n            result_points[0] = check_points[0]\n            result_points[1][0] = 2 * x - result_points[0][0]\n            result_points[1][1] = 2 * y - result_points[0][1]\n\n            return math.dist(result_points[0], result_points[1])\n\n        if out_of_bounds(skeleton, check_points[1][0], check_points[1][1]) or \\\n                not shell[check_points[1][0]][check_points[1][1]]:\n            result_points[1] = check_points[1]\n            result_points[0][0] = 2 * x - result_points[1][0]\n            result_points[0][1] = 2 * y - result_points[1][1]\n\n            return math.dist(result_points[0], result_points[1])\n\n\ndef skeleton_walker(skeleton: np.ndarray, shell: np.ndarray, result: List[float], x: int, y: int,\n                    pixels_num: int, current_cell_data: List[float], prev_x: int, prev_y: int) -> None:\n    if pixels_num > 10:\n        current_width = measure_width(skeleton, shell, x, y, prev_x, prev_y)\n        current_cell_data.append(current_width)\n        pixels_num = 0\n\n    for i in range(-1, 2, 2):\n        for j in range(-1, 2):\n            if not out_of_bounds(skeleton, x + i, y + j) and \\\n                    skeleton[x + i, y + j] == 1 and \\\n                    shell[x + i, y + j] != 0:\n                skeleton[x + i, y + j] = 0\n                skeleton_walker(skeleton, shell, result, x + i, y + j, pixels_num + 1, current_cell_data, x, y)\n\n    for j 
in range(-1, 2, 2):\n if not out_of_bounds(skeleton, x, y + j) and \\\n skeleton[x][y + j] == 1 and \\\n shell[x][y + j] != 0:\n skeleton[x, y + j] = 0\n skeleton_walker(skeleton, shell, result, x, y + j, pixels_num + 1, current_cell_data, x, y)\n\n if len(current_cell_data) > 1:\n result.append(np.average(current_cell_data[1:]))\n current_cell_data.clear()\n current_cell_data.append(0)\n\n skeleton[x, y] = 0\n\n\ndef read_wsi(path: Path) -> np.ndarray:\n file_extension = path.suffix\n slideio_driver_by_file_extension = {\n '.svs': 'SVS',\n '.afi': 'AFI',\n '.scn': 'SCN',\n '.czi': 'CZI',\n '.zvi': 'ZVI',\n '.ndpi': 'NDPI',\n '.tiff': 'GDAL',\n '.tif': 'GDAL',\n }\n slideio_driver = slideio_driver_by_file_extension[file_extension]\n slide = slideio.open_slide(str(path), slideio_driver)\n scene = slide.get_scene(0)\n full_resolution_width = scene.rect[2]\n print('full_resolution_width', full_resolution_width)\n region = scene.read_block(size=(round(full_resolution_width / 8), 0))\n return region\n\n\ndef main():\n sys.setrecursionlimit(100000)\n image = iio.imread(r'D:\\Temp\\kidney\\Kidney.png')\n # image = read_wsi(Path(r'D:\\Temp\\kidney\\108_2022_MSB.svs'))\n image_hed = ski.color.rgb2hed(image)\n\n skel = ski.morphology.skeletonize(image_hed[:, :, 2] > 0.10)\n iio.imwrite(r'D:\\Temp\\kidney\\Kidney-skel.png', (skel * 255).astype(np.uint8))\n\n result = [0]\n stop_flag = False\n while not stop_flag:\n stop_flag = True\n for i in range(skel.shape[0]):\n for j in range(skel.shape[1]):\n if skel[i, j] == 1:\n stop_flag = False\n skel[i, j] = 0\n pixels_num = 0\n current_cell_data = [0]\n skeleton_walker(skel, image_hed[:, :, 2] > 0.10, result, i, j, pixels_num, current_cell_data, -1, -1)\n\n print(result[1:])\n print(np.average(result[1:]))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"biocell/src/bsmu/biocell/plugins/renal_tubule_analyzer.py","file_name":"renal_tubule_analyzer.py","file_ext":"py","file_size_in_byte":5992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"57515475","text":"#!/usr/bin/env python3\nimport json\nimport pickle\n\n#####\n# SLACK API Wrappers\n#####\ndef invite_user(slack_client, user, channel):\n \"\"\"\n Invite a user to a given channel\n \"\"\"\n response = slack_client.api_call(\"channels.invite\",\n channel=channel,\n user=user)\n return response\n\ndef kick_user(slack_client, user_id, channel_id):\n response = slack_client.api_call(\"channels.kick\",\n channel=channel_id,\n user=user_id)\n return response\n\n\ndef set_purpose(slack_client, channel, purpose):\n \"\"\"\n Set the purpose of a given channel\n \"\"\"\n response = slack_client.api_call(\"channels.setPurpose\",\n purpose=purpose, channel=channel)\n\n return response\n\ndef get_members(slack_client):\n \"\"\"\n Get a list of all members\n \"\"\"\n response = slack_client.api_call(\"users.list\", presence=True)\n return response\n\ndef get_member(slack_client, user_id):\n \"\"\"\n Get a member for a given user_id\n \"\"\"\n response = slack_client.api_call(\"users.info\", user=user_id)\n return response\n\ndef create_channel(slack_client, name):\n \"\"\"\n Create a channel with a given name\n \"\"\"\n response = slack_client.api_call(\"channels.create\",\n name=name, validate=False)\n\n return response\n\ndef get_channel_info(slack_client, channel_id):\n \"\"\"\n Get the channel info of a given channel ID\n \"\"\"\n response = slack_client.api_call(\"channels.info\",\n channel=channel_id)\n\n return response\n\n\n#######\n# Helper 
functions\n#######\ndef load_json(string):\n    \"\"\"\n    Return a JSON object based on its string representation.\n    Returns False if the string isn't valid JSON.\n    \"\"\"\n    try:\n        json_object = json.loads(string)\n    except ValueError as e:\n        return False\n    return json_object\n\n\n#######\n# Database manipulation\n#######\ndef get_ctf_by_channel_id(database, channel_id):\n    \"\"\"\n    Fetch a CTF object in the database with a given channel ID\n    If found, a CTF object is returned.\n    Else, the function returns False\n    \"\"\"\n    ctfs = pickle.load(open(database, \"rb\"))\n    for ctf in ctfs:\n        if ctf.channel_id == channel_id:\n            return ctf\n\n    return False\n\ndef get_challenge_by_name(database, challenge_name, ctf_channel_id):\n    \"\"\"\n    Fetch a Challenge object in the database with a given name and ctf channel ID\n    If found, the Challenge object is returned.\n    Else, the function returns False\n    \"\"\"\n    ctfs = pickle.load(open(database, \"rb\"))\n    for ctf in ctfs:\n        if ctf.channel_id == ctf_channel_id:\n            for challenge in ctf.challenges:\n                if challenge.name == challenge_name:\n                    return challenge\n\n    return False\n\ndef get_challenges_for_user_id(database, user_id, ctf_channel_id):\n    \"\"\"\n    Fetch a list of all challenges a user is working on for\n    a given CTF.\n    This should technically only return 0 or 1 challenge, as a user\n    can only work on 1 challenge at a time.\n    \"\"\"\n\n    ctfs = pickle.load(open(database, \"rb\"))\n    ctf = list(filter(lambda ctf: ctf.channel_id == ctf_channel_id, ctfs))[0]\n\n    challenges = []\n    for challenge in ctf.challenges:\n        for player in challenge.players:\n            if player.user_id == user_id:\n                challenges.append(challenge)\n\n    return challenges\n\n","sub_path":"helpers/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"339676719","text":"# Pasan Bandara - UofM - 7882322\n# Assignment 2 Part 1\nfrom __future__ import division\nimport numpy as np\nfrom sys import exit\n\n\ndef compute_loss(X,Y,Y_decompose,W,classes,no_of_training_examples,no_of_input_features_with_b,no_of_classes,lemda):\n    bottom = np.dot(X,W)\n    bottom_e = np.exp(bottom)\n    if np.all(np.isfinite(bottom_e)) and not np.any(np.isin(0,bottom_e)): #Check exp saturation\n        bottom_sum = np.sum(bottom_e,axis=1).reshape(no_of_training_examples,1)\n        bottom_sum = np.repeat(bottom_sum, no_of_classes, axis=1)\n        div = np.log(bottom_e/bottom_sum)\n        Mux = np.sum(np.sum(np.multiply(Y_decompose,div),axis=1))\n        fin = -1*Mux/no_of_training_examples\n        W_forced = np.copy(W)\n        W_forced[0,:] = 0 #Remove bias from the weight matrix to calculate the regularization term\n        reg = lemda/2*np.sum(np.sum(np.dot(W_forced.transpose(),W_forced),axis=1))\n        final = fin + reg\n        return final\n    else:\n        print(\"Saturation Detected! Aborted! 
Saved so far learned data!\")\n        print(\"Exiting...\")\n        exit()\n\ndef main():\n    lemda = 0.01\n    no_of_training_examples = 2\n    no_of_input_features_with_b = 4 #(with 1s for bias)\n    no_of_classes = 2\n    classes = np.array([0,1])\n    batch_number = [1]\n\n    W = np.array([[0.12,0.13],[-0.12,0.14],[-0.15,0.16],[-0.15,-0.16]])\n\n    X_ori = np.array([[2,3,4],[5,6,7]])\n    temp2 = np.ones((no_of_training_examples,1)) #1s for bias\n    X = np.concatenate((temp2, X_ori), axis=1)\n    Y = np.array([[1],[0]])\n    Y_decompose = np.zeros((no_of_training_examples,no_of_classes))\n    for k in classes:\n        for row in range(0,no_of_training_examples):\n            if Y[row,0] == k:\n                Y_decompose[row,k] = 1\n            else:\n                Y_decompose[row,k] = 0\n    loss = compute_loss(X,Y,Y_decompose,W,classes,no_of_training_examples,no_of_input_features_with_b,no_of_classes,lemda)\n    print(\"Loss Value:\",loss)\n\n\n\nif __name__== \"__main__\":\n    main()\n","sub_path":"part1_final_final_final.py","file_name":"part1_final_final_final.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"653770580","text":"import scipy.optimize as scipy\nimport numpy as np\n\nA = [[1, 2, -500],\n     [1, 1, -350],\n     [-2, -1, 600],\n     [1, 2, 0],\n     [-1, -2, 0]]\n\nb = [0, 0, 0, 1, -1]\nc = [-3, -4, 0]\nres = scipy.linprog(c, A, b).x\nnp.set_printoptions(precision=3)\nnp.set_printoptions(suppress=True)\nprint(\"X result:\", res)\n\nx1 = res[0] / res[2]\nprint(\"A: {0:5.2f}\".format(x1))\n\nx2 = res[1] / res[2]\nprint(\"B: {0:5.2f}\".format(x2))\n","sub_path":"l10/l10z1.py","file_name":"l10z1.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"124097877","text":"import requests\nimport json\n\nDEVELOPER_KEY = \"AIzaSyBe1fYOrb73NXvMBtdNCSD6Cxi5-uhMVHw\"\nVIDEO_URL_BASE = \"https://www.googleapis.com/youtube/v3/search?part=snippet&type=video\"\nCOMMENT_URL_BASE = \"https://www.googleapis.com/youtube/v3/commentThreads?part=snippet\"\n\n\ndef build_url(baseurl, parameterlist):\n    for parameter in parameterlist:\n        baseurl = baseurl + \"&\" + parameter + \"=\" + parameterlist[parameter]\n    print(\"URL was generated: \", baseurl)\n    return baseurl\n\ndef get_response(url):\n    response = requests.get(url)\n    response_json = json.loads(response.text)\n    return response_json\n\n# fieldarray = [[\"items\"],[\"id\",\"videoId\"]] --> json[\"items\"][\"id\"][\"videoId\"]\ndef get_desired_field(json, fieldarray):\n    if len(fieldarray) != 0:\n        field = fieldarray[0]\n        del fieldarray[0]\n        return get_desired_field(json[field], fieldarray)\n    else:\n        return json\n\ndef main():\n    vidIds = []\n    parameters_list = {\"key\": DEVELOPER_KEY, \"q\": \"La+La+Land\"}\n    url = build_url(VIDEO_URL_BASE, parameters_list)\n    r = get_response(url)\n    \n    number_of_pages_after_first = 0\n    while True:\n        for i in get_desired_field(r, [\"items\"]):\n            vidIds.append(get_desired_field(i, [\"id\", \"videoId\"]))\n\n        if \"nextPageToken\" not in r or number_of_pages_after_first == 10: # the value could be increased for more data\n            break\n        else:\n            # get next search result json\n            parameters_list = {\"key\": DEVELOPER_KEY, \"q\": \"La+La+Land\", \"pageToken\": r[\"nextPageToken\"]}\n            url = build_url(VIDEO_URL_BASE, parameters_list)\n            r = get_response(url)\n            number_of_pages_after_first += 1\n    fs = open(\"comments.txt\", \"w\")\n    fs.write(\"Collected videos, total:{0} IDs:{1} \\n\\n\".format(5 * (number_of_pages_after_first + 1), vidIds))\n\n    for vid_id in 
vidIds:\n parameters_list = {\"key\": DEVELOPER_KEY, \"videoId\": vid_id}\n url = build_url(COMMENT_URL_BASE, parameters_list)\n r = get_response(url)\n while True:\n if \"items\" in r: # if comments are disabled response does not have items but error\n for j in get_desired_field(r, [\"items\"]):\n fs.write(\"{0}{1}\".format(\n get_desired_field(j, [\"snippet\", \"topLevelComment\", \"snippet\", \"textDisplay\"]),\n \"\\n\\n\"))\n if \"nextPageToken\" not in r:\n break\n else:\n # get nex page of comment json\n parameters_list = {\"key\": DEVELOPER_KEY, \"videoId\": vid_id, \"pageToken\": r[\"nextPageToken\"]}\n url = build_url(COMMENT_URL_BASE, parameters_list)\n r = get_response(url)\n print(\"Done: \", vid_id)\n\n fs.close()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"ExtractData.py","file_name":"ExtractData.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"219884105","text":"import random\r\nimport time\r\n\r\ndef Order(player,n,order,i,computer,tern):\r\n \r\n if i>n-1:\r\n i=0\r\n while chckWiner(i,n):\r\n time.sleep(0.1)\r\n \r\n \r\n \r\n \r\n print(\"serve cards -> {}\".format(serv_cards))\r\n print(\"player{}->{}\".format(i,player[i]))\r\n if computer==1 and tern=='C':\r\n card_number=checkPresent(player,i,serv_cards,order,n,computer,tern)\r\n tern='H'\r\n elif computer==1 and tern=='H':\r\n card_number=int(input(\"Enter card no to serve\"))\r\n tern='C'\r\n else:\r\n card_number=int(input(\"Enter card no to serve\"))\r\n if card_number<0 or card_number>len(player[i])-1:\r\n print(\"invalid\")\r\n player[i].append(random.choice(list(final_desk)))\r\n \r\n elif len(serv_cards)==0 or checkMatchning(player[i][card_number],serv_cards[len(serv_cards)-1],player,n,order,i,card_number,computer,tern):\r\n serv_cards.append(player[i][card_number]) \r\n player[i].remove(player[i][card_number])\r\n print(\"card removed\")\r\n else:\r\n print(\"invalid\")\r\n player[i].append(random.choice(list(final_desk)))\r\n \r\n \r\n if order==0 or order==10:\r\n if i>=n-1:\r\n i=0\r\n else:\r\n i=i+1\r\n elif order==1:\r\n if i<=0:\r\n i=n-1\r\n else:\r\n i=i-1\r\n removeFromFinal(final_desk,serv_cards)\r\n else:\r\n print(\"Game Over\")\r\n \r\ndef chckWiner(i,n):\r\n if i<0:\r\n i=n+i\r\n print(len(player[i]))\r\n if len(player[i])==0:\r\n print(\"player {} win\".format(i))\r\n return False\r\n return True\r\n\r\n\r\ndef removeFromFinal(final_desk,serv_cards):\r\n final_desk=set(final_desk)-set(serv_cards)\r\n return final_desk \r\n \r\ndef checkPresent(player,i,serv_cards,order,n,computer,tern):\r\n for card1_no in range(len(player[i])):\r\n \r\n if len(serv_cards)==0 or (checkMatchning(player[i][card1_no],serv_cards[len(serv_cards)-1],player,n,order,i,card1_no,computer,tern)):\r\n if len(serv_cards)!=0:\r\n card1_no=checkAnotherOption(serv_cards[len(serv_cards)-1],card1_no,player[i])\r\n return card1_no\r\n return -1\r\n\r\n\r\n\r\ndef checkAnotherOption(serv_card,card_no,List):\r\n res0=serv_card.split('_')\r\n color=res0[0]\r\n for cno in range(len(List)):\r\n res=List[cno].split(\"_\")\r\n if res[0]==color and res[1].isdigit():\r\n card_no=cno\r\n return card_no\r\n \r\n\r\ndef chooseColor(player,i):\r\n color=[]\r\n for cr in player[i]:\r\n res2=cr.split('_')\r\n color.append(res2[0])\r\n cl=most_frequent(color)\r\n print(cl)\r\n return cl\r\n\r\ndef most_frequent(List): \r\n counter = 0\r\n color = List[0] \r\n \r\n for i in List: \r\n curr_frequency = List.count(i) \r\n 
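# Added note (not in the original file): calling List.count(i) inside this loop makes most_frequent O(n^2); collections.Counter(List).most_common(1)[0][0] is an O(n) equivalent, though ties may resolve differently.\r\n        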
if(curr_frequency> counter): \r\n counter = curr_frequency \r\n color = i \r\n return color\r\n \r\n\r\n\r\n \r\ndef checkMatchning(card1,card2,player,n,order,i,card_number,computer,tern):\r\n res1=card1.split('_')\r\n res2=card2.split('_')\r\n if res1 == res2:\r\n return True\r\n elif res1[0]==res2[0]:\r\n checkAction(res1,order,i,player,n,card_number,computer,tern)\r\n return True\r\n elif res1[1]==res2[1]:\r\n checkAction(res1,order,i,player,n,card_number,computer,tern)\r\n return True\r\n else:\r\n return False\r\n\r\n\r\n\r\n\r\ndef drawTwo(player,i,n):\r\n if i>=n-1:\r\n player[0].append(random.choice(list(final_desk)))\r\n player[0].append(random.choice(list(final_desk)))\r\n else:\r\n player[i].append(random.choice(list(final_desk)))\r\n player[i].append(random.choice(list(final_desk)))\r\n \r\n\r\n\r\ndef wild(i,card_number,computer,tern,player):\r\n print(\"i {0},card number {1},computer {2},term {3},player {4}\".format(i,card_number,computer,tern,player))\r\n if tern=='H':\r\n print(\"choose color:\")\r\n print(\" R->RED\")\r\n print(\" Y->YELLOW\")\r\n print(\" G->GREEN\")\r\n print(\" B->.BLUE\")\r\n color=input(\"Enter color\")\r\n else:\r\n color=chooseColor(player,i)\r\n print(\"color= {}\".format(color))\r\n player[i][card_number]='{}_wild'.format(color)\r\n\r\ndef checkAction(res1,order,i,player,n,card_number,computer,tern):\r\n if res1[1]==\"Reverse\" and (order==0 or order==10):\r\n if i==0:\r\n serv_cards.append(player[i][card_number])\r\n player[i].remove(player[i][card_number])\r\n Order(player,n,1,n,computer,tern)\r\n else:\r\n serv_cards.append(player[i][card_number])\r\n player[i].remove(player[i][card_number])\r\n Order(player,n,1,i-1,computer,tern)\r\n elif res1[1]==\"Reverse\" and order==1:\r\n if i==n:\r\n serv_cards.append(player[i][card_number])\r\n player[i].remove(player[i][card_number])\r\n Order(player,n,0,0,computer,tern)\r\n else:\r\n Order(player,n,1,i+1,computer,tern)\r\n \r\n elif res1[1]==\"DrawTwo\" and (order==0 or order==10):\r\n drawTwo(player,i+1,n)\r\n elif res1[1]==\"DrawTwo\" and order==1:\r\n drawTwo(player,i-1,n)\r\n elif res1[1]==\"Wild\":\r\n res1[0]=wild(i,card_number,computer,tern,player)\r\n print(\"color {}\".format(res1[0]))\r\n elif res1[1]==\"WildDrawFour\":\r\n \r\n if order==0 or order==10:\r\n drawTwo(player,i+1,n)\r\n drawTwo(player,i+1,n)\r\n elif order==1:\r\n drawTwo(player,i-1,n)\r\n drawTwo(player,i-1,n)\r\n elif res1[1]==\"Skip\":\r\n if i==0 and (order==0 or order==10): \r\n player[i].remove(player[i][card_number])\r\n Order(player,n,order,i+2,computer,tern)\r\n elif i==0 and order==1:\r\n player[i].remove(player[i][card_number])\r\n Order(player,n,order,n-1,computer,tern)\r\n elif i==n and order==1:\r\n player[i].remove(player[i][card_number])\r\n Order(player,n,order,n-2,computer,tern)\r\n elif i==n and (order==0 or order==10):\r\n player[i].remove(player[i][card_number])\r\n Order(player,n,order,1,computer,tern)\r\n elif order==0 or order==10:\r\n player[i].remove(player[i][card_number])\r\n Order(player,n,order,i+2,computer,tern)\r\n else :\r\n player[i].remove(player[i][card_number])\r\n Order(player,n,order,i-2,computer,tern)\r\n return res1[0]\r\n\r\n \r\n \r\n \r\n 
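# Added note (not in the original file): each list below enumerates one UNO color suit as 'COLOR_VALUE' strings; checkMatchning() splits a card name on '_' and treats two cards as matching when either the color part (res[0]) or the value part (res[1]) agrees.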
\r\n\r\nred_desk=(['R_1','R_2','R_3','R_4','R_5','R_6','R_7','R_8','R_9','R_Reverse','R_DrawTwo','R_Wild','R_WildDrawFour','R_Skip'])\r\nyellow_desk=(['Y_1','Y_2','Y_3','Y_4','Y_5','Y_6','Y_7','Y_8','Y_9','Y_Reverse','Y_DrawTwo','Y_Wild','Y_WildDrawFour','Y_Skip'])\r\nGreen_desk=(['G_1','G_2','G_3','G_4','G_5','G_6','G_7','G_8','G_9','G_Reverse','G_DrawTwo','G_Wild','G_WildDrawFour','G_Skip'])\r\nblue_desk=(['B_1','B_2','B_3','B_4','B_5','B_6','B_7','B_8','B_9','B_Reverse','B_DrawTwo','B_Wild','B_WildDrawFour','B_Skip'])\r\n\r\nserv_cards=[]\r\nfinal_desk= red_desk+yellow_desk+Green_desk+blue_desk\r\nprint(\"1.Play with human\")\r\nprint(\"2.play with computer (only 2) \")\r\nchoice=int(input(\"Enter choice:\"))\r\nif choice==1:\r\n n=int(input(\"Enter no of player\"))\r\n no_of_cards=int(input(\"enter initial number of cards\"))\r\n player=[]\r\n for i in range(0,n):\r\n sample=random.sample(final_desk,no_of_cards)\r\n player.append(sample) \r\n \r\n Order(player,n,10,0,0,'H')\r\nelse:\r\n no_of_cards=int(input(\"enter initial number of cards\"))\r\n player=[]\r\n for i in range(0,2):\r\n final_desk=list(final_desk)\r\n sample=random.sample(final_desk,no_of_cards)\r\n final_desk=set(final_desk)-set(sample)\r\n player.append(list(sample)) \r\n Order(player,2,10,0,1,'C')\r\n \r\n \r\n\r\n ","sub_path":"Practics/UNO3.py","file_name":"UNO3.py","file_ext":"py","file_size_in_byte":7899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"500142308","text":"# import sklearn\nfrom sklearn.naive_bayes import GaussianNB\nimport numpy as np\nimport json\nimport random\n\ndef get_config(configfile):\n\tconfig = []\n\ttry:\n\t\tFile = open(configfile,'r')\n\t\tconfig = json.load(File)\n\texcept Exception as e:\n\t\tprint(str(e))\n\telse:\n\t\tFile.close()\n\treturn config\n\ndef get_matrix(config):\n\tmatrix = {}\n\ttry:\n\t\tFile = open(config['matrix'],'r')\n\t\tmatrix = json.load(File)\n\texcept Exception as e:\n\t\tprint(str(e))\n\telse:\n\t\tFile.close()\n\treturn matrix\n\ndef get_target(config, len):\n\ttar = []\n\tfor i in range(len):\n\t\ttar.append(random.randint(0,1))\n\treturn\ttar\n\nconfig = get_config('config.json')\nmatrix = get_matrix(config)\nfeatures = list()\ncount = 0\nfor file in matrix:\n\t# if count ==0:\n\t# \tprint([tag for tag in matrix[file]])\n\t# print([matrix[file][tag] for tag in matrix[file]])\n\tfeatures.append([matrix[file][tag] for tag in matrix[file]])\n\tcount+=1\n\ntarget = get_target(config, len(features))\n# print(target)\n# print(len(features))\n\narray_features = np.array(features)\nclassifier = GaussianNB()\nmodel = classifier.fit(array_features,target)\noutput_target = model.predict(array_features)\nc = 0\nfor i in range(len(target)):\n\tif(target[i] == output_target[i]):\n\t\tc+=1\nprint(c,len(target))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"ml-models/mlmodels.py","file_name":"mlmodels.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"425349246","text":"from chalice import Chalice\n\napp = Chalice(app_name='hackday-outage-banner-chalice')\nimport re\nfrom datetime import datetime\nimport boto3\n\nfrom urllib.parse import unquote\n\napp = Chalice(app_name='hackday-outage-banner-chalice')\n\ndef extractDates(message):\n startTimeMatch = re.search('Start: (\\d\\d:\\d\\d)', message)\n startDateMatch = re.search('Start: .+? on (.+? 
\\d\\d\\d\\d)', message)\n endTimeMatch = re.search('End: (\\d\\d:\\d\\d)', message)\n endDateMatch = re.search('End: .+? on (.+? \\d\\d\\d\\d)', message)\n\n startTime = startTimeMatch.group(1)\n startDate = startDateMatch.group(1)\n endTime = endTimeMatch.group(1)\n endDate = endDateMatch.group(1)\n\n return startTime, startDate, endTime, endDate\n\ndef populateTemplate(startTime, startDate, endTime, endDate):\n with open(\"chalicelib/example.md\", \"r\") as fin:\n try:\n with open(\"/tmp/example-updated.md\", \"w\") as fout:\n for line in fin:\n line = line.replace('{startTime}', startTime)\n line = line.replace('{startDate}', startDate)\n line = line.replace('{endTime}', endTime)\n line = line.replace('{endDate}', endDate)\n fout.write(line)\n except Exception as e:\n print (e)\n\ndef convertTimeToSimpleTime(time):\n return datetime.strptime(time, \"%H:%M\").strftime(\"%I:%M %p\")\n\ndef convertDateToSimpleDate(date):\n return datetime.strptime(date, \"%A %d %B %Y\").strftime(\"%Y-%m-%d\")\n\ndef uploadTemplate(startDate, endDate):\n s3 = boto3.resource('s3')\n \n try:\n # s3.Object('taxonline-stack-dev8-web', '/tmp/example-updated.md').put(Metadata={'foo': 'bar'})\n s3.meta.client.upload_file('/tmp/example-updated.md', 'myob-ex-taxonline-development-public', 'outage/arl-au/info.md',\\\n ExtraArgs={\n \"Metadata\": {\n \"datetime-format\": \"YYYY-MM-DD HH:mm:ss\",\n \"end-datetime-aest\": endDate,\n \"start-datetime-aest\": startDate,\n },\n # \"GrantFullControl\": \"id=3d631a4d7a1072068695276efb3870abd81efdf6c3ea1c2b6a3e161f4e35efd9\",\n \"CacheControl\": \"no-cache\",\n \"ContentType\": \"text/markdown\",\n 'ACL': 'public-read'\n },)\n except Exception as e:\n print (e)\n\ndef decode_query(raw_request):\n # print('RAW_REQUEST = ' + raw_request)\n starting_index = raw_request.index('&text=') + 6\n # print('starting_index', starting_index)\n ending_index = raw_request.index('&response_url')\n # print('ending_index', ending_index)\n return raw_request[starting_index:ending_index]\n\n@app.route('/', methods=['POST'], content_types=['application/x-www-form-urlencoded', 'application/json'])\ndef index():\n try:\n request = app.current_request\n message = unquote(decode_query(request.raw_body.decode(\"utf-8\"))).replace('+', ' ')\n print ('MESSAGE = ' + message)\n except Exception as e:\n print (e)\n startTime, startDate, endTime, endDate = extractDates(message)\n startTimeSimple, endTimeSimple = convertTimeToSimpleTime(startTime), convertTimeToSimpleTime(endTime)\n startDateSimple = convertDateToSimpleDate(startDate)\n endDateSimple = convertDateToSimpleDate(endDate)\n\n startDateTimeMetadata = startDateSimple + ' ' + startTime + ':00'\n endDateTimeMetadata = endDateSimple + ' ' + endTime + ':00'\n \n populateTemplate(startTimeSimple, startDate, endTimeSimple, endDate)\n uploadTemplate(startDateTimeMetadata, endDateTimeMetadata)\n \n return {'success': 'true'}\n\n# @app.route('/')\n# def index():\n# return {'hello': 'world'}\n\n# The view function above will return {\"hello\": \"world\"}\n# whenever you make an HTTP GET request to '/'.\n#\n# Here are a few more examples:\n#\n# @app.route('/hello/{name}')\n# def hello_name(name):\n# # '/hello/james' -> {\"hello\": \"james\"}\n# return {'hello': name}\n#\n# @app.route('/users', methods=['POST'])\n# def create_user():\n# # This is the JSON body the user sent in their POST request.\n# user_as_json = app.current_request.json_body\n# # We'll echo the json body back to the user in a 'user' key.\n# return {'user': user_as_json}\n#\n# See the 
README documentation for more examples.\n#\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"167949065","text":"from Crypto.Cipher import AES\r\nfrom random import randint\r\ndef xor_string(s1,s2):\r\n ans=b''\r\n if(type(s1)==type(\"\")):\r\n s1=s1.encode()\r\n if(type(s2)==type(\"\")):\r\n s2=s2.encode()\r\n for i in range(len(s1)):\r\n ans = ans + bytes([s1[i]^s2[i]])\r\n return ans\r\ndef random_key(size):\r\n ans=b''\r\n for i in range(size):\r\n byte = randint(0,255)\r\n ans = ans + bytes([byte])\r\n return ans\r\nkey = random_key(16)\r\naes = AES.new(key,AES.MODE_ECB)\r\ndef xor_bytes(s1,s2):\r\n ans=b''\r\n for i in range(len(s1)):\r\n ans=ans + bytes([s1[i]^s2[i]])\r\n return ans\r\nIV = random_key(16)\r\ndef AES_CBC_encrypt(m):\r\n c0 = cipher = IV\r\n ans = b''\r\n for i in range(0,len(m),16):\r\n c0 = cipher\r\n cipher = aes.encrypt(xor_bytes(c0,m[i:i+16]))\r\n ans = ans + cipher\r\n return ans\r\ndef AES_CBC_decrypt(c):\r\n ans = b''\r\n cipher = IV\r\n for i in range(0,len(c),16):\r\n c0 = cipher\r\n cipher = c[i:i+16]\r\n xored_against = aes.decrypt(cipher)\r\n ans = ans + xor_string(xored_against,c0)\r\n return ans\r\ndef oracle(m):\r\n prepend = b\"comment1=cooking%20MCs;userdata=\"\r\n append = b\";comment2=%20like%20a%20pound%20of%20bacon\"\r\n if(type(m)==type(\"\")):\r\n m = m.encode()\r\n m = m.replace(b\";\",b\"\\\";\\\"\")\r\n m = m.replace(b\"=\",b\"\\\"=\\\"\")\r\n m = prepend + m + append \r\n pad = 16 - len(m)%16\r\n if(pad!=16):\r\n m = m + bytes([pad]) * pad\r\n return AES_CBC_encrypt(m)\r\ndef is_admin(s):\r\n if(type(s)==type(\"\")):\r\n s = s.encode()\r\n plain = AES_CBC_decrypt(s)\r\n if(plain.find(b\";admin=True;\")!=-1):\r\n return True\r\n return False\r\ndef detect_block_size():\r\n j = 1\r\n state = 0\r\n state0 = 0\r\n while(True):\r\n state0 = state\r\n if(j==1):\r\n m = b'A' * j\r\n cipher = oracle(m)\r\n state = len(cipher)\r\n j = j + 1\r\n continue\r\n else:\r\n m = b'A' * j\r\n state = len(oracle(m))\r\n j = j + 1\r\n if(state0 != state):\r\n return state-state0\r\nblock_size = detect_block_size()\r\ndef attacker():\r\n attack = b\"A\" * 16 + b';dmi=ru;'\r\n cipher = oracle(attack)\r\n new_cipher = bytearray(cipher)\r\n new_cipher[34] = new_cipher[34] ^ ord(b'\"') ^ ord(b'a')\r\n new_cipher[38] = new_cipher[38] ^ ord(b'\"') ^ ord(b'n')\r\n new_cipher[40] = new_cipher[40] ^ ord(b'\"') ^ ord(b'T')\r\n new_cipher[43] = new_cipher[43] ^ ord(b'\"') ^ ord(b'e')\r\n new_cipher = bytes(new_cipher)\r\n return is_admin(new_cipher)\r\nprint(attacker())","sub_path":"2018csb1237_16.py","file_name":"2018csb1237_16.py","file_ext":"py","file_size_in_byte":2594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"262881413","text":"import numpy as np\nimport pprint\nimport sys\nif \"../\" not in sys.path:\n sys.path.append(\"../\") \nfrom lib.envs.gridworld import GridworldEnv\n#print(\"The sys path is %s\" %sys.path)\npp = pprint.PrettyPrinter(indent=2)\nenv = GridworldEnv()\nenv._render()\ndef policy_eval(policy, env, discount_factor=1.0, theta=0.00001):\n \"\"\"\n Evaluate a policy given an environment and a full description of the environment's dynamics.\n \n Args:\n policy: [S, A] shaped matrix representing the policy.\n env: OpenAI env. 
env.P represents the transition probabilities of the environment.\n env.P[s][a] is a (prob, next_state, reward, done) tuple.\n theta: We stop evaluation once our value function change is less than theta for all states.\n discount_factor: lambda discount factor.\n \n Returns:\n Vector of length env.nS representing the value function.\n \"\"\"\n # Start with a random (all 0) value function\n cnt = 0\n V = np.zeros(env.nS)\n VV = np.zeros(env.nS)\n \n while True:\n delta = 0\n if cnt <4:\n print(V.reshape(env.shape))\n cnt = cnt+1\n # For each state, perform a \"full backup\"\n for s in range(env.nS):\n v = 0\n # Look at the possible next actions\n #print(\"\")\n for a, action_prob in enumerate(policy[s]):\n # For each action, look at the possible next states...\n #print(\"111\")\n for prob, next_state, reward, done in env.P[s][a]:\n #print(prob)\n #print(done)\n # Calculate the expected value\n v += action_prob * prob * (reward + discount_factor * V[next_state])\n # How much our value function changed (across any states)\n delta = max(delta, np.abs(v - V[s]))\n VV[s] = v\n # Stop evaluating once our value function change is below a threshold\n V=VV\n if delta < theta:\n break\n return np.array(V)\n\n\n\nrandom_policy = np.ones([env.nS, env.nA]) / env.nA\nv = policy_eval(random_policy, env)\nprint(\"Value Function:\")\nprint(v)\nprint(\"\")\n\nprint(\"Reshaped Grid Value Function:\")\nprint(v.reshape(env.shape))\nprint(\"\")\n\nexpected_v = np.array([0, -14, -20, -22, -14, -18, -20, -20, -20, -20, -18, -14, -22, -20, -14, 0])\nnp.testing.assert_array_almost_equal(v, expected_v, decimal=2)\n","sub_path":"DPpy/policyevaluationsolution.py","file_name":"policyevaluationsolution.py","file_ext":"py","file_size_in_byte":2403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"369272635","text":"import geocoder\nimport time\nimport sys\nimport json\nimport oauth2\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.basemap import Basemap\nimport numpy as np\nfrom numpy import array\nimport matplotlib.colors as colors\n\nCONSUMER_KEY = \"YourConsumerKey\";\nCONSUMER_SECRET = \"YourConsumerSecret\";\nACCESS_TOKEN = \"YourAccessToken\";\nACCESS_SECRET = \"YourAccessSecret\";\n\ndef oauth_req(url, key, secret, http_method='GET', post_body='', http_headers=None):\n consumer = oauth2.Consumer(key=CONSUMER_KEY, secret=CONSUMER_SECRET)\n token = oauth2.Token(key=key, secret=secret)\n client = oauth2.Client(consumer, token)\n resp, content = client.request( url, method=http_method, body=post_body, headers=http_headers )\n return content\ndef call_followers_api(cursor,username):\n response = oauth_req('https://api.twitter.com/1.1/followers/ids.json?cursor={0}&screen_name={1}&count=5000'.format(cursor,username),ACCESS_TOKEN, ACCESS_SECRET)\n return response\ndef call_users_show_api_user_id(user_id):\n response = oauth_req('https://api.twitter.com/1.1/users/show.json?user_id={}'.format(user_id),ACCESS_TOKEN, ACCESS_SECRET)\n return response\ndef call_users_show_api_username(screen_name):\n response = oauth_req('https://api.twitter.com/1.1/users/show.json?screen_name={}'.format(screen_name),ACCESS_TOKEN, ACCESS_SECRET)\n return response\ndef geocode(place):\n return geocoder.arcgis('{0}'.format(place.encode('ascii','ignore')))\n #in next commit geocoder timeout will be handled too\ndef get_all_followers_ids(twitter_username):\n followers_list=list()\n cursor = -1\n followers = json.loads(call_followers_api(cursor,twitter_username))\n i = 0\n 
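# Added note (not in the original file): dict.has_key() exists only on Python 2; on Python 3 this retry loop would raise AttributeError, and the equivalent test is 'errors' in followers.\n    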
while(followers.has_key('errors') == True and str(followers['errors'][0]['message'])=='Rate limit exceeded'):\n print('Sleeping for 5 min...')\n time.sleep(300)\n followers = json.loads(call_followers_api(cursor,twitter_username))\n print(followers)\n if(followers.has_key('errors') == True):\n print(str(followers['errors'][0]['message']))\n if(followers.has_key('errors') == True and str(followers['errors'][0]['message'])=='Sorry, that page does not exist.'):\n print(\"err\")\n if (followers.has_key(\"ids\")):\n global total_followers_count \n total_followers_count = len(followers['ids'])\n next_cursor = followers['next_cursor']\n for id in followers['ids']:\n followers_list.append(id)\n i=i+1\n if(i%1000==0):\n print(i)\n while(next_cursor!=0):\n followers = json.loads(call_followers_api(next_cursor,twitter_username))\n while(followers.has_key('errors') == True and str(followers['errors'][0]['message'])=='Rate limit exceeded'):\n print('Sleeping for 5 min...')\n time.sleep(300)\n followers = json.loads(call_followers_api(cursor,twitter_username))\n print(followers)\n next_cursor = followers['next_cursor']\n for id in followers['ids']:\n followers_list.append(id)\n i=i+1\n if(i%1000==0):\n print(i)\n else:\n print(\"Profile private or non existant. Exit...\")\n sys.exit()\n return followers_list\n\ndef draw_basemap():\n m = Basemap(projection='mill',llcrnrlat=-90,urcrnrlat=90,llcrnrlon=-180,urcrnrlon=180,resolution='c')\n m.drawcoastlines()\n m.drawcountries()\n m.drawmapboundary()\n return m\ndef map_followers_hexbin_heatmap(username):\n lats=[]\n longs=[]\n for user_id in (get_all_followers_ids(username)): \n user_info = json.loads(call_users_show_api_user_id(user_id))\n user_xy = geocode(user_info['location'])\n if user_xy.lat is not None and user_xy.lng is not None:\n lats.append(user_xy.lat)\n longs.append(user_xy.lng)\n m = draw_basemap()\n x,y = m(longs, lats) \n m.hexbin(array(x), array(y), gridsize=30, mincnt=1, cmap='summer')\n m.colorbar(location='bottom')\n plt.title(\"{} out of {} followers successfully geocoded of {}\".format(len(lats),total_followers_count, username))\n plt.show()\n\ndef map_followers_gclines(username):\n m = draw_basemap()\n x=0\n try:\n user_loc = geocode((json.loads(call_users_show_api_username(username)))['location']) \n except:\n print(\"User's location can not be geocoded. 
Exiting...\")\n sys.exit()\n for user_id in (get_all_followers_ids(username)): \n user_info = json.loads(call_users_show_api_user_id(user_id))\n user_xy = geocode(user_info['location'])\n if user_xy.lat is not None and user_xy.lng is not None:\n m.drawgreatcircle(user_loc.lng,user_loc.lat, user_xy.lng, user_xy.lat,linewidth=2,color='b')\n x=x+1\n plt.title(\"{} out of {} followers successfully geocoded of {}\".format(x,total_followers_count, username))\n plt.show()\n#USAGE:\n#map_followers_hexbin_heatmap(\"TwitterUserName\")\n#map_followers_gclines(\"TwitterUsername\")\n\n","sub_path":"mapping_twitter_followers.py","file_name":"mapping_twitter_followers.py","file_ext":"py","file_size_in_byte":5017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"539609751","text":"\r\nhashrate=dict();\r\nhashrate['timetravel']=88.2\t\t\t\t\t\t\t\t\t\t\t\t#tpruvot \r\nhashrate['tribus']=160 #tpruvot mar\r\nhashrate['cryptonight']=2450/pow(2.0,20) \t\t\t\t #tpruvot mar\r\nhashrate['neoscrypt']=3.8 #tpruvot mar\r\nhashrate['lyra2v2']=160 #alexis\r\nhashrate['skein']=2120 #alexis\r\nhashrate['myr-gr']=280 #alexis\r\nhashrate['groestl']=150 #tpruvot mar\r\nhashrate['spread']=29.0 #sp-spread\r\nhashrate['nist5']=195 #alexis\r\nhashrate['whirl']=187 #alexis\r\nhashrate['luffa']=1240 #tpruvot mar\r\nhashrate['penta']=475 #tpruvot mar\r\nhashrate['bastion']=47 #tpruvot mar\r\nhashrate['keccak']=3025 #alexis\r\nhashrate['x15']=36 #alexis\r\nhashrate['skein2']=1810 #alexis\r\nhashrate['lbry']=1182 #alexis\r\nhashrate['yescrypt']=0\r\nhashrate['xevan']=14\r\nhashrate['lyra2z']=6.25 #tpruvot mar\r\nhashrate['eqhash']=1718/pow(2.0,21); #ewbf sol/s->h/s\r\nhashrate['ethash']=105; #claymore\r\nhashrate['decred-dual']=1485/pow(2.0,20) #claymore\r\nhashrate['sia-dual']=2300 #claymore\r\nhashrate['sia']=7000 #alexis\r\nhashrate['decred']=11060/pow(2.0,20) #alexis\r\nhashrate['skunk']=100 #tpruvot mar\r\nhashrate['hmq1725']=16.8 #tpruvot mar\r\nhashrate['x11evo']=61.77 #alexis\r\nhashrate['blake2s']=15000 #alexis\r\nhashrate['x17']=45 #alexis\r\nhashrate['c11']=64\t\t\t #alexis adjusting for chaincoin coin distribution\r\nhashrate['sib']=48 #alexis\r\nhashrate['veltor']=140 #alexis\r\nhashrate['m7']=0\r\nhashrate['pascal']=3880\r\nhashrate['bitcore']=58 #tpruvot\r\nhashrate['hsr']=35\t\t\t\t\t\t\t#tpruvot\r\n\r\n\r\n#Wallet miners (algo,stratum,user,pass)\r\nalexis='../ccminer-alexis-oct2016/ccminer -a %s -o %s -u %s -p %s'\r\ntpruvot='../ccminer-tpruvot-mar2017/ccminer -a %s -o %s -u %s -p %s'\r\nspread='wine ../ccminer-sp-spread9/spreadminer.exe %.0s -o %s -u %s -p %s'\r\n\r\nI_wallet_mine=list();\r\nI_wallet_mine.append(['log','skein2',alexis]);\r\nI_wallet_mine.append(['aur','myr-gr',alexis]);\r\nI_wallet_mine.append(['aur','skein',alexis]);\r\nI_wallet_mine.append(['spr','spread',spread,' -x 30']);\r\nI_wallet_mine.append(['chc','c11',alexis]);\r\nI_wallet_mine.append(['xlr','nist5',alexis]);\r\nI_wallet_mine.append(['vlt','veltor',alexis]);\r\nI_wallet_mine.append(['boat','hmq1725',tpruvot]);\r\nI_wallet_mine.append(['j','keccak',alexis]);\r\n\r\n\r\n#Pool miners\r\neth_pool='us1.ethermine.org:4444'\r\ndcr_pool='stratum+tcp://yiimp.ccminer.org:3252'\r\nsia_pool='stratum+tcp://sia-us-east1.nanopool.org:7777'\r\nzec_pool='us1-zcash.flypool.org --port 
13333'\r\n\r\neth_addr='0xd1fede8eaa0e3c4e2f1ebcd069dc6f8abe9abf2f.x'\r\ndcr_addr='DsmJdGd8JtQNijg66JhUbC6BNwLVZnoUWnJ';\r\nsia_addr='9ff06bc95bcf5d409514f9ae20a73b63653ce0c5f7fabef043fe749a8013905b1a91e1131830/x';\r\nzec_addr='t1TDtxcYDrrWXsWHvr2s8LxFEsFCuCQzYeW.x'\r\n\r\ni_decred=42;\r\ni_sia=70;\r\n","sub_path":"config_1070.py","file_name":"config_1070.py","file_ext":"py","file_size_in_byte":4156,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"433599390","text":"'''\r\nCreated on Nov 11, 2019\r\n\r\n@author: Aice\r\n'''\r\n\r\nimport pyodbc,importlib,random,time\r\nfrom datetime import datetime\r\n\r\ncard_collection = importlib.import_module(\"card\")\r\n\r\ndef get_player(entity):\r\n if hasattr(entity,'owner'):\r\n return entity.owner\r\n elif hasattr(entity,'hero_power'):\r\n return entity\r\n else:\r\n return None\r\n \r\ndef cleaned(name):\r\n return name.replace(\" \",\"_\").replace(\"-\",\"_\").replace(\"'\",\"\").replace(\":\",\"\").replace(\"!\",\"\").replace(\"+\",\"\").replace(\".\",\"\").replace(\",\",\"\").replace(\"(\",\"\").replace(\")\",\"\").replace('\"',\"\")\r\n\r\ndef get_connection():\r\n#Hide sensitive information in a configuration file, and read from it\r\n config_file=open(\"database.config\",\"r\")\r\n params=[]\r\n for line in config_file:\r\n params.append(line.split(\"=\")[1].rstrip(\"\\n\"))\r\n driver,server,database,uid,pwd = params[0],params[1],params[2],params[3],params[4]\r\n \r\n #Create database connection with parameters read from the configuration file\r\n conn = pyodbc.connect(driver=driver,\r\n server=server,\r\n database=database,\r\n uid=uid,\r\n pwd=pwd)\r\n \r\n return conn\r\n\r\n\r\n\r\ndef create_tables():\r\n \r\n conn=get_connection()\r\n cursor = conn.cursor()\r\n \r\n # execute SQL query\r\n cursor.execute(\"IF NOT EXISTS (SELECT * FROM sysobjects WHERE name='card_meta' and xtype='U')\\\r\n CREATE TABLE card_meta (\\\r\n card_name nvarchar (50) NOT NULL,\\\r\n type nvarchar (20),\\\r\n class nvarchar (20),\\\r\n race nvarchar (50),\\\r\n cardset nvarchar (50),\\\r\n rarity nvarchar (20),\\\r\n cost int,\\\r\n attack int,\\\r\n health int,\\\r\n durability int,\\\r\n craft_cost int,\\\r\n disenchant_cost int,\\\r\n artist nvarchar(50) ,\\\r\n card_text ntext,\\\r\n back_text ntext,\\\r\n lore ntext,\\\r\n PRIMARY KEY (card_name))\")\r\n \r\n cursor.execute(\"IF NOT EXISTS (SELECT * FROM sysobjects WHERE name='card_abilities' and xtype='U')\\\r\n CREATE TABLE card_abilities (\\\r\n card_name nvarchar (50) NOT NULL,\\\r\n ability nvarchar (50),\\\r\n FOREIGN KEY (card_name) REFERENCES card_meta (card_name))\")\r\n \r\n cursor.execute(\"IF NOT EXISTS (SELECT * FROM sysobjects WHERE name='card_tags' and xtype='U')\\\r\n CREATE TABLE card_tags (\\\r\n card_name nvarchar (50) NOT NULL,\\\r\n tag nvarchar (50),\\\r\n FOREIGN KEY (card_name) REFERENCES card_meta (card_name))\")\r\n \r\n cursor.execute(\"IF NOT EXISTS (SELECT * FROM sysobjects WHERE name='players' and xtype='U')\\\r\n CREATE TABLE players (\\\r\n username varchar (20) NOT NULL,\\\r\n password varchar (20) NOT NULL,\\\r\n current_hero varchar (20),\\\r\n deck text,\\\r\n db_username varchar (20),\\\r\n PRIMARY KEY (username))\")\r\n \r\n cursor.execute(\"IF NOT EXISTS (SELECT * FROM sysobjects WHERE name='events' and xtype='U')\\\r\n CREATE TABLE [dbo].[events](\\\r\n [eventID] int IDENTITY(100000,1) PRIMARY KEY,\\\r\n [sessionID] int NULL,\\\r\n [player] [nvarchar](50) NULL,\\\r\n [event] [nvarchar](200) NULL,\\\r\n 
[register_time] [datetime2](7) NULL,\\\r\n [resolve_time] [datetime2](7) NULL,\\\r\n [event_type] [nvarchar](50) NULL)\")\r\n \r\n cursor.execute(\"IF NOT EXISTS (SELECT * FROM sysobjects WHERE name='matches' and xtype='U')\\\r\n CREATE TABLE [dbo].[matches](\\\r\n [sessionID] int IDENTITY(100000,1) PRIMARY KEY,\\\r\n [player1] [nvarchar](50) NULL,\\\r\n [player2] [nvarchar](50) NULL,\\\r\n [queue_in_time] [datetime2](7) NULL,\\\r\n [match_time] [datetime2](7) NULL,\\\r\n [random_seed] int NULL,\\\r\n [player1_hand_str] text NULL,\\\r\n [player2_hand_str] text NULL,\\\r\n [player1_deck_str] text NULL,\\\r\n [player2_deck_str] text NULL)\")\r\n \r\n # make the change persistent\r\n cursor.commit()\r\n \r\n #close connection after using it\r\n conn.close()\r\n\r\ndef insert_card(card_name=\"\",card_type=\"\",card_class=\"Neutral\",race=\"\",cardset=\"Basic\",rarity=\"Common\",\\\r\n cost=0,attack=0,health=0,durability=0,\\\r\n craft_cost=-1,disenchant_cost=-1,artist=\"\",card_text=\"\",back_text=\"\",lore=\"\",play_format=\"\"):\r\n \r\n conn=get_connection()\r\n cursor = conn.cursor()\r\n \r\n SQL_TEMPLATE=\"INSERT INTO [Hearth].[dbo].[card_meta]\\\r\n (card_name,type,class,race,cardset,rarity,cost,attack,health,durability,\\\r\n craft_cost,disenchant_cost,artist,card_text,back_text,lore,format)\\\r\n VALUES ('$card_name$','$type$','$class$','$race$','$cardset$','$rarity$',\\\r\n $cost$,$attack$,$health$,$durability$,$craft_cost$,\\\r\n $disenchant_cost$,'$artist$','$card_text$','$back_text$','$lore$','$format$')\"\r\n \r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$card_name$\", card_name.replace(\"'\",\"''\"))\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$type$\", card_type)\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$class$\", card_class)\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$race$\", race)\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$cardset$\", cardset.replace(\"'\",\"''\"))\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$rarity$\", rarity)\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$cost$\", str(cost))\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$attack$\", str(attack))\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$health$\", str(health))\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$durability$\", str(durability))\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$craft_cost$\", str(craft_cost))\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$disenchant_cost$\", str(disenchant_cost))\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$artist$\", artist.replace(\"'\",\"''\"))\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$card_text$\", card_text.replace(\"'\",\"''\"))\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$back_text$\", back_text.replace(\"'\",\"''\"))\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$lore$\", lore.replace(\"'\",\"''\"))\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$format$\", play_format)\r\n \r\n print(SQL_TEMPLATE)\r\n cursor.execute(SQL_TEMPLATE)\r\n \r\n cursor.commit()\r\n conn.close()\r\n\r\ndef insert_ability(card_name,ability): \r\n \r\n conn=get_connection()\r\n cursor = conn.cursor() \r\n \r\n SQL_TEMPLATE=\"INSERT INTO [Hearth].[dbo].[card_abilities] VALUES ('$card_name$','$ability$')\"\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$card_name$\", card_name.replace(\"'\",\"''\"))\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$ability$\", ability.replace(\"'\",\"''\"))\r\n \r\n print(SQL_TEMPLATE)\r\n cursor.execute(SQL_TEMPLATE)\r\n \r\n cursor.commit()\r\n conn.close()\r\n\r\ndef insert_tag(card_name,tag): \r\n \r\n conn=get_connection()\r\n cursor = conn.cursor() \r\n \r\n SQL_TEMPLATE=\"INSERT INTO 
[Hearth].[dbo].[card_tags] VALUES ('$card_name$','$tag$')\"\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$card_name$\", card_name.replace(\"'\",\"''\"))\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$tag$\", tag.replace(\"'\",\"''\"))\r\n \r\n print(SQL_TEMPLATE)\r\n cursor.execute(SQL_TEMPLATE)\r\n \r\n cursor.commit()\r\n conn.close()\r\n\r\ndef get_card_metadata(card_name): \r\n metadata=[]\r\n \r\n conn=get_connection()\r\n cursor = conn.cursor() \r\n \r\n SQL_TEMPLATE=\"SELECT [card_name],[type],[class],[race],[cardset],[rarity],[cost],[attack],[health],[durability],\\\r\n [craft_cost],[disenchant_cost],[artist],[card_text],[back_text],[lore]\\\r\n FROM [Hearth].[dbo].[card_meta] WHERE [card_name]='$name$'\"\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$name$\", card_name.replace(\"'\",\"''\"))\r\n cursor.execute(SQL_TEMPLATE)\r\n results=cursor.fetchall()\r\n \r\n if len(results)>0:\r\n metadata.extend(results[0])\r\n \r\n abilities=[]\r\n SQL_TEMPLATE=\"SELECT [ability] FROM [Hearth].[dbo].[card_abilities] WHERE [card_name]='$name$'\"\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$name$\", card_name.replace(\"'\",\"''\"))\r\n cursor.execute(SQL_TEMPLATE)\r\n results=cursor.fetchall()\r\n for ability in results:\r\n abilities.append(ability[0].strip())\r\n metadata.append(abilities)\r\n \r\n tags=[]\r\n SQL_TEMPLATE=\"SELECT [tag] FROM [Hearth].[dbo].[card_tags] WHERE [card_name]='$name$'\"\r\n SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$name$\", card_name.replace(\"'\",\"''\"))\r\n cursor.execute(SQL_TEMPLATE)\r\n results=cursor.fetchall()\r\n for tag in results:\r\n tags.append(tag[0].strip())\r\n metadata.append(tags)\r\n\r\n else:\r\n metadata = [card_name,\"\",\"Neutral\",\"\",\"Basic\",\"Common\",1,1,1,1,-1,-1,\"\",\"\",\"\",\"\",[],[]]\r\n \r\n conn.close()\r\n \r\n return metadata\r\n\r\ndef check_username_in_database(username):\r\n conn = get_connection()\r\n cursor = conn.cursor()\r\n cursor.execute(\"SELECT * FROM [Hearth].[dbo].[players] WHERE [username]='\"+username+\"' AND [db_username]=CURRENT_USER\")\r\n result1 = len(cursor.fetchall())>0\r\n \r\n cursor.execute(\"SELECT * FROM [Hearth].[dbo].[players] WHERE [username]='\"+username+\"' AND [db_username]!=CURRENT_USER\")\r\n result2 = len(cursor.fetchall())>0\r\n \r\n conn.close()\r\n \r\n return result1,result2\r\n\r\ndef get_user_info(username):\r\n conn = get_connection()\r\n cursor = conn.cursor()\r\n cursor.execute(\"SELECT [username],[password],[current_hero],[deck] FROM [Hearth].[dbo].[players] WHERE [username]='\"+username+\"'\")\r\n player_name, password, current_hero, deck= cursor.fetchone()\r\n conn.close()\r\n \r\n return player_name, password, current_hero, deck\r\n\r\ndef create_new_player(username,password):#Create game level logins\r\n conn = get_connection()\r\n cursor = conn.cursor()\r\n encrypted_password=encrypt(password)\r\n SQL=\"INSERT INTO [Hearth].[dbo].[players] VALUES ('\"+username+\"','\"+encrypted_password+\"',\"+\"null\"+\",\"+\"null\"+\",CURRENT_USER)\"\r\n #print(SQL)\r\n cursor.execute(SQL)\r\n print(\"Your player info has been successfully created and stored into database.\\n\")\r\n conn.commit()\r\n conn.close()\r\n\r\ndef create_login(username,stid):#Create database level logins\r\n conn = get_connection()\r\n cursor = conn.cursor()\r\n password=\"A@\"+stid\r\n cursor.execute(\"IF NOT EXISTS (SELECT [loginname] FROM master.dbo.syslogins WHERE [name]='\"+username+\"') CREATE LOGIN [\"+username+\"] WITH PASSWORD = '\"+password+\"'\")\r\n cursor.execute(\"USE [Hearth]\")\r\n cursor.execute(\"IF NOT EXISTS 
(SELECT [name] FROM sys.database_principals WHERE [name]='\"+username+\"') CREATE USER [\"+username+\"] FOR LOGIN [\"+username+\"]\")\r\n print(\"Login [\"+username+\"] is created \\n\")\r\n conn.commit()\r\n conn.close()\r\n \r\ndef grant_permission(player_db_name):\r\n conn = get_connection()\r\n cursor = conn.cursor()\r\n\r\n cursor.execute(\"GRANT SELECT ON [Hearth].[dbo].[card_meta] TO [\"+player_db_name+\"]\")\r\n cursor.execute(\"GRANT SELECT ON [Hearth].[dbo].[card_tags] TO [\"+player_db_name+\"]\")\r\n cursor.execute(\"GRANT SELECT ON [Hearth].[dbo].[card_abilities] TO [\"+player_db_name+\"]\")\r\n cursor.execute(\"GRANT SELECT ON [Hearth].[dbo].[events] TO [\"+player_db_name+\"]\")\r\n cursor.execute(\"GRANT INSERT ON [Hearth].[dbo].[events] TO [\"+player_db_name+\"]\")\r\n cursor.execute(\"GRANT UPDATE ON [Hearth].[dbo].[events] TO [\"+player_db_name+\"]\")\r\n cursor.execute(\"GRANT SELECT ON [Hearth].[dbo].[matches] TO [\"+player_db_name+\"]\")\r\n cursor.execute(\"GRANT INSERT ON [Hearth].[dbo].[matches] TO [\"+player_db_name+\"]\")\r\n cursor.execute(\"GRANT UPDATE ON [Hearth].[dbo].[matches] TO [\"+player_db_name+\"]\")\r\n cursor.execute(\"GRANT SELECT ON [Hearth].[dbo].[players] TO [\"+player_db_name+\"]\")\r\n cursor.execute(\"GRANT INSERT ON [Hearth].[dbo].[players] TO [\"+player_db_name+\"]\")\r\n cursor.execute(\"GRANT UPDATE ON [Hearth].[dbo].[players] TO [\"+player_db_name+\"]\")\r\n \r\n print(player_db_name+\" permissions processed.\\n\")\r\n conn.commit()\r\n conn.close()\r\n \r\ndef add_card_to_draft(player,card):\r\n conn = get_connection()\r\n cursor = conn.cursor()\r\n cursor.execute(\"SELECT deck from [Hearth].[dbo].[players] WHERE [username]='\"+player.player_name+\"'\")\r\n deck_str=cursor.fetchone()\r\n if deck_str[0] is not None:\r\n new_deck_str=deck_str[0]+\";\"+card.name\r\n else:\r\n new_deck_str=card.name\r\n \r\n cursor.execute(\"UPDATE [Hearth].[dbo].[players] SET [deck]='\"+new_deck_str.replace(\"'\",\"''\")+\"' WHERE [username]='\"+player.player_name+\"'\")\r\n conn.commit()\r\n conn.close()\r\n\r\ndef update_current_hero(player):\r\n conn = get_connection()\r\n cursor = conn.cursor()\r\n cursor.execute(\"UPDATE [Hearth].[dbo].[players] SET [current_hero]='\"+player.hero_name.replace(\"'\",\"''\")+\"' WHERE [username]='\"+player.player_name+\"'\")\r\n conn.commit()\r\n conn.close()\r\n\r\ndef update_winner(player):\r\n conn = get_connection()\r\n cursor = conn.cursor()\r\n cursor.execute(\"UPDATE [Hearth].[dbo].[matches] SET [winner]='\"+player.name+\"' WHERE [sessionID]='\"+str(player.board.sessionID)+\"'\")\r\n conn.commit()\r\n conn.close()\r\n \r\ndef get_random_cards(filter_str=\"[cost]>=0\",owner=\"\",k=3,standard=True,by_ability=False):\r\n \r\n conn = get_connection()\r\n cursor = conn.cursor()\r\n \r\n SQL_TEMPLATE = \"SELECT [card_name],[class] FROM [Hearth].[dbo].[card_meta] WHERE (\"+filter_str+\")\"\r\n if by_ability:\r\n SQL_TEMPLATE = \"SELECT [Hearth].[dbo].[card_meta].[card_name],[Hearth].[dbo].[card_meta].[class] FROM [Hearth].[dbo].[card_meta],[Hearth].[dbo].[card_abilities] \\\r\n WHERE [Hearth].[dbo].[card_meta].[card_name]=[Hearth].[dbo].[card_abilities].[card_name]\\\r\n AND (\"+filter_str+\") AND TRIM([ability])='\"+by_ability+\"'\"\r\n if standard:\r\n SQL_TEMPLATE=SQL_TEMPLATE+\" AND [format]='standard'\"\r\n\r\n print(SQL_TEMPLATE)\r\n cursor.execute(SQL_TEMPLATE)\r\n results = cursor.fetchall()\r\n refined_results = adjust_class_cards(results)\r\n 
card_names=random.sample(refined_results,k=min(k,len(refined_results)))\r\n cards=[]\r\n for card_name in card_names:\r\n try:\r\n card=getattr(card_collection,cleaned(card_name[0]))(owner=owner)\r\n except:\r\n card=getattr(card_collection,cleaned(\"Wisp\"))(owner=owner)\r\n cards.append(card)\r\n conn.commit()\r\n conn.close()\r\n \r\n return cards\r\n\r\ndef adjust_class_cards(results):\r\n refined_results=[]\r\n neutral_cards=[]\r\n class_cards=[]\r\n for result in results:\r\n card_name=result[0]\r\n card_class=result[1]\r\n \r\n if card_class==\"Neutral\":\r\n neutral_cards.append((card_name,card_class))\r\n else:\r\n class_cards.append((card_name,card_class))\r\n \r\n '''if card_class!=\"Neutral\" or random.uniform(0,1)>class_chance:\r\n refined_results.append((card_name,card_class))'''\r\n\r\n #return refined_results \r\n return class_cards+random.sample(neutral_cards,k=min(max(10,len(class_cards)),len(neutral_cards)))\r\n \r\n \r\ndef set_standard(card_name,uncollectible_flag=False):\r\n conn = get_connection()\r\n cursor = conn.cursor()\r\n SQL_TEMPLATE = \"UPDATE [Hearth].[dbo].[card_meta] SET [format]='$FORMAT$' WHERE [card_name]='$CARD_NAME$'\"\r\n SQL_TEMPLATE=SQL_TEMPLATE.replace(\"$CARD_NAME$\",card_name.replace(\"'\",\"''\"))\r\n SQL_TEMPLATE=SQL_TEMPLATE.replace(\"$FORMAT$\",{True:'uncollectible',False:'standard'}[uncollectible_flag])\r\n try:\r\n cursor.execute(SQL_TEMPLATE)\r\n except:\r\n print(SQL_TEMPLATE)\r\n conn.commit()\r\n conn.close()\r\n\r\ndef search_opponent(player):\r\n conn = get_connection()\r\n cursor = conn.cursor()\r\n if player.vsAI:\r\n sessionID,seed=queue_in(player)\r\n opponent_name=\"AI\"\r\n cursor.execute(\"UPDATE [Hearth].[dbo].[matches] SET [match_time]='\"+str(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))+\"' WHERE [sessionID]='\"+str(sessionID)+\"'\") \r\n cursor.execute(\"UPDATE [Hearth].[dbo].[matches] SET [player2]='AI' WHERE [sessionID]='\"+str(sessionID)+\"'\") \r\n else:\r\n SQL_TEMPLATE = \"SELECT TOP (1) [player1],[random_seed],[sessionID] FROM [Hearth].[dbo].[matches] WHERE [player2] is NULL AND [player1]!='\"+player.name+\"' AND datediff(SECOND, [queue_in_time], CURRENT_TIMESTAMP)<14460 ORDER BY [queue_in_time] DESC\"\r\n results=[]\r\n try:\r\n cursor.execute(SQL_TEMPLATE)\r\n results = cursor.fetchall() \r\n except:\r\n print(SQL_TEMPLATE)\r\n \r\n if len(results)>0:\r\n opponent_name,seed,sessionID=results[0][0],results[0][1],results[0][2] \r\n cursor.execute(\"UPDATE [Hearth].[dbo].[matches] SET [match_time]='\"+str(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))+\"' WHERE [sessionID]='\"+str(sessionID)+\"'\") \r\n cursor.execute(\"UPDATE [Hearth].[dbo].[matches] SET [player2]='\"+player.name+\"' WHERE [sessionID]='\"+str(sessionID)+\"'\") \r\n else:\r\n sessionID,seed=queue_in(player)\r\n FOUND = False\r\n timer=0.0\r\n while not FOUND:\r\n time.sleep(5)\r\n timer+=5.0\r\n cursor.execute(\"SELECT TOP (1) [player2],[random_seed],[sessionID] FROM [Hearth].[dbo].[matches] WHERE [player1]='\"+player.name+\"' AND [player2] IS NOT NULL AND [sessionID]='\"+str(sessionID)+\"'\")\r\n matches=cursor.fetchall()\r\n if len(matches)>0:\r\n FOUND=True\r\n opponent_name,seed,sessionID=matches[0][0],matches[0][1],matches[0][2]\r\n elif timer>60:\r\n print(\"Updating queue\\n\")\r\n sessionID,seed=queue_in(player) #Requeue\r\n timer=0.0\r\n else:\r\n print(\"Still searching opponent (will only match player who queued in within 60 seconds)\")\r\n \r\n conn.commit()\r\n conn.close() \r\n \r\n return opponent_name,seed,sessionID\r\n \r\ndef 
queue_in(player):\r\n    conn = get_connection()\r\n    cursor = conn.cursor()\r\n    \r\n    SQL_TEMPLATE = \"INSERT INTO [Hearth].[dbo].[matches] ([player1],[player2],[queue_in_time],[random_seed]) VALUES ('$PLAYER1$',NULL,'$QUEUE_TIME$',$SEED$)\"\r\n    SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$PLAYER1$\",player.name)\r\n    SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$QUEUE_TIME$\",str(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\r\n    SQL_TEMPLATE = SQL_TEMPLATE.replace(\"$SEED$\",str(random.randint(1,100)))\r\n    \r\n    try:\r\n        cursor.execute(SQL_TEMPLATE) \r\n    except:\r\n        print(SQL_TEMPLATE)\r\n    \r\n    cursor.execute(\"SELECT TOP (1) [sessionID],[random_seed] FROM [Hearth].[dbo].[matches] WHERE [player1]='\"+player.name+\"' ORDER BY [queue_in_time] DESC\") \r\n    result=cursor.fetchone()\r\n    sessionID=result[0]\r\n    seed=result[1]\r\n    \r\n    conn.commit()\r\n    conn.close()\r\n    \r\n    return sessionID,seed\r\n\r\ndef update_player_starting_cards(player):\r\n    conn = get_connection()\r\n    cursor = conn.cursor()\r\n    cursor.execute(\"SELECT [player1] FROM [Hearth].[dbo].[matches] WHERE [sessionID]='\"+str(player.board.sessionID)+\"'\")\r\n    player1_name=cursor.fetchone()[0]\r\n    player_no={True:1,False:2}[player1_name==player.name]\r\n\r\n    SQL_TEMPLATE1 = \"UPDATE [Hearth].[dbo].[matches] SET [player\"+str(player_no)+\"_hand_str]='$HAND_STR$' WHERE [sessionID]='$SESSION_ID$'\"\r\n    SQL_TEMPLATE2 = \"UPDATE [Hearth].[dbo].[matches] SET [player\"+str(player_no)+\"_deck_str]='$DECK_STR$' WHERE [sessionID]='$SESSION_ID$'\"\r\n    \r\n    hand_str=\"\"\r\n    for card in player.hand:\r\n        hand_str+=card.name+\";\"\r\n    \r\n    deck_str=\"\"\r\n    for card in player.deck.cards:\r\n        deck_str+=card.name+\";\"\r\n    \r\n    SQL_TEMPLATE1=SQL_TEMPLATE1.replace(\"$HAND_STR$\",hand_str.replace(\"'\",\"''\").rstrip(\";\"))\r\n    SQL_TEMPLATE1=SQL_TEMPLATE1.replace(\"$SESSION_ID$\",str(player.board.sessionID))\r\n    SQL_TEMPLATE2=SQL_TEMPLATE2.replace(\"$DECK_STR$\",deck_str.replace(\"'\",\"''\").rstrip(\";\"))\r\n    SQL_TEMPLATE2=SQL_TEMPLATE2.replace(\"$SESSION_ID$\",str(player.board.sessionID))\r\n    \r\n    try:\r\n        cursor.execute(SQL_TEMPLATE1)\r\n        cursor.execute(SQL_TEMPLATE2)\r\n    except:\r\n        print(SQL_TEMPLATE1)\r\n        print(SQL_TEMPLATE2)\r\n    \r\n    conn.commit()\r\n    conn.close() \r\n\r\ndef encrypt(password):\r\n    enc=\"\"\r\n    for c in password:\r\n        enc+=chr(ord(c)+21)\r\n    enc=enc.replace(\"'\",'\"') #str.replace returns a new string, so the result must be assigned for the quote substitution to take effect\r\n    return enc[::-1]\r\n\r\ndef decrypt(password):\r\n    password=password.replace('\"',\"'\") #undo the quote substitution applied in encrypt\r\n    enc=\"\"\r\n    for c in password:\r\n        enc+=chr(ord(c)-21)\r\n    return enc[::-1]\r\n\r\ndef str_to_cards(name,player):\r\n    card_names = name.split(\";\")\r\n    cards=[]\r\n    for card_name in card_names:\r\n        card=getattr(card_collection,cleaned(card_name))(owner=player)\r\n        cards.append(card)\r\n    return cards\r\n    \r\ndef synchronize_opponent_starting_cards(player):\r\n    conn = get_connection()\r\n    cursor = conn.cursor()\r\n    cursor.execute(\"SELECT [player1] FROM [Hearth].[dbo].[matches] WHERE [sessionID]='\"+str(player.board.sessionID)+\"'\")\r\n    player1_name=cursor.fetchone()[0]\r\n    player_no={True:1,False:2}[player1_name==player.name]\r\n\r\n    FOUND=False\r\n    while not FOUND:\r\n        cursor.execute(\"SELECT [player\"+str(player_no)+\"_hand_str],[player\"+str(player_no)+\"_deck_str] FROM [Hearth].[dbo].[matches] WHERE [sessionID]='\"+str(player.board.sessionID)+\"'\")\r\n        results=cursor.fetchall()\r\n        if len(results)>0 and results[0][0] is not None:\r\n            FOUND=True\r\n            hand_str,deck_str=results[0]\r\n        elif player.name==\"AI\":#Copy player hand and deck for AI\r\n            FOUND=True\r\n            cursor.execute(\"SELECT 
[player\"+str(3-player_no)+\"_hand_str],[player\"+str(3-player_no)+\"_deck_str] FROM [Hearth].[dbo].[matches] WHERE [sessionID]='\"+str(player.board.sessionID)+\"'\")\r\n results=cursor.fetchall()\r\n hand_str,deck_str=results[0]\r\n else:\r\n print(\"Waiting for opponent mulligan\")\r\n time.sleep(5)\r\n \r\n\r\n for card in str_to_cards(hand_str,player):\r\n player.add_hand(card)\r\n \r\n player.deck.cards=[]\r\n for card in str_to_cards(deck_str,player):\r\n player.deck.add_card(card,randomize=False)\r\n\r\n conn.close() \r\n \r\n \r\ndef get_events(player,event_type=\"play\"):\r\n conn = get_connection()\r\n cursor = conn.cursor()\r\n SQL_TEMPLATE = \"SELECT [eventID],[event] FROM [Hearth].[dbo].[events] WHERE [sessionID]='\"+str(player.board.sessionID)+\"' AND [player]='\"+player.name+\"' AND [resolve_time] is NULL AND [event_type]='\"+event_type+\"' ORDER BY [register_time] ASC\"\r\n try:\r\n cursor.execute(SQL_TEMPLATE)\r\n results = cursor.fetchall()\r\n\r\n except:\r\n print(SQL_TEMPLATE)\r\n \r\n conn.close() \r\n \r\n return results \r\n\r\ndef insert_event(player,entity,target,event_pos,event_type=\"play\",instant_resolve=False):\r\n source_str=player.name+\":\"+str(int(player.mouse_move_length))+\":\" #Record mouse length when turn ends\r\n target_str=\"::\"\r\n if entity is not None:\r\n if isinstance(entity,str):#Record mouse length and emote choice when turn ends\r\n source_str=player.name+\":\"+str(int(player.mouse_move_length))+\":\"+entity\r\n else:\r\n source_str=str(get_player(entity).name)+\":\"+entity.get_area()+\":\"+str(entity.get_index())\r\n if target is not None:\r\n if target.__class__==int:\r\n target_str=\"::\"+str(target)\r\n else:\r\n target_str=str(get_player(target).name)+\":\"+target.get_area()+\":\"+str(target.get_index())\r\n event_pos_str=str(event_pos[0])+\":\"+str(event_pos[1])\r\n event_str=source_str+\";\"+target_str+\";\"+event_pos_str\r\n \r\n conn = get_connection()\r\n cursor = conn.cursor()\r\n SQL_TEMPLATE = \"INSERT INTO [Hearth].[dbo].[events] ([sessionID],[player],[event],[register_time],[resolve_time],[event_type])\\\r\n VALUES ('$SESSION_ID$','$PLAYER_NAME$','$EVENT$','$REG_TIME$',$RES_TIME$,'$EVENT_TYPE$')\"\r\n SQL_TEMPLATE=SQL_TEMPLATE.replace(\"$SESSION_ID$\",str(player.board.sessionID))\r\n SQL_TEMPLATE=SQL_TEMPLATE.replace(\"$PLAYER_NAME$\",player.name)\r\n SQL_TEMPLATE=SQL_TEMPLATE.replace(\"$EVENT$\",event_str)\r\n SQL_TEMPLATE=SQL_TEMPLATE.replace(\"$REG_TIME$\",str(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\r\n if instant_resolve:\r\n SQL_TEMPLATE=SQL_TEMPLATE.replace(\"$RES_TIME$\",\"'\"+str(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"))+\"'\")\r\n else:\r\n SQL_TEMPLATE=SQL_TEMPLATE.replace(\"$RES_TIME$\",\"NULL\")\r\n SQL_TEMPLATE=SQL_TEMPLATE.replace(\"$EVENT_TYPE$\",event_type)\r\n\r\n try:\r\n cursor.execute(SQL_TEMPLATE)\r\n except:\r\n print(SQL_TEMPLATE)\r\n conn.commit()\r\n conn.close()\r\n\r\ndef resolve_event(eventID):\r\n conn = get_connection()\r\n cursor = conn.cursor()\r\n SQL_TEMPLATE = \"UPDATE [Hearth].[dbo].[events] SET [resolve_time]='$RES_TIME$' WHERE [eventID]='$EVENT_ID$'\"\r\n SQL_TEMPLATE=SQL_TEMPLATE.replace(\"$EVENT_ID$\",str(eventID))\r\n SQL_TEMPLATE=SQL_TEMPLATE.replace(\"$RES_TIME$\",str(datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")))\r\n\r\n try:\r\n cursor.execute(SQL_TEMPLATE)\r\n except:\r\n print(SQL_TEMPLATE)\r\n conn.commit()\r\n conn.close()\r\n \r\nif __name__ == '__main__':\r\n create_tables() \r\n \r\n try:\r\n '''Insert Coin not crawled from gamepedia'''\r\n \r\n 
insert_card(\"The Coin\",\"Spell\",\"\",\"\",\"Basic\",\"\",0,0,0,0,-1,-1,\"Ben Thompson\",\"Gain 1 Mana Crystal this turn only.\",\"\",\"\",'uncollectible')\r\n \r\n '''Insert Basic Hero Powers not crawled from gamepedia'''\r\n \r\n insert_card(\"Armor Up!\",\"Hero Power\",\"Warrior\",\"\",\"Basic\",\"\",2,0,0,0,-1,-1,\"Efrem Palacios\",\"Hero Power Gain 2 Armor.\",\"\",\"\",'uncollectible')\r\n insert_ability(\"Armor Up!\",\"Gain Armor\")\r\n \r\n insert_card(\"Dagger Mastery\",\"Hero Power\",\"Rogue\",\"\",\"Basic\",\"\",2,0,0,0,-1,-1,\"Dave Allsop\",\"Hero Power Equip a 1/2 Dagger.\",\"\",\"\",'uncollectible')\r\n insert_ability(\"Dagger Mastery\",\"Equip\")\r\n insert_tag(\"Dagger Mastery\",\"Weapon-generating\")\r\n \r\n insert_card(\"Demon Claws\",\"Hero Power\",\"Demon Hunter\",\"\",\"Basic\",\"\",1,0,0,0,-1,-1,\"Virtuos\",\"Hero Power +1 Attack this turn.\",\"\",\"\",'uncollectible')\r\n insert_ability(\"Demon Claws\",\"Increment attribute\")\r\n \r\n insert_card(\"Fireblast\",\"Hero Power\",\"Mage\",\"\",\"Basic\",\"\",2,0,0,0,-1,-1,\"Jim Nelson\",\"Hero Power Deal 1 damage.\",\"\",\"\",'uncollectible')\r\n insert_ability(\"Fireblast\",\"Deal damage\")\r\n insert_tag(\"Fireblast\",\"Targeted\")\r\n \r\n insert_card(\"Lesser Heal\",\"Hero Power\",\"Priest\",\"\",\"Basic\",\"\",2,0,0,0,-1,-1,\"Cos Koniotis\",\"Hero Power Restore 2 Health.\",\"\",\"\",'uncollectible')\r\n insert_ability(\"Lesser Heal\",\"Restore Health\")\r\n insert_tag(\"Lesser Heal\",\"Targeted\")\r\n \r\n insert_card(\"Life Tap\",\"Hero Power\",\"Warlock\",\"\",\"Basic\",\"\",2,0,0,0,-1,-1,\"Luca Zontini\",\"Hero Power +1 Attack this turn.\",\"\",\"\",'uncollectible')\r\n insert_ability(\"Life Tap\",\"Deal damage\")\r\n insert_ability(\"Life Tap\",\"Draw cards\")\r\n \r\n insert_card(\"Reinforce\",\"Hero Power\",\"Paladin\",\"\",\"Basic\",\"\",2,0,0,0,-1,-1,\"Theodore Park\",\"Hero Power Summon a 1/1 Silver Hand Recruit.\",\"\",\"\",'uncollectible')\r\n insert_ability(\"Reinforce\",\"Summon\")\r\n insert_tag(\"Reinforce\",\"Recruit-generating\") \r\n \r\n insert_card(\"Shapeshift\",\"Hero Power\",\"Druid\",\"\",\"Basic\",\"\",2,0,0,0,-1,-1,\"Aleksi Briclot\",\"Hero Power +1 Attack this turn. +1 Armor.\",\"\",\"\",'uncollectible')\r\n insert_ability(\"Shapeshift\",\"Increment attribute\")\r\n insert_ability(\"Shapeshift\",\"Gain Armor\") \r\n \r\n insert_card(\"Steady Shot\",\"Hero Power\",\"Hunter\",\"\",\"Basic\",\"\",2,0,0,0,-1,-1,\"Glenn Rane\",\"Hero Power Deal 2 damage to the enemy hero.\",\"\",\"\",'uncollectible')\r\n insert_ability(\"Steady Shot\",\"Deal damage\")\r\n \r\n insert_card(\"Totemic Call\",\"Hero Power\",\"Shaman\",\"\",\"Basic\",\"\",2,0,0,0,-1,-1,\"Massive Black\",\"Hero Power Summon a random Totem.\",\"\",\"\",'uncollectible')\r\n insert_ability(\"Totemic Call\",\"Summon\")\r\n insert_tag(\"Totemic Call\",\"Random\") \r\n insert_tag(\"Totemic Call\",\"Spell Damage-generating\") \r\n insert_tag(\"Totemic Call\",\"Taunt-generating\") \r\n insert_tag(\"Totemic Call\",\"Totem-generating\") \r\n except:\r\n pass\r\n '''Sample \r\n insert_card(\"Shattered Sun Cleric\",\"Minion\",\"Neutral\",\"\",\"Basic\",\"Common\",3,3,2,0,-1,-1,\"Doug Alexander\",\"Battlecry: Give a friendly minion +1/+1.\",\"They always have a spare flask of Sunwell Energy Drink!\",\"The Shattered Sun Offensive is an army of blood elf and draenei priests, paladins, magi and warriors rallied by the naaru to combat Kael'thas Sunstrider's mad bid to use the Sunwell as a portal to summon his master, Kil'jaeden. 
Their operations are centered around the Isle of Quel'Danas, where the Sunwell is located. The Offensive is the culmination of a plea for unity between the draenei priests of the Aldor and the blood elf wizards of the Scryers in an effort to prevent Kil'jaeden's summoning, which would result in the end of all life on Azeroth.\")\r\n insert_ability(\"Shattered Sun Cleric\",\"Battlecry\")\r\n insert_ability(\"Shattered Sun Cleric\",\"Increment Attribute\")\r\n insert_tag(\"Shattered Sun Cleric\",\"Targeted\")\r\n '''\r\n insert_card(\"Sheep (Mean Streets of Gadgetzan)\",\"Minion\",\"Mage, Priest, Warlock\",\"Beast\",\"Mean Streets of Gadgetzan\",\"Common\",1,1,1,0,-1,-1,\"Evegeniy Zagumennyy\",\"\",\"\")","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":29735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"621942200","text":"from __future__ import division, absolute_import\r\nimport math\r\nfrom attr import attr, attributes, Factory\r\nfrom .. import conf\r\nfrom ..helpers import near_equal\r\nfrom ..vector3d import Vector3D, Heading3D\r\n\r\nfrom .space_object import SpaceObject\r\n\r\n@attributes(hash=False)\r\nclass Missile(SpaceObject):\r\n\ttable = 'missiles'\r\n\ttype_name = \"Missile\"\r\n\tmass = attr(default=0.0)\r\n\tfuel = attr(default=0)\r\n\tfuel_efficiency = attr(default=0.0)\r\n\tthrust = attr(default=0)\r\n\tangular_velocity = attr(default=0.0)\r\n\tdamage_range = attr(default=1.0)\r\n\tdamage = attr(default=0)\r\n\tfall_off = attr(default=1.0)\r\n\tmax_velocity = attr(default=300000)\r\n\ttarget = attr(default=None)\r\n\theading = attr(default=Factory(lambda: Vector3D(Heading3D(0, 0))))\r\n\tdesired_heading = attr(default=Factory(lambda: Vector3D(Heading3D(0, 0))))\r\n\tlaunched_from = attr(default=None)\r\n\tdestination = attr(default=None)\r\n\thas_exploded = attr(default=False)\r\n\taccelerating = attr(default=True)\r\n\r\n\r\n\t@classmethod\r\n\tdef display_name(cls):\r\n\t\treturn \"Missile\"\r\n\r\n\tdef cycle(self):\r\n\t\tdestination = self.destination\r\n\t\ttravelling_vector = destination - self.location\r\n\t\ttravelling_distance = travelling_vector.length\r\n\t\tif travelling_distance < self.damage_range:\r\n\t\t\tself.explode()\r\n\t\t\treturn\r\n\t\tacceleration = self.thrust / self.mass\r\n\t\tcurrent_speed = self.velocity.length\r\n\t\tif self.desired_heading != travelling_vector.normalized:\r\n\t\t\theading_x_y = travelling_vector.heading_xy\r\n\t\t\theading_z = travelling_vector.heading_z\r\n\t\t\tself.desired_heading = Vector3D(Heading3D(heading_x_y, heading_z))\r\n\t\ttry:\r\n\t\t\ttime_to_stop = current_speed / acceleration + 1.0\r\n\t\texcept ZeroDivisionError:\r\n\t\t\treturn\r\n\t\tself.cycle_thrusters()\r\n\t\tif current_speed > travelling_distance:\r\n\t\t\tself.velocity = self.destination - self.location\r\n\t\telse:\r\n\t\t\tself.cycle_engines()\r\n\t\tsuper(Missile, self).cycle()\r\n\r\n\tdef cycle_thrusters(self):\r\n\t\tif self.heading == self.desired_heading:\r\n\t\t\treturn\r\n\t\tangular_accel = self.angular_velocity\r\n\t\tif not self.mass:\r\n\t\t\treturn\r\n\t\tangular_accel /= math.sqrt(self.mass)\r\n\t\thead_xy = self.heading.heading_xy\r\n\t\thead_z = self.heading.heading_z\r\n\t\tdest_xy = self.desired_heading.heading_xy\r\n\t\tdest_z = self.desired_heading.heading_z\r\n\t\tif head_xy > dest_xy:\r\n\t\t\tif (head_xy - dest_xy) < 180:\r\n\t\t\t\thead_xy -= angular_accel\r\n\t\t\t\tif head_xy < dest_xy:\r\n\t\t\t\t\thead_xy = 
dest_xy\r\n\t\t\telse:\r\n\t\t\t\thead_xy += angular_accel\r\n\t\t\t\tif head_xy >= 360:\r\n\t\t\t\t\thead_xy -= 360\r\n\t\t\t\t\tif head_xy > dest_xy:\r\n\t\t\t\t\t\thead_xy = dest_xy\r\n\t\telif head_xy < dest_xy:\r\n\t\t\tif (dest_xy - head_xy) < 180:\r\n\t\t\t\thead_xy += angular_accel\r\n\t\t\t\tif head_xy > dest_xy:\r\n\t\t\t\t\thead_xy = dest_xy\r\n\t\t\telse:\r\n\t\t\t\thead_xy -= angular_accel\r\n\t\t\t\tif head_xy < 0:\r\n\t\t\t\t\thead_xy += 360\r\n\t\t\t\t\tif head_xy < dest_xy:\r\n\t\t\t\t\t\thead_xy = dest_xy\r\n\t\tif head_z > dest_z:\r\n\t\t\tif (head_z - dest_z) < 180:\r\n\t\t\t\thead_z -= angular_accel\r\n\t\t\t\tif head_z < dest_z:\r\n\t\t\t\t\thead_z = dest_z\r\n\t\t\telse:\r\n\t\t\t\thead_z += angular_accel\r\n\t\t\t\tif head_z >= 360:\r\n\t\t\t\t\thead_z -= 360\r\n\t\t\t\t\tif head_z > dest_z:\r\n\t\t\t\t\t\thead_z = dest_z\r\n\t\telif head_z < dest_z:\r\n\t\t\tif (dest_z - head_z) < 180:\r\n\t\t\t\thead_z += angular_accel\r\n\t\t\t\tif head_z > dest_z:\r\n\t\t\t\t\thead_z = dest_z\r\n\t\t\telse:\r\n\t\t\t\thead_z -= angular_accel\r\n\t\t\t\tif head_z < 0:\r\n\t\t\t\t\thead_z += 360\r\n\t\t\t\t\tif head_z < dest_z:\r\n\t\t\t\t\t\thead_z = dest_z\r\n\t\tself.heading = Vector3D(Heading3D(head_xy, head_z))\r\n\r\n\tdef cycle_engines(self):\r\n\t\tif not self.accelerating:\r\n\t\t\treturn\r\n\t\tif not self.velocity and not self.max_velocity:\r\n\t\t\tif self.heading == self.desired_heading:\r\n\t\t\t\tself.accelerating = False\r\n\t\t\t\treturn\r\n\t\tcurrent_speed = self.velocity.length\r\n\t\tif self.velocity.normalized == self.heading and near_equal(current_speed, self.max_velocity, 0.001) and self.max_velocity != 0:\r\n\t\t\t#Desired speed and heading achieved.\r\n\t\t\tself.accelerating = False\r\n\t\t\treturn\r\n\t\tfuel_to_consume = self.thrust * self.fuel_efficiency\r\n\t\tif fuel_to_consume > self.fuel:\r\n\t\t\tself.accelerating = False\r\n\t\t\treturn\r\n\t\tself.fuel -= fuel_to_consume\r\n\t\tif conf.revert_thrust > 0.0:\r\n\t\t\tif self.max_velocity < (-self.max_velocity * conf.revert_thrust):\r\n\t\t\t\tself.max_velocity = -self.max_velocity * conf.revert_thrust\r\n\t\ttotal_thrust = self.thrust\r\n\t\ttotal_mass = self.mass\r\n\t\tif not total_thrust:\r\n\t\t\tself.accelerating = False\r\n\t\t\treturn\r\n\t\tif not total_mass:\r\n\t\t\t#Bail or we're gonna divide by 0.\r\n\t\t\treturn\r\n\t\tacceleration = total_thrust / total_mass\r\n\t\tif self.max_velocity < 0:\r\n\t\t\tif self.velocity.length <= -self.max_velocity:\r\n\t\t\t\treverse_heading = Vector3D()\r\n\t\t\t\treverse_heading -= self.heading\r\n\t\t\t\tself.velocity += reverse_heading * acceleration\r\n\t\t\tif self.velocity.length > -(self.max_velocity - acceleration):\r\n\t\t\t\tself.velocity = self.velocity.normalized * (self.velocity.length - acceleration)\r\n\t\t\telif self.velocity.length > self.max_velocity:\r\n\t\t\t\tself.velocity = self.velocity.normalized * -self.max_velocity\r\n\t\telse:\r\n\t\t\tif self.velocity.length <= self.max_velocity:\r\n\t\t\t\tself.velocity += self.heading * acceleration\r\n\t\t\tif self.velocity.length > (self.max_velocity + acceleration):\r\n\t\t\t\tnew_direction = self.velocity + (self.heading * acceleration)\r\n\t\t\t\tself.velocity = new_direction.normalized * (self.velocity.length - acceleration)\r\n\t\t\telif self.velocity.length > self.max_velocity:\r\n\t\t\t\tnew_direction = self.velocity + (self.heading * acceleration)\r\n\t\t\t\tself.velocity = new_direction.normalized * self.max_velocity\r\n\r\n\tdef explode(self):\r\n\t\tif self.has_exploded: # 
Avoid an ugly infinite loop if there's more than one missile\r\n\t\t\treturn\r\n\t\tself.has_exploded = True\r\n\t\tfor obj in self.universe.objects_around(self.location, self.damage_range):\r\n\t\t\tif obj is not self and hasattr(obj, 'handle_weapon_impact'):\r\n\t\t\t\tdistance = (self.location - obj.location).length\r\n\t\t\t\tdamage = self.damage - self.fall_off * distance # should do something fancier\r\n\t\t\t\tobj.handle_weapon_impact(damage, self)\r\n\t\tself.universe.remove(self)\r\n\r\n\tdef handle_weapon_impact(self, a_damage, a_weapon):\r\n\t\tif a_weapon is self:\r\n\t\t\treturn\r\n\t\tself.explode()\r\n","sub_path":"pyspace/space_objects/missile.py","file_name":"missile.py","file_ext":"py","file_size_in_byte":5969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"617340545","text":"\n### Sum of digits for a number\n\n\nmyvar=int(input(\"Enter your number: \"))\nmyvar_l=len(str(myvar))\ndsum=0\n\ndef dsum_f(n, dsum):\n    while (n>0):\n        dsum+=(myvar%(10**(n))//10**(n-1))\n        n-=1\n    return dsum\n\nprint(dsum_f(myvar_l, dsum))\n","sub_path":"Basic programs/Digit sum calculator/File.py","file_name":"File.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"259614527","text":"# imports\nimport pickle\nimport argparse\nimport json\n\ndef create_results_dict(inp_file):\n    results_dict = {}\n    \n    results = open(inp_file, \"r\")\n    for line in results:\n        \"\"\"\n        Create a results dictionary\n        \"\"\"\n        splits = line.strip().split(\"=\")\n        results_dict[splits[0]] = json.loads(splits[1].replace(\"'\", \"\\\"\"))\n\n    return results_dict\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser(description='Create pickle of all the pre-computed query results.')\n    parser.add_argument('-qik_scene_match', default=\"pre_constructed_data/QIK_Scene_Match_Pre_Results_Dict.txt\", metavar='data', help='QIK video retrieval results, ranked using scene matching', required=False)\n    parser.add_argument('-qik_lcs', default=\"pre_constructed_data/QIK_LCS_Pre_Results_Dict.txt\", metavar='data', help='QIK video retrieval results, ranked using LCS', required=False)\n    parser.add_argument('-qik_ted_and_lcs', default=\"pre_constructed_data/QIK_LCS_And_TED_Pre_Results_Dict.txt\", metavar='data', help='QIK video retrieval results, ranked using LCS and TED', required=False)\n    parser.add_argument('-qik_scene_match_ted_and_lcs', default=\"pre_constructed_data/QIK_Scene_Match_LCS_And_TED_Pre_Results_Dict.txt\", metavar='data', help='QIK video retrieval results, ranked using scene matching, LCS and TED', required=False)\n    parser.add_argument('-dns_0', default=\"pre_constructed_data/DnS_0_Pre_Results_Dict.txt\", metavar='data', help='DnS results, with re-ranking percentage set at 0', required=False) # This is the results from a hijacked execution\n    parser.add_argument('-dns_0_5', default=\"pre_constructed_data/DnS_0_5_Pre_Results_Dict.txt\", metavar='data', help='DnS results, with re-ranking percentage set at 0.5', required=False)\n    parser.add_argument('-dns_1', default=\"pre_constructed_data/DnS_1_Pre_Results_Dict.txt\", metavar='data', help='DnS results, with re-ranking percentage set at 1', required=False)\n    parser.add_argument('-csq', default=\"pre_constructed_data/CSQ_Pre_Results_Dict.txt\", metavar='data', help='CSQ results', required=False)\n    parser.add_argument('-out', default=\"pre_constructed_data/Video_Retrieval_Results.pkl\", metavar='data', help='Pickled 
results file.', required=False)\n    args = parser.parse_args()\n\n    # Create the results dictionary\n    qik_scene_match_results_dict = create_results_dict(args.qik_scene_match)\n    qik_lcs_results_dict = create_results_dict(args.qik_lcs)\n    qik_ted_and_lcs_results_dict = create_results_dict(args.qik_ted_and_lcs)\n    qik_scene_match_ted_and_lcs_results_dict = create_results_dict(args.qik_scene_match_ted_and_lcs)\n    dns_0_results_dict = create_results_dict(args.dns_0)\n    dns_0_5_results_dict = create_results_dict(args.dns_0_5)\n    dns_1_results_dict = create_results_dict(args.dns_1) \n    csq_results_dict = create_results_dict(args.csq)\n    \n    # Combining all the results.\n    results_dict = {}\n    for video in qik_ted_and_lcs_results_dict:\n        try:\n            results_dict[video] = {**qik_scene_match_results_dict[video], **qik_lcs_results_dict[video], **qik_ted_and_lcs_results_dict[video], **qik_scene_match_ted_and_lcs_results_dict[video], **dns_0_results_dict[video], **dns_0_5_results_dict[video], **dns_1_results_dict[video], **csq_results_dict[video]}\n        except:\n            print(\"Video not present\")\n\n    # Converting the results to a pickle file.\n    with open(args.out, \"wb\") as f:\n        pickle.dump(results_dict, f)\n    \n","sub_path":"QIK-Videos/Evaluate/create_results_pickle.py","file_name":"create_results_pickle.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"531032331","text":"\ndef main():\n    a=int(input(\"enter a number:\"))\n    i=1\n    \n    sum=0\n    for i in range(1,a):\n        if(a%i==0):\n            sum=sum+i\n    if(sum==a):\n        print(\"perfect\")\n    else:\n        print(\"not perfect\") \n    \n\nif __name__==\"__main__\":main()\n\n","sub_path":"perfect.py","file_name":"perfect.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"575580839","text":"#!/usr/bin/python3\nimport csv\nimport ipaddress\nimport argparse\nimport logging\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.DEBUG, format='%(message)s')\n\nparser = argparse.ArgumentParser()\nparser.add_argument('ip', nargs='+')\nargs = parser.parse_args()\n\nrotten_ips = {}\nnetworks = []\n\n\ndef get_ip_list_from_dump():\n    with open('dump.csv', 'r', errors='ignore') as csv_file:\n        has_header = csv.Sniffer().has_header(csv_file.readline())\n        csv_file.seek(0)  # Rewind\n        if has_header:\n            next(csv_file, None)  # Skip header row\n        reader = csv.reader(csv_file, delimiter=';')\n        for row in reader:\n            for ip_address in row[0].split('|'):\n                ip_address = ip_address.strip()\n                if not ip_address:\n                    continue\n                try:\n                    rotten_ips[ip2long(ip_address)] = ip_address\n                except ValueError:\n                    networks.append(ipaddress.ip_network(ip_address))\n\n\ndef ip2long(ip_as_str):\n    return int(ipaddress.ip_address(ip_as_str))\n\n\ndef contains_in_single_list(ip_str):\n    ip_long = ip2long(ip_str)\n    if ip_long in rotten_ips:\n        return rotten_ips[ip_long]\n    return None\n\n\ndef contains_in_networks_list(ip_str):\n    ip_address = ipaddress.ip_address(ip_str)\n    for network in networks:\n        if ip_address in network:\n            return network\n    return None\n\n\ndef check_ip(ip_str):\n    result = contains_in_single_list(ip_str) or contains_in_networks_list(ip_str)\n    if result:\n        logger.info('%s - BAD (%s)', ip_str, result)\n    else:\n        logger.info('%s - GOOD', ip_str)\n\n\nif __name__ == '__main__':\n    get_ip_list_from_dump()\n    logger.debug('single ip addresses size = %s', len(rotten_ips))\n    logger.debug('networks size = %s', len(networks))\n    for ip in 
args.ip:\n check_ip(ip)\n","sub_path":"rotten.py","file_name":"rotten.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"133086418","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport socket\nimport sys\nimport logging\nimport time\n\n\nsock = None\nmc_host = '127.0.0.1'\nmc_port = 11211\nst_host = '127.0.0.1'\nst_port = 8125\n\n\nap = argparse.ArgumentParser()\nap.add_argument('-m', '--memcached', help='memcached server host[:port]', required=True)\nap.add_argument('-n', '--name', help='node name, example: n1368', required=True)\nap.add_argument('-s', '--statsd', help='statsd server host[:port]', required=True)\n\noptions = ap.parse_args()\n\n_ = options.memcached.split(':')\n\nif len(_) == 1:\n mc_host = _[0]\nelse:\n mc_host, mc_port = _\n\n_ = options.statsd.split(':')\n\nif len(_) == 1:\n st_host = _[0]\nelse:\n st_host, st_port = _\n\n\ndef send_metric(name, value):\n global st_host, st_port\n _ = name % value\n msg = _.encode('ascii')\n print(msg)\n\n # sock = socket.socket(socket.AF_INET,\n # socket.SOCK_DGRAM | socket.SOCK_CLOEXEC)\n # sock.sendto(msg, (st_host, st_port))\n\n\ndef conn():\n global sock, mc_host, mc_port\n sock = None\n sock = socket.socket(socket.AF_INET,\n socket.SOCK_STREAM | socket.SOCK_CLOEXEC)\n try:\n sock.connect((mc_host, mc_port))\n except Exception as err:\n logging.exception('')\n time.sleep(2)\n conn()\n\n\nmonitor_items = [\n 'curr_connections',\n 'get_cmd',\n 'set_cmd',\n 'get_hits',\n 'get_misses',\n 'bytes_read',\n 'bytes_written',\n 'accepting_conns',\n 'bytes',\n 'curr_items',\n 'total_items',\n 'evictions'\n]\n\n\n\nconn()\n\n_ = b'stats\\n'\nsock.send(_)\n\na = sock.recv(10240)\nlines = a.decode('ascii').split('\\r\\n')\n\nfor line in lines:\n _ = line.split(' ')\n if len(_) != 3:\n continue\n __, k, v = _\n\n if k in monitor_items:\n name = 'cache.memcached.%s.%s' % (options.name, k) + ':%s|g'\n value = v\n send_metric(name, value)\n","sub_path":"python/udp/memcached.py","file_name":"memcached.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"218240703","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom numpy import mean,pi,cos,sin,sqrt,tan,arctan2,exp,dot,array,log,inf, eye, zeros, ones, arange,reshape,concatenate,diag\r\nfrom matplotlib.pyplot import *\r\nfrom numpy.random import uniform as rand\r\nfrom numpy.random import randn as randn\r\nfrom numpy.linalg import inv, det, norm, eig\r\nfrom scipy.linalg import sqrtm,expm,norm,block_diag\r\nfrom scipy.signal import place_poles\r\nfrom mpl_toolkits.mplot3d import Axes3D\r\nimport random\r\n\r\nimport numpy.random as rnd\r\nfrom matplotlib.patches import Ellipse,Rectangle,Circle, Wedge, Polygon\r\n\r\nfrom matplotlib.collections import PatchCollection\r\n\r\n\r\n# Unicode https://en.wikipedia.org/wiki/List_of_Unicode_characters\r\n# for instance to get θ : shift + ctr + U03B8 \r\n# U+03B1 α alpha; U+03B2 β beta; U+03B3;\t Gamma \t0419; U+03B4 δ Delta;\r\n#U+03B5 Epsilon; U+03B6 Zeta; U+03B7 Eta; U+03B8 θ Theta;\r\n#U+03BB Lambda; U+03BC Mu; U+03BD Nu; U+03BE Xi; U+03C0 Pi; U+03C1 Rho;\r\n# U+03C3 Sigma; U+03C4 Tau; U+03C6 φ Phi; U+03C8 ψ Psi; U+03C9 Omega\r\n# U+0393 Γ\r\n\r\n\r\n\r\ndef eulermat(φ,θ,ψ):\r\n Ad_i = array([[0, 0, 0],[0,0,-1],[0,1,0]])\r\n Ad_j = array([[0,0,1],[0,0,0],[-1,0,0]])\r\n Ad_k = array([[0,-1,0],[1,0,0],[0,0,0]])\r\n M = expm(ψ*Ad_k) @ expm(θ*Ad_j) 
@ expm(φ*Ad_i)\r\n    return(M) \r\n \r\ndef move_motif(M,x,y,θ):\r\n    M1=ones((1,len(M[1,:])))\r\n    M2=concatenate((M, M1), axis=0)\r\n    R = array([[cos(θ),-sin(θ),x], [sin(θ),cos(θ),y]])\r\n    return(R @ M2) \r\n \r\n \r\ndef draw_tank(x):\r\n    x=x.flatten()\r\n    M = array([[1,-1,0,0,-1,-1,0,0,-1,1,0,0,3,3,0], [-2,-2,-2,-1,-1,1,1,2,2,2,2,1,0.5,-0.5,-1]])\r\n    M=move_motif(M,x[0],x[1],x[2])\r\n    plot(M[0],M[1],\"darkblue\",linewidth=2) #linewidth must be a keyword argument; a bare trailing 2 would be parsed by plot() as new data\r\n    \r\n    \r\n\r\ndef draw_ellipse(c,Γ,η,ax,col): # Gaussian confidence ellipse with artist\r\n    #draw_ellipse_artist(array([[1],[2]]),eye(2),0.9,ax,[1,0.8-0.3*i,0.8-0.3*i])\r\n    if (norm(Γ)==0):\r\n        Γ=Γ+0.001*eye(len(Γ[1,:]))\r\n    A=sqrtm(-2*log(1-η)*Γ) \r\n    w, v = eig(A) \r\n    v1=array([[v[0,0]],[v[1,0]]])\r\n    v2=array([[v[0,1]],[v[1,1]]]) \r\n    f1=A @ v1\r\n    f2=A @ v2 \r\n    phi = (arctan2(v1 [1,0],v1[0,0]))\r\n    alpha=phi*180/3.14\r\n    e = Ellipse(xy=c, width=2*norm(f1), height=2*norm(f2), angle=alpha) \r\n    ax.add_artist(e)\r\n    e.set_clip_box(ax.bbox)\r\n    e.set_alpha(0.7)\r\n    e.set_facecolor(col)\r\n    \r\n    \r\n\r\ndef draw_disk(c,d,ax,col): \r\n    #draw_disk(array([[1],[2]]),0.5,ax,\"blue\")\r\n    e = Ellipse(xy=c, width=2*d, height=2*d, angle=0) \r\n    ax.add_artist(e)\r\n    e.set_clip_box(ax.bbox)\r\n    e.set_alpha(0.7)\r\n    e.set_facecolor(col)\r\n    \r\n\r\ndef draw_box(x1,x2,y1,y2,ax,col): \r\n    c=array([[x1],[y1]]) \r\n    rect = Rectangle(c, width=x2-x1, height=y2-y1, angle=0)\r\n    rect.set_facecolor(array([0.4,0.3,0.6])) \r\n    ax.add_patch(rect)\r\n    rect.set_clip_box(ax.bbox)\r\n    rect.set_alpha(0.7)\r\n    rect.set_facecolor(col) \r\n\r\ndef draw_polygon(P,ax,col): \r\n    patches = [] \r\n    patches.append(Polygon(P, True)) \r\n    p = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.4, color=col)\r\n    ax.add_collection(p)\r\n\r\n    \r\n    \r\n    \t\r\ndef draw_car(x):\r\n    x=x.flatten();\r\n    M = array([ [-1, 4, 5, 5, 4, -1, -1, -1, 0, 0, -1, 1, 0, 0, -1, 1, 0, 0, 3, 3, 3], \r\n                [-2, -2, -1, 1, 2, 2, -2, -2, -2, -3, -3, -3, -3, 3, 3, 3, 3, 2, 2, 3, -3],])\r\n    \r\n    M=move_motif(M,x[0],x[1],x[2])\r\n    plot(M[0],M[1],\"blue\",linewidth=2)\r\n    \r\n    W = array([[-1, 1], [0, 0]]) #Front Wheel \r\n#    Wr = move2Dmat(x[0],x[1],x[2]) @ move2Dmat(3,3,x[4]) @ W\r\n    Wr=move_motif(W,3,3,x[4])\r\n    Wr=move_motif(Wr,x[0],x[1],x[2])\r\n\r\n\r\n#    Wl = move2Dmat(x[0],x[1],x[2]) @ move2Dmat(3,-3,x[4]) @ W\r\n    Wl=move_motif(W,3,-3,x[4])\r\n    Wl=move_motif(Wl,x[0],x[1],x[2])\r\n\r\n    plot(Wr[0, :], Wr[1, :], 'magenta', linewidth = 2)\r\n    plot(Wl[0, :], Wl[1, :], 'magenta', linewidth = 2) \r\n\r\n\r\ndef tondarray(M):\r\n    if type(M)==float:\r\n        return array([[M]])\r\n    elif type(M)==int:\r\n        return array([[M]]) \r\n    else:\r\n        return M \r\n\r\n\r\n\r\ndef mvnrnd2(x,G): \r\n    n=len(x)\r\n    x1=x.reshape(n)\r\n    y = np.random.multivariate_normal(x1,G).reshape(n,1)\r\n    return(y) \r\n\r\ndef mvnrnd1(G):\r\n    G=tondarray(G)\r\n    n=len(G)\r\n    x=array([[0]] * n)\r\n    return(mvnrnd2(x,G)) \r\n    \r\n\r\ndef kalman_predict(xup,Gup,u,Γα,A):\r\n    Γ1 = A @ Gup @ A.T + Γα\r\n    x1 = A @ xup + u \r\n    return(x1,Γ1) \r\n\r\ndef kalman_correc(x0,Γ0,y,Γβ,C):\r\n    S = C @ Γ0 @ C.T + Γβ \r\n    K = Γ0 @ C.T @ inv(S) \r\n    ytilde = y - C @ x0 \r\n    Gup = (eye(len(x0))-K @ C) @ Γ0 \r\n    xup = x0 + K@ytilde\r\n    return(xup,Gup) \r\n    \r\ndef kalman(x0,Γ0,u,y,Γα,Γβ,A,C):\r\n    xup,Gup = kalman_correc(x0,Γ0,y,Γβ,C)\r\n    x1,Γ1=kalman_predict(xup,Gup,u,Γα,A)\r\n    return(x1,Γ1) \r\n\r\n    \r\ndef demo_draw(): \r\n    fig = figure(0)\r\n    ax = fig.add_subplot(111, aspect='equal')\r\n    ax.set_xlim(-10, 10)\r\n    ax.set_ylim(-10, 10)\r\n\r\n    \r\n\r\n\r\n    \r\n    c=array([[5],[0]])\r\n    e = Ellipse(xy=c, 
width=13.0, height=2.0, angle=45) \r\n ax.add_artist(e)\r\n e.set_clip_box(ax.bbox)\r\n e.set_alpha(0.9)\r\n e.set_facecolor(array([0.7,0.3,0.6])) \r\n \r\n rect = Rectangle( (1,1), width=5, height=3)\r\n rect.set_facecolor(array([0.4,0.3,0.6])) \r\n ax.add_patch(rect) \r\n \r\n pause(0.2) \r\n draw_tank(array([[-7],[5],[1]]))\r\n \r\n draw_car(array([[1],[2],[3],[4],[0.5]])) \r\n \r\n c = array([[-2],[-3]])\r\n G = array([[2,-1],[-1,4]])\r\n draw_ellipse(c,G,0.9,ax,[0.8,0.8,1])\r\n P=array([[5,-3],[9,-10],[7,-4],[7,-6]])\r\n draw_polygon(P,ax,'green')\r\n show() # only at the end. Otherwize, it closes the figure in a terminal mode\r\n\r\n\r\n\r\ndef demo_animation(): \r\n fig = figure(0)\r\n ax = fig.add_subplot(111, aspect='equal')\r\n for t in arange(0,5,0.1) :\r\n pause(0.01) #needed. Otherwize, draws only at the end \r\n cla()\r\n ax.set_xlim(-15,15)\r\n ax.set_ylim(-15,15)\r\n draw_car(array([[t],[2],[3+t],[4],[5+t]])) \r\n c = array([[-2+2*t],[-3]])\r\n G = array([[2+t,-1],[-1,4+t]])\r\n draw_ellipse(c,G,0.9,ax,[0.8,0.8,1])\r\n show()\r\n\r\n\r\ndef demo_random(): \r\n N=1000\r\n xbar = array([[1],[2]])\r\n Γx = array([[3,1],[1,3]])\r\n X=randn(2,N)\r\n X = (xbar @ ones((1,N))) + sqrtm(Γx) @ X\r\n xbar_ = mean(X,axis=1)\r\n Xtilde = X - xbar @ ones((1,N))\r\n Γx_ = (Xtilde @ Xtilde.T)/N\r\n fig = figure(0) \r\n ax = fig.add_subplot(111, aspect='equal')\r\n cla()\r\n ax.set_xlim(-20,20)\r\n ax.set_ylim(-20,20)\r\n draw_ellipse(xbar,Γx,0.9,ax,[1,0.8,0.8])\r\n pause(0.5) \r\n ax.scatter(X[0],X[1]) \r\n pause(0.3)\r\n plot() \r\n\r\n\r\nif __name__ == \"__main__\":\r\n \r\n\r\n \r\n demo_draw()\r\n \r\n# demo_animation() \r\n# demo_random()\r\n\r\n \r\n\r\n \r\n\r\n# M = array([ [1, 2], [5, 6], [9, 10]])\r\n# print(M)\r\n# x=array([[1], [2]]) \r\n# x2= M@x #multiplication dans Python 3\r\n#\r\n# G = [[1, 0], [0, 1]]\r\n# x3=mvnrnd2(x,G)\r\n# print(x3)\r\n# \r\n# x4=mvnrnd1(G)\r\n# print(x4)\r\n# \r\n# draw_box(-15,15,-15,15,'blue',4)\r\n# x=array([[2], [3], [1], [0], [0.5]]) \r\n# draw_car(x)\r\n# axis ('equal')\r\n# draw_tank(-2,-3,-1) \r\n# print(randn())\r\n# \r\n# A = array([[0,0,1,0],[0,0,0,1],[0,2,0,0],[0,3,0,0]])\r\n# B = array([[0,0,4,5]]).T\r\n# poles = [-2,-2.1,-2.2,-2.3]\r\n# K = place_poles(A,B,poles).gain_matrix\r\n# print(K)\r\n# \r\n# \r\n","sub_path":"py_scripts/roblib.py","file_name":"roblib.py","file_ext":"py","file_size_in_byte":7471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"76257759","text":"def szyfrPodstawieniowy(tekst, nowy_alfa):\n tekst = tekst.lower()\n tekst = tekst.replace(\" \", \"\")\n zaszyfrowany = \"\"\n for i in range(len(tekst)):\n x = alfa.find(tekst[i])\n zaszyfrowany += nowy_alfa[x]\n return zaszyfrowany\n\ndef odszyfrPodstawieniowy(zaszyfrowany, nowy_alfa):\n odszyfrowany = \"\"\n for i in range(len(zaszyfrowany)):\n x = nowy_alfa.find(zaszyfrowany[i])\n odszyfrowany += alfa[x]\n return odszyfrowany\n\nnowy_alfa = \"qwertyuiopasdfghjklzxcvbnm\"\nalfa = \"abcdefghijklmnopqrstuvwxyz\"\ntekst = \"zzAbcdefla ma kota\"\nzaszyfrowany = szyfrPodstawieniowy(tekst, nowy_alfa)\nodszyfrowany = odszyfrPodstawieniowy(zaszyfrowany, nowy_alfa)\nprint(zaszyfrowany)\nprint(odszyfrowany)","sub_path":"python programmes/szyfrowanie/szyfrPodstawieniowy.py","file_name":"szyfrPodstawieniowy.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"389744767","text":"from django.core.urlresolvers import reverse\nfrom 
django.shortcuts import HttpResponseRedirect\n\nfrom customproject.renren.exceptions import RenRenApiException\nfrom customproject.renren.perms import is_loginned_user\nfrom customproject.utils.exceptions import ParameterError\nfrom customproject.utils.log import log_exception\n\n\nclass ExceptionHandleMiddleware(object):\n    def process_view(self, request, view_func, view_args, view_kwargs):\n        try:\n            res = view_func(request, *view_args, **view_kwargs)\n        except ParameterError as e:\n            log_exception(e.msg)\n            raise e\n        except RenRenApiException as e:\n            log_exception(e.msg)\n            return HttpResponseRedirect(reverse(\"welcome\"))\n        return res\n\n\nclass PermissionMiddleware(object):\n    def process_view(self, request, view_func, view_args, view_kwargs):\n        full_path = request.get_full_path()\n        if any((full_path in (\"/welcome\", ),\n                full_path.startswith(\"/renren/login\"),\n                full_path.startswith(\"/static\"))):\n            return None\n        else:\n            if is_loginned_user(request):\n                return None\n            else:\n                return HttpResponseRedirect(reverse(\"welcome\"))\n","sub_path":"customproject/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"170827192","text":"# pickling is the serialization of objects for saving\nimport pickle\n\nimelda = ('More Mayhem',\n          'Imelda May',\n          '2011',\n          ((1, 'Pulling the Rug'),\n           (2, 'Psycho'),\n           (3, 'Mayhem'),\n           (4, 'Kentish Town Waltz')\n           ))\n\nwith open(\"10-input-and-output/imelda.pickle\", \"wb\") as pickle_file:\n    pickle.dump(imelda, pickle_file)\n\n\n\nwith open(\"10-input-and-output/imelda.pickle\", \"rb\") as pickle_file:\n    imelda2 = pickle.load(pickle_file)\n\nalbum, artist, year, track_list = imelda2\nprint(album)\nprint(artist)\nprint(year)\nfor track in track_list:\n    track_num, track_title = track\n    print(track_num, track_title)\n\n\n\n# demonstration of security vulnerability\n# deletes imelda.pickle\npickle.loads(b\"cos\\nsystem\\n(S'rm 10-input-and-output/imelda.pickle'\\ntR.\")\n","sub_path":"10-input-and-output/pickling.py","file_name":"pickling.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"469734357","text":"import numpy as np\n\ndef sigmoid(x):\n    \"\"\"\n    Compute the sigmoid of x\n    Arguments:\n    x -- A scalar or numpy array of any size\n\n    Return:\n    s -- sigmoid(x)\n    \"\"\"\n    s = 1 /(1 + np.exp(-x))\n    return s\n\nx = np.array([1,2,3])\nprint(sigmoid(x))\n\n\n\n","sub_path":"第一课第二周编程作业/Code_03.py","file_name":"Code_03.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"286821453","text":"from pySDC.helpers.problem_helper import get_finite_difference_stencil\nimport pytest\nimport numpy as np\n\n\ndef fd_stencil_single(derivative, order, stencil_type):\n    \"\"\"\n    Make a single test where we generate a finite difference stencil using the generic framework above and compare to\n    hardcoded stencils that were implemented in a previous version of the code.\n\n    Args:\n        derivative (int): Order of the derivative\n        order (int): Order of accuracy\n        stencil_type (str): Type of the stencil\n\n    Returns:\n        None\n    \"\"\"\n    if derivative == 1:\n        if stencil_type == 'center':\n            if order == 2:\n                stencil = [-1.0, 0.0, 1.0]\n                zero_pos = 2\n                coeff = 1.0 / 2.0\n            elif order == 4:\n                stencil = [1.0, -8.0, 0.0, 8.0, -1.0]\n                zero_pos = 3\n                coeff = 1.0 / 12.0\n            elif order == 
6:\n                stencil = [-1.0, 9.0, -45.0, 0.0, 45.0, -9.0, 1.0]\n                zero_pos = 4\n                coeff = 1.0 / 60.0\n            else:\n                raise NotImplementedError(\"Order \" + str(order) + \" not implemented.\")\n        elif stencil_type == 'upwind':\n            if order == 1:\n                stencil = [-1.0, 1.0]\n                coeff = 1.0\n                zero_pos = 2\n\n            elif order == 2:\n                stencil = [1.0, -4.0, 3.0]\n                coeff = 1.0 / 2.0\n                zero_pos = 3\n\n            elif order == 3:\n                stencil = [1.0, -6.0, 3.0, 2.0]\n                coeff = 1.0 / 6.0\n                zero_pos = 3\n\n            elif order == 4:\n                stencil = [-5.0, 30.0, -90.0, 50.0, 15.0]\n                coeff = 1.0 / 60.0\n                zero_pos = 4\n\n            elif order == 5:\n                stencil = [3.0, -20.0, 60.0, -120.0, 65.0, 12.0]\n                coeff = 1.0 / 60.0\n                zero_pos = 5\n            else:\n                raise NotImplementedError(\"Order \" + str(order) + \" not implemented.\")\n        else:\n            raise NotImplementedError(\n                f\"No reference values for stencil_type \\\"{stencil_type}\\\" implemented for 1st derivative\"\n            )\n    elif derivative == 2:\n        if stencil_type == 'center':\n            coeff = 1.0\n            if order == 2:\n                stencil = [1, -2, 1]\n                zero_pos = 2\n            elif order == 4:\n                stencil = [-1 / 12, 4 / 3, -5 / 2, 4 / 3, -1 / 12]\n                zero_pos = 3\n            elif order == 6:\n                stencil = [1 / 90, -3 / 20, 3 / 2, -49 / 18, 3 / 2, -3 / 20, 1 / 90]\n                zero_pos = 4\n            elif order == 8:\n                stencil = [-1 / 560, 8 / 315, -1 / 5, 8 / 5, -205 / 72, 8 / 5, -1 / 5, 8 / 315, -1 / 560]\n                zero_pos = 5\n        else:\n            raise NotImplementedError(\n                f\"No reference values for stencil_type \\\"{stencil_type}\\\" implemented for 2nd derivative\"\n            )\n    else:\n        raise NotImplementedError(f\"No reference values for derivative {derivative} implemented\")\n\n    # convert the reference values to a common way of writing with what we generate here\n    coeff_reference = np.array(stencil) * coeff\n    steps_reference = np.append(np.arange(-zero_pos + 1, 1), np.arange(1, zero_pos))[: len(coeff_reference)]\n    sorted_idx_reference = np.argsort(steps_reference)\n\n    coeff, steps = get_finite_difference_stencil(derivative=derivative, order=order, stencil_type=stencil_type)\n    sorted_idx = np.argsort(steps)\n    assert np.allclose(\n        coeff_reference[sorted_idx_reference], coeff[sorted_idx]\n    ), f\"Got different FD coefficients for derivative {derivative} with order {order} and stencil_type {stencil_type}! Expected {coeff_reference[sorted_idx_reference]}, got {coeff[sorted_idx]}.\"\n\n    assert np.allclose(\n        steps_reference[sorted_idx_reference], steps[sorted_idx]\n    ), f\"Got different FD offsets for derivative {derivative} with order {order} and stencil_type {stencil_type}! Expected {steps_reference[sorted_idx_reference]}, got {steps[sorted_idx]}.\"\n\n\n@pytest.mark.base\ndef test_fd_stencils():\n    \"\"\"\n    Perform multiple tests for the generic FD stencil generating framework.\n\n    Returns:\n        None\n    \"\"\"\n    # Compare against stencils that were previously implemented in the code\n    for order in [1, 2, 3, 4, 5]:\n        fd_stencil_single(1, order, 'upwind')\n    for order in [2, 4, 6]:\n        fd_stencil_single(1, order, 'center')\n    for order in [2, 4, 6, 8]:\n        fd_stencil_single(2, order, 'center')\n\n    # Make some tests comparing to Wikipedia at https://en.wikipedia.org/wiki/Finite_difference_coefficient\n    coeff, steps = get_finite_difference_stencil(derivative=1, order=3, stencil_type='forward')\n    expect_coeff = [-11.0 / 6.0, 3.0, -3.0 / 2.0, 1.0 / 3.0]\n    assert np.allclose(\n        coeff, expect_coeff\n    ), f\"Error in third order forward stencil for 1st derivative! 
Expected {expect_coeff}, got {coeff}.\"\n\n coeff, steps = get_finite_difference_stencil(derivative=2, order=2, stencil_type='backward')\n expect_coeff = [-1, 4, -5, 2][::-1]\n assert np.allclose(\n coeff, expect_coeff\n ), f\"Error in second order backward stencil for 2nd derivative! Expected {expect_coeff}, got {coeff}.\"\n\n # test if we get the correct result when we put in steps rather than a stencil_type\n new_coeff, _ = get_finite_difference_stencil(derivative=2, order=2, steps=steps)\n assert np.allclose(coeff, new_coeff), f\"Error when setting steps yourself! Expected {expect_coeff}, got {coeff}.\"\n","sub_path":"pySDC/tests/test_helpers/test_problem_helper.py","file_name":"test_problem_helper.py","file_ext":"py","file_size_in_byte":5570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"29596615","text":"# 트리의 부모 찾기\nimport sys\nfrom collections import deque\n\nN = int(sys.stdin.readline())\n\nA = [[] for i in range(N+1)]\n\nfor _ in range(N-1):\n a = list(map(int,sys.stdin.readline().split()))\n A[a[0]].append(a[1])\n A[a[1]].append(a[0])\n\nvis=[0]*(N+1)\nnode=[0]*(N+1)\n\nstack = deque()\nstack.append(1)\nwhile(1):\n i = stack.pop()\n vis[i] = 1\n for j in A[i]:\n if vis[j] == 1: continue\n node[j] = i\n vis[j] = 1\n stack.append(j)\n if len(stack) == 0:\n break\n\nfor i in range(2,N+1):\n print(node[i])\n\n\n ","sub_path":"Python/3주차_BFS,DFS/정글_3_11725.py","file_name":"정글_3_11725.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"383665101","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Comment',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('content', models.TextField()),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('updated', models.DateTimeField(auto_now=True)),\n ('changed', models.BooleanField(default=False)),\n ],\n ),\n migrations.CreateModel(\n name='Tip',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('created', models.DateTimeField(auto_now_add=True)),\n ('updated', models.DateTimeField(auto_now=True)),\n ('title', models.CharField(max_length=500, db_index=True)),\n ('content', models.TextField(db_index=True)),\n ('vote', models.IntegerField(default=0)),\n ('user', models.ForeignKey(related_name='user_tips', to=settings.AUTH_USER_MODEL)),\n ],\n ),\n migrations.AddField(\n model_name='comment',\n name='tip',\n field=models.ForeignKey(related_name='tip_comments', to='tip.Tip'),\n ),\n migrations.AddField(\n model_name='comment',\n name='user',\n field=models.ForeignKey(related_name='user_comments', to=settings.AUTH_USER_MODEL),\n ),\n ]\n","sub_path":"djangotips/apps/tip/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"407231972","text":"\nfrom __future__ import unicode_literals\n\nfrom django.conf import settings\nfrom django.contrib.auth.models import User\nfrom django.db import models\nfrom django.db.models.signals import 
post_save\nfrom django.dispatch import receiver\nfrom django.utils.encoding import python_2_unicode_compatible\n\n\n@python_2_unicode_compatible\nclass Profile(models.Model):\n    GANGNAM = 1\n    SUNGDONG = 2\n    YEONGDEUNGPO = 3\n    GWANGJIN = 4\n    MAPO = 5\n    SONGPA =6\n    AREA_CHOICES = (\n        (GANGNAM, '강남구'),\n        (SUNGDONG, '성동구'),\n        (YEONGDEUNGPO, '영등포구'),\n        (GWANGJIN, '광진구'),\n        (MAPO, '마포구'),\n        (SONGPA, '송파구')\n    )\n    GENDER = (\n        ('여성', '여성'),\n        ('남성', '남성'),\n    )\n    user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n    name = models.CharField(('name'), max_length=30, blank=True)\n    birthdate = models.DateField(null=True, blank=True, verbose_name='생일')\n    gender = models.CharField(max_length=7, choices=GENDER, default='', verbose_name='성별')\n    in_area = models.PositiveSmallIntegerField(choices=AREA_CHOICES, null=True, blank=True, verbose_name='관심지역')\n\n\n    class Meta: \n        verbose_name = 'profile'\n        verbose_name_plural = 'profiles'\n\n    def __str__(self):\n        return self.user.username\n\n#\n# @receiver(post_save, sender=User)\n# def create_user_profile(sender, instance, created, **kwargs):\n#     if created:\n#         Profile.objects.create(user=instance)\n#\n# @receiver(post_save, sender=User)\n# def save_user_profile(sender, instance, **kwargs):\n#     instance.Profile.save()\n\n@receiver(post_save, sender=User)\ndef update_user_profile(sender, instance, created, **kwargs):\n    if created:\n        Profile.objects.create(user=instance)\n    instance.profile.save()","sub_path":"userExtends/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"324089580","text":"# Send a plain-text email\n# Send an HTML email\n# Send an email with a file attachment\n# Send an email with an inline image\n# Reference: https://zhuanlan.zhihu.com/p/89868804\nimport smtplib\nfrom email.mime.application import MIMEApplication\nfrom email.mime.image import MIMEImage  # builds the image part\nfrom email.mime.multipart import MIMEMultipart  # combines multiple parts into one message\nfrom email.mime.text import MIMEText  # builds the text part\nfrom email.header import Header\n\nclass email(object):\n    \"\"\"Wrapper class for sending email\"\"\"\n\n    def __init__(self, mail_host, mail_sender, mail_license, mail_receivers, subject_content=\"\"\"Python邮件测试\"\"\",\n                 port=25):\n\n        self.mail_sender = mail_sender\n        self.mail_receivers = mail_receivers\n\n        # Create the SMTP object\n        self.stp = smtplib.SMTP()\n        # Connect to the sender's mail server; the default port is 25\n        self.stp.connect(mail_host, port=port)\n        # Log in to the mailbox: arg 1 is the email address, arg 2 the authorization code\n        self.stp.login(mail_sender, mail_license)\n\n        # 1 Build the MIMEMultipart object representing the mail itself; text, images and attachments can be added to it\n        self.msg = MIMEMultipart('related')\n\n        # 2 Set the mail header fields\n        # Set the sender; the format must be followed strictly, using the sender's address\n        self.msg[\"From\"] = mail_sender\n        # Set the recipient; the format must be followed strictly, using the recipient's address\n        self.msg[\"To\"] = mail_receivers\n        # Set the mail subject\n        self.msg[\"Subject\"] = Header(subject_content, 'utf-8')\n\n    def addBody(self, body_content=\"你好,这是一个测试邮件\", content_type='plain'):\n        # 3 Mail body content\n        # Build the text part: arg 1 is the body content, arg 2 the text format, arg 3 the encoding\n        message_text = MIMEText(body_content, \"plain\", \"utf-8\")\n        # Attach the text part to the MIMEMultipart object\n        self.msg.attach(message_text)\n\n    def addFile(self, filePath):\n        # Build the attachment\n        atta = MIMEText(open(filePath, 'rb').read(), 'base64', 'utf-8')\n        # Set the attachment headers\n        atta['Content-Type'] = 'application/octet-stream'\n        atta[\"Content-Disposition\"] = 'attachment; filename=\"attach.txt\"'\n        # Attach it to the mail message\n        self.msg.attach(atta)\n\n    def addImage(self, imagePath):\n        # Read the image as binary data\n        image_data = open(imagePath, 'rb')\n        # Wrap the binary data that was read\n        message_image = MIMEImage(image_data.read())\n        # Close the file opened above\n        image_data.close()\n        # Attach the image to the mail message\n        self.msg.attach(message_image)\n\n    def send(self):\n        # 4 Send the mail: arg 1 is the sender address, arg 2 the recipient address, arg 3 the message converted to str\n        self.stp.sendmail(self.mail_sender,\n                          self.mail_receivers, self.msg.as_string())\n        # Close the SMTP object\n        self.stp.quit()\n        print(\"邮件发送成功\")\n\n\n","sub_path":"sendEmail.py","file_name":"sendEmail.py","file_ext":"py","file_size_in_byte":3078,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"623648027","text":"#!/usr/bin/env python3\n\nimport redis\nimport argparse\nimport sys\nimport os\nimport calendar\nimport potiron\nimport subprocess\nfrom potiron_graph_annotation import field2string,bubble_annotation\n\nMAXVAL = sys.maxsize\n\n# Definition of the output file name\ndef output_name(source, field, date, dest):\n    data_part = \"{}{}_{}\".format(dest, source, field)\n    date_part = \"{}-{}\".format(date[0:4], date[4:6])\n    return data_part, date_part\n\n# Search the scores in the ranged list of values, and put them in the dictionary of scores\ndef process_score(redisKey, score, general_score):\n    # For each value ranged in decreasing order\n    for v in red.zrevrangebyscore(redisKey,MAXVAL,0):\n        countValue = red.zscore(redisKey,v)\n        val = v.decode()\n        # If the current value has to be skipped, go to the next iteration of the loop\n        if val in args.skip :\n            continue\n        # If the current value is already present in the list of values, increment the score with the current score\n        if val in score:\n            score[val]+=countValue\n        # On the other case, add the value with its score in the list\n        else:\n            score[val]=countValue\n        if general_score is not None:\n            # same operations for the dictionary containing the values for all the protocols\n            if val in general_score:\n                general_score[val]+=countValue\n            else:\n                general_score[val]=countValue\n\n# Sort the scores for the entire month and write them with their corresponding values in the .csv file\ndef process_file(score, name, prot):\n    # Sort the complete list of values for the month by score\n    res = list(sorted(score, key=score.__getitem__, reverse=True))\n    l = 0\n    values = []\n    for s in res:\n        # If the current value is not one that should be skipped, increment the number of values to include in the chart\n        if s not in args.skip:\n            values.append(s)\n            l += 1\n        # When the limit value is reached, we don't need to increment anymore, we break the loop\n        if l >= limit:\n            break\n    # Write all the values and their scores into the csv datafile\n    with open(\"{}.csv\".format(name),'w') as f:\n        f.write(\"id,value\\n\")\n        for v in values :\n            val = bubble_annotation(field,field_string,v,potiron_path,prot)\n            f.write(\"{}{},\\n\".format(v,val))\n            f.write(\"{}{},{}\\n\".format(v,val,int(score[v])))\n    return values\n\ndef generate_links(bokeh, v, logofile, namefile):\n    n = namefile.split('/')\n    name = n[-1].split('_')\n    bokeh_filename = ''\n    for s in n[:-1]:\n        bokeh_filename += '{}/'.format(s)\n    
    bokeh_filename += '{}_{}_{}_{}-*.html'.format(name[0],name[3],name[1],v)\n    if not glob.glob(bokeh_filename):\n        cmd = \"{} -v {}-* --logo {}\".format(bokeh, v, logofile)\n        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n        proc.wait()\n        proc.kill()\n\n# Parameters parser\nparser = argparse.ArgumentParser(description='Export one month of data from redis')\nparser.add_argument(\"-s\",\"--source\", type=str, nargs=1, help='Sensor used as source (ex: \"chp-5890-1\")')\nparser.add_argument(\"-d\",\"--date\", type=str, nargs=1, help='Date of the information to display (with the format YYYY-MM)')\nparser.add_argument(\"-f\",\"--field\", type=str, nargs=1, help='Field used (ex: \"dport\")')\nparser.add_argument(\"-l\",\"--limit\", type=int, nargs=1, help=\"Limit of values to export - default 10\")\nparser.add_argument(\"--skip\", type=str, default=None, action=\"append\", help=\"Skip a specific value\")\nparser.add_argument(\"-o\",\"--outputdir\", type=str, nargs=1, help=\"Output directory\")\nparser.add_argument(\"-u\",\"--unix\", type=str, nargs=1, help='Unix socket to connect to redis-server.')\nparser.add_argument(\"--links\", action='store_true', help=\"Directly create the bokeh plots, which is useful to have all the links working\")\nparser.add_argument(\"-g\", \"--generate\", action='store_true', help=\"Auto-generate the graphs, so you do not need to launch the command on your own\")\nparser.add_argument('--logo', type=str, nargs=1, help='Path of the logo file to display')\nargs = parser.parse_args()\n\nif args.source is None:\n    source = \"potiron\"\nelse:\n    source = args.source[0]\n\nif args.date is None:\n    sys.stderr.write('A date must be specified.\\nThe format is: YYYY-MM')\n    sys.exit(1)\ndate = args.date[0].replace(\"-\",\"\")\n\nif args.field is None:\n    sys.stderr.write(\"A field must be specified.\\n\")\n    sys.exit(1)\nfield = args.field[0]\n\nif args.limit is None:\n    limit = 10\nelse:\n    limit = args.limit[0]\n\nif args.skip is None:\n    args.skip = []\n\nif args.outputdir is None:\n    outputdir = \"./out/\"\nelse:\n    outputdir = args.outputdir[0]\n    if not outputdir.endswith('/'):\n        outputdir = \"{}/\".format(outputdir)\nif not os.path.exists(outputdir):\n    os.makedirs(outputdir)\n\nif args.unix is None:\n    sys.stderr.write('A Unix socket must be specified.\\n')\n    sys.exit(1)\nusocket = args.unix[0]\nred = redis.Redis(unix_socket_path=usocket)\n\nif red.sismember(\"CK\", \"YES\"):\n    ck = True\nelse:\n    ck = False\n\nlinks = args.links\n\ngen = args.generate\n\nbokeh = './bokeh-export.py -s {} -d {} -f {} -o {} -u {}'.format(source, date, field, outputdir, usocket)\n\n# Project directory\npotiron_path = os.path.dirname(os.path.realpath(__file__))[:-3]\n# Define the path of the circl logo, based on the potiron path\nif args.logo is None:\n    logofile = \"{}doc/circl.png\".format(potiron_path)\nelse:\n    logofile = args.logo[0]\n\n# Definition of the protocol values\nprotocols_path = \"{}doc/protocols\".format(potiron_path)\nprotocols = potiron.define_protocols(protocols_path)\n# Definition of the strings containing the information about the field, used in the legend and the file name\nfield_string, field_in_file_name = field2string(field, potiron_path)\nnamefile_data, namefile_date = output_name(source,field_in_file_name,date,outputdir)\ndays = calendar.monthrange(int(date[0:4]),int(date[4:6]))[1]\nif ck:\n    at_least_one = False\n    general_score = {}\n
    for prot in protocols:\n        protocol = protocols[prot]\n        score={}\n        exists = False\n        # For each day of the month\n        for d in range(1,days+1):\n            day = format(d, '02d')\n            redisKey = \"{}:{}:{}{}:{}\".format(source, protocol, date, day, field)\n            if red.exists(redisKey):\n                exists = True\n                process_score(redisKey, score, general_score)\n        if exists:\n            at_least_one = True\n            namefile = \"{}_{}_{}\".format(namefile_data,protocol,namefile_date)\n            val = process_file(score, namefile, protocol)\n            if links:\n                for v in val:\n                    generate_links(bokeh, v, logofile, namefile)\n    if at_least_one:\n        general_filename = \"{}_{}\".format(namefile_data, namefile_date)\n        res = process_file(general_score, general_filename, None)\n        if links:\n            for v in res:\n                generate_links(bokeh, v, logofile, namefile)\nelse:\n    score={}\n    exists = False\n    # For each day of the month\n    for d in range(1,days+1):\n        day = format(d, '02d')\n        redisKey = \"{}:{}{}:{}\".format(source, date, day, field)\n        if red.exists(redisKey):\n            exists = True\n            process_score(redisKey, score, None)\n    if exists:\n        namefile = \"{}_{}\".format(namefile_data, namefile_date)\n        val = process_file(score, namefile, None)\n        if links:\n            for v in val:\n                generate_links(bokeh, v, logofile, namefile)\n\ngen = args.generate\nif gen:\n    shell = './generate.sh {} {}'.format(outputdir, logofile)\n    proc_sh = subprocess.Popen(shell, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n    proc_sh.wait()\n","sub_path":"bin/export-csv-month.py","file_name":"export-csv-month.py","file_ext":"py","file_size_in_byte":7903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"179761832","text":"from PIL import ImageFont\nfrom PIL import Image\nfrom PIL import ImageDraw\nimport cv2\nimport numpy as np\nimport copy\nimport random\nfrom math import *\n\n\"\"\"\nUtilities for licence plates with black characters.\n\"\"\"\n\ndef rot(img, angel, shape, max_angel, point_dict):\n    \"\"\"\n    Affine-style warp.\n    Slightly distorts the image.\n    \"\"\"\n    size_o = [shape[1], shape[0]]\n\n    size = (shape[1] + int(shape[0] * cos((float(max_angel) / 180) * 3.14)), shape[0])\n\n    interval = abs(int(sin((float(angel) / 180) * 3.14) * shape[0]))\n\n    pts1 = np.float32([[0, 0], [0, size_o[1]], [size_o[0], 0], [size_o[0], size_o[1]]])\n    if (angel > 0):\n\n        pts2 = np.float32([[interval, 0], [0, size[1]], [size[0], 0], [size[0] - interval, size_o[1]]])\n    else:\n        pts2 = np.float32([[0, 0], [interval, size[1]], [size[0] - interval, 0], [size[0], size_o[1]]])\n\n    M = cv2.getPerspectiveTransform(pts1, pts2)\n    dst = cv2.warpPerspective(img, M, size,None,None,None,(255,255,255))\n\n    # Transform the corner points\n    rot_pointG = []\n    rot_point = []\n    for pointx4 in point_dict:\n\n        for pt in pointx4:\n            rot_p = retRotPoint(pt, M)\n            rot_point.append(rot_p)\n        rot_pointG.append(rot_point.copy())\n        rot_point.clear()\n\n    return dst, rot_pointG\n\n\ndef rotRandrom(img, factor, size, point_dict):\n    \"\"\"\n    Add perspective distortion.\n    img: input image\n    factor: distortion parameter\n    size: target size of the image\n    \"\"\"\n    shape = size\n    pts1 = np.float32([[0, 0], [0, shape[0]], [shape[1], 0], [shape[1], shape[0]]])\n    pts2 = np.float32([[r(factor), r(factor)], [r(factor), shape[0] - r(factor)], [shape[1] - r(factor), r(factor)],\n                       [shape[1] - r(factor), shape[0] - r(factor)]])\n    M = cv2.getPerspectiveTransform(pts1, pts2)\n    dst = cv2.warpPerspective(img, M, size,None,None,None,(255,255,255))\n    rot_pointG = []\n    rot_point = []\n    for pointx4 in point_dict:\n        for pt in pointx4:\n            rot_p = retRotPoint(pt, M)\n            rot_point.append(rot_p)\n        rot_pointG.append(rot_point.copy())\n        rot_point.clear()\n    return dst, rot_pointG\n\n\ndef retRotPoint(pt, rotM):\n    \"\"\"\n    Map a point through a perspective transform, using homogeneous\n    coordinates: [x', y', w] = M . [x, y, 1], image point = (x'/w, y'/w).\n    :param pt: original point\n    :param rotM: perspective transform matrix (3x3)\n    :return: transformed point\n    \"\"\"\n    pt3D = np.array([[pt[0]], [pt[1]], [1]], dtype=float)\n    ptM = rotM.dot(pt3D)\n    ptx = ptM[0][0]\n    pty = ptM[1][0]\n    ptz = ptM[2][0]\n    rotpt = (int(ptx / ptz), int(pty / ptz))\n    return rotpt\n\n\ndef tfactor(img):\n    \"\"\"\n    Randomly perturb hue, saturation and brightness (lighting noise).\n    \"\"\"\n    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n    hsv[:, :, 0] = hsv[:, :, 0] * (0.8 + np.random.random() * 0.2)\n    hsv[:, :, 1] = hsv[:, :, 1] * (0.3 + np.random.random() * 0.7)\n    hsv[:, :, 2] = hsv[:, :, 2] * (0.2 + np.random.random() * 0.8)\n    img = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)\n\n    return img\n\n\ndef random_envirment(img, data_set):\n    \"\"\"\n    Add natural-environment noise:\n    black pixels are replaced with the background.\n    Used to add environment noise to plates with white characters.\n    \"\"\"\n    index = r(len(data_set))\n    env = cv2.imread(data_set[index])\n\n    env = cv2.resize(env, (img.shape[1], img.shape[0]))\n\n    gray_img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n    ret, th = cv2.threshold(gray_img, 254, 255, cv2.THRESH_BINARY)\n    th = cv2.cvtColor(th,cv2.COLOR_GRAY2BGR)\n    inv = cv2.bitwise_and(th,env)\n    img = cv2.bitwise_xor(inv,img)\n\n    return img\n\n\ndef GenCh(f, val):\n    \"\"\"\n    Render a Chinese character.\n    :param f: font\n    :param val: character\n    :return: image of the single character\n    \"\"\"\n    img = Image.new(\"RGB\", (45, 70), (255, 255, 255))  # \"RGB\" mode, (45, 70) image size, (255, 255, 255) background colour\n    draw = ImageDraw.Draw(img)\n    draw.text((0, 3), val, (0, 0, 0), font=f)\n    img = img.resize((23, 70))\n    A = np.array(img)\n\n    return A\n\ndef GenCh_red(f, val):\n    \"\"\"\n    Render a Chinese character (in red).\n    :param f: font\n    :param val: character\n    :return: image of the single character\n    \"\"\"\n    img = Image.new(\"RGB\", (45, 70), (255, 255, 255))  # \"RGB\" mode, (45, 70) image size, (255, 255, 255) background colour\n    draw = ImageDraw.Draw(img)\n    draw.text((0, 3), val, (0, 0, 255), font=f)\n    img = img.resize((23, 70))\n    A = np.array(img)\n\n    return A\n\n\ndef GenCh1(f, val):\n    \"\"\"\n    Render a Latin letter or digit.\n    :param f: font\n    :param val: character\n    :return: image of the single character\n    \"\"\"\n    img = Image.new(\"RGB\", (23, 70), (255, 255, 255))\n    draw = ImageDraw.Draw(img)\n    draw.text((0, 2), val.encode('utf-8').decode('utf-8'), (0, 0, 0), font=f)\n    A = np.array(img)\n    return A\n\n\n# def GenChGreen(f, val):\n#     \"\"\"\n#     Render a Chinese character.\n#     :param f: font\n#     :param val: character\n#     :return: image of the single character\n#     \"\"\"\n#     img = Image.new(\"RGB\", (45, 70), (255, 255, 255))  # \"RGB\" mode, (45, 70) image size, (255, 255, 255) background colour\n#     draw = ImageDraw.Draw(img)\n#     draw.text((0, 3), val, (0, 0, 0), font=f)\n#     img = img.resize((21, 63))\n#     A = np.array(img)\n#\n#     return A\n\n\ndef GenChGreen1(f, val):\n    \"\"\"\n    Render a Latin letter or digit.\n    :param f: font\n    :param val: character\n    :return: image of the single character\n    \"\"\"\n    img = Image.new(\"RGB\", (22, 63), (255, 255, 255))\n    draw = ImageDraw.Draw(img)\n    draw.text((0, 5), val.encode('utf-8').decode('utf-8'), (0, 0, 0), font=f)\n    A = np.array(img)\n    return A\n\ndef AddGauss(img, level):\n    \"\"\"\n    Apply Gaussian blur.\n    \"\"\"\n    return cv2.blur(img, (level * 2 + 1, level * 2 + 1))\n\n\ndef r(val):\n    \"\"\"\n    Generate a random integer in the range 0 to val.\n    \"\"\"\n    return int(np.random.random() * val)\n\n\ndef AddNoiseSingleChannel(single):\n    \"\"\"\n    Add Gaussian noise to a single channel.\n    \"\"\"\n    diff = 255 - single.max()\n    noise = np.random.normal(0, 1 + r(6), single.shape)\n    noise = (noise - noise.min()) / (noise.max() - noise.min())\n    noise = diff * noise\n    noise = noise.astype(np.uint8)\n    dst = single + noise\n    return dst\n\n\ndef addNoise(img, sdev=0.5, avg=10):\n    \"\"\"\n    Add Gaussian noise to all three channels.\n    \"\"\"\n    img[:, :, 0] = AddNoiseSingleChannel(img[:, :, 0])\n    img[:, :, 1] = AddNoiseSingleChannel(img[:, :, 1])\n    img[:, :, 2] = AddNoiseSingleChannel(img[:, :, 2])\n    return img\n\n\ndef edgeFill(img, pointG, fill_size=0):\n    \"\"\"\n    Pad the image borders.\n    :param img: image\n    :param pointG: coordinate points\n    :param fill_size: base size of the border padding\n    :return: padded image and shifted points\n    \"\"\"\n\n    top_fill = fill_size + random.randint(0, 15)\n    bottom_fill = fill_size + random.randint(0, 15)\n    left_fill = fill_size + random.randint(0, 15)\n    right_fill = fill_size + random.randint(0, 15)\n    img = cv2.copyMakeBorder(img, top_fill, bottom_fill, left_fill, right_fill, cv2.BORDER_CONSTANT,\n                             value=(255, 255, 255))\n\n    # Shift the point coordinates\n    fill_pointG = []\n    fill_point = []\n    for pointx4 in pointG:\n        for pt in pointx4:\n            temp = (pt[0] + left_fill, pt[1] + top_fill)\n            fill_point.append(temp)\n        fill_pointG.append(fill_point.copy())\n        fill_point.clear()\n\n    return img, fill_pointG\n\n\ndef convert(size, box):\n    \"\"\"\n    Convert rectangle coordinates into YOLO-trainable txt coordinates.\n    :param size: image size\n    :param box: rectangle coordinates\n    :return: YOLO coordinates\n    \"\"\"\n    # print(f'box={box}')\n    dw = 1. / (size[0])\n    dh = 1. / (size[1])\n    x = (box[0] + box[1]) / 2.0 - 1\n    y = (box[2] + box[3]) / 2.0 - 1\n    w = box[1] - box[0]\n    h = box[3] - box[2]\n    # print(f'x={x},y={y},w={w},h={h}')\n    x = x * dw\n    w = w * dw\n    y = y * dh\n    h = h * dh\n    # print(f't2,,x={x},y={y},w={w},h={h}')\n    return x, y, w, h\n\n\ndef drawpoint(imgoo, pointG, str):\n    \"\"\"\n    Draw the coordinate points.\n    :param imgoo: image\n    :param pointG: groups of point coordinates\n    :param str: window name used by cv2.imshow\n    :return:\n    \"\"\"\n    img = copy.deepcopy(imgoo)\n    for pp in pointG:\n        for ptemp in pp:\n            cv2.circle(img, ptemp, 3, (0, 255, 0), 2)\n    cv2.imshow(str, img)\n    cv2.waitKey(0)\n\n\ndef rectangle_vertex(pointA, pointB, pointC, pointD):\n    \"\"\"\n    Bounding-rectangle coordinates.\n    :param pointA: top-left point\n    :param pointB: top-right point\n    :param pointC: bottom-left point\n    :param pointD: bottom-right point\n    :return: top-left and bottom-right corners of the rectangle\n    \"\"\"\n    left_point = (pointA[0] + pointC[0]) / 2\n    top_point = (pointA[1] + pointB[1]) / 2\n    right_point = (pointB[0] + pointD[0]) / 2\n    bottom_point = (pointC[1] + pointD[1]) / 2\n    point_A = (int(left_point), int(top_point))\n    point_D = (int(right_point), int(bottom_point))\n    roi_rect = [point_A, point_D]\n    return roi_rect\n\n\ndef Roi_Correct(rect, img):\n    \"\"\"\n    Clamp the rectangle so it does not leave the image.\n    :param rect: top-left and bottom-right corners of the rectangle\n    :param img: image\n    :return:\n    \"\"\"\n    if rect[0] < 0:\n        rect[0] = 0\n    if rect[1] < 0:\n        rect[1] = 0\n    if rect[1] < img.shape[1]:\n        pass\n\n\ndef get_dict_key(dicts, value):\n    '''\n    Get a dict key from its value.\n    :param dicts: dict\n    :param value: dict->value\n    :return: dict->key\n    '''\n    temp = None\n    for k, v in dicts.items():\n        if v == value:\n            temp = k\n    return temp\n\n\ndef save_classes(dicts, fileName):\n    '''\n    Save label.data.\n    :param dicts: dict of licence-plate characters\n    :param fileName: output file name\n    :return: None\n    '''\n    with open(fileName, 'w') as f:\n        for k, v in dicts.items():\n            f.write(v + '\\n')","sub_path":"trnoise_black.py","file_name":"trnoise_black.py","file_ext":"py","file_size_in_byte":9321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"136632298","text":"import os\nimport unittest\nimport json\nfrom flask_sqlalchemy import SQLAlchemy\n\nfrom flaskr import create_app\nfrom models import setup_db, Question, Category\n\n\nclass TriviaTestCase(unittest.TestCase):\n    \"\"\"This class represents the trivia test case\"\"\"\n\n    def setUp(self):\n        \"\"\"Define test variables and initialize app.\"\"\"\n        self.app = create_app()\n        self.client = self.app.test_client\n        self.database_name = \"trivia_test\"\n        self.database_path = \"postgresql://postgres:password@localhost:5432/{}\".format(\n            self.database_name)\n\n        setup_db(self.app, self.database_path)\n\n        # binds the app to the current context\n        with self.app.app_context():\n            self.db = SQLAlchemy()\n            self.db.init_app(self.app)\n            # create all tables\n            self.db.create_all()\n\n
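    # setUp() runs before every test method, so each test starts from a\n    # fresh app instance, test client and database session.\n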
    def tearDown(self):\n        \"\"\"Executed after each test\"\"\"\n        pass\n\n    def test_get_paginated_questions(self):\n\n        res = self.client().get('/questions')\n        data = json.loads(res.data)\n\n        self.assertEqual(data['success'], True)\n        self.assertEqual(res.status_code, 200)\n        self.assertTrue(data['total_questions'])\n        self.assertTrue(len(data['categories']))\n        self.assertTrue(len(data['questions']))\n\n    def test_get_questions_from_invalid_page(self):\n\n        res = self.client().get('/questions?page=9999')\n        data = json.loads(res.data)\n\n        self.assertEqual(data['success'], False)\n        self.assertEqual(res.status_code, 404)\n        self.assertEqual(data['message'], 'Resource not found')\n\n    def test_get_non_existing_category(self):\n\n        res = self.client().get('/categories/4324')\n        data = json.loads(res.data)\n\n        self.assertEqual(data['success'], False)\n        self.assertEqual(res.status_code, 404)\n        self.assertEqual(data['message'], 'Resource not found')\n\n    def test_get_categories(self):\n\n        res = self.client().get('/categories')\n        data = json.loads(res.data)\n\n        self.assertEqual(data['success'], True)\n        self.assertEqual(res.status_code, 200)\n        self.assertTrue(len(data['categories']))\n\n\n    def test_deleting_non_existing_question(self):\n        res = self.client().delete('/questions/none')\n        data = json.loads(res.data)\n\n        self.assertEqual(data['success'], False)\n        self.assertEqual(res.status_code, 422)\n        self.assertEqual(data['message'], 'Unprocessable')\n\n    def test_delete_question(self):\n        question = Question(question='sample question', answer='awesome answer',\n                            difficulty=1, category=1)\n        question.insert()\n        question_id = question.id\n\n        res = self.client().delete(f'/questions/{question_id}')\n        data = json.loads(res.data)\n\n        question = Question.query.filter(\n            Question.id == question.id).one_or_none()\n\n        self.assertEqual(data['success'], True)\n        self.assertEqual(res.status_code, 200)\n        self.assertEqual(data['deleted'], str(question_id))\n        self.assertEqual(question, None)\n\n    def test_search_questions_unmatched_term(self):\n\n        res = self.client().post('/questions/search',\n                                 json={'searchTerm': 'testSeachQuestion'})\n        data = json.loads(res.data)\n\n        self.assertEqual(data['success'], True)\n        self.assertEqual(res.status_code, 200)\n        self.assertIsNotNone(data['questions'])\n        self.assertIsNotNone(data['total_questions'])\n\n    def test_add_invalid_question(self):\n\n        res = self.client().post('/questions', json={\n            'question': 'Is this question valid?',\n            'answer': 'No, there is no difficulty',\n            'category': 1\n        })\n\n        data = json.loads(res.data)\n        self.assertEqual(data[\"success\"], False)\n\n        self.assertEqual(res.status_code, 422)\n        self.assertEqual(data[\"message\"], \"Unprocessable\")\n\n    def test_add_question(self):\n\n        previous_number_questions = len(Question.query.all())\n\n        res = self.client().post('/questions', json={\n            'question': 'Will I pass?',\n            'answer': 'Yes, your project is awesome!',\n            'difficulty': 1,\n            'category': 1\n        })\n\n        updated_number_question = len(Question.query.all())\n\n        data = json.loads(res.data)\n        self.assertEqual(data[\"success\"], True)\n        self.assertEqual(res.status_code, 200)\n        self.assertEqual(updated_number_question,\n                         previous_number_questions + 1)\n\n    def test_search_questions(self):\n        new_search = {'searchTerm': 'asd'}\n        res = self.client().post('/questions/search', json=new_search)\n        data = json.loads(res.data)\n\n        self.assertEqual(res.status_code, 200)\n        self.assertEqual(data['success'], True)\n        self.assertIsNotNone(data['questions'])\n        self.assertIsNotNone(data['total_questions'])\n\n    def 
test_invalid_search_question(self):\n\n res = self.client().post('/questions/search', json={'searchTerm': ''})\n\n data = json.loads(res.data)\n self.assertEqual(data[\"success\"], False)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data[\"message\"], \"Resource not found\")\n\n def test_get_questions_by_category(self):\n res = self.client().get('/categories/1/questions')\n data = json.loads(res.data)\n\n self.assertEqual(data['success'], True)\n self.assertEqual(res.status_code, 200)\n self.assertTrue(len(data['questions']))\n self.assertTrue(data['total_questions'])\n self.assertTrue(data['current_category'])\n\n def test_get_questions_by_invalid_category(self):\n res = self.client().get('/categories/invalid/questions')\n data = json.loads(res.data)\n\n self.assertEqual(res.status_code, 404)\n self.assertEqual(data[\"success\"], False)\n self.assertEqual(data[\"message\"], \"Resource not found\")\n\n def test_get_random_quiz_question_invalid_category(self):\n res = self.client().post('/quizzes', json={ 'previous_questions': [] } )\n data = json.loads(res.data)\n\n self.assertEqual(data[\"success\"], False)\n self.assertEqual(res.status_code, 422)\n self.assertEqual(data[\"message\"], \"Unprocessable\")\n\n def test_get_random_quiz_question(self):\n\n res = self.client().post('/quizzes',\n json={'previous_questions': [], 'quiz_category': {'type': \"History\", 'id': \"4\"}})\n data = json.loads(res.data)\n\n self.assertEqual(data['success'], True)\n self.assertEqual(res.status_code, 200)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"projects/02_trivia_api/starter/backend/test_flaskr.py","file_name":"test_flaskr.py","file_ext":"py","file_size_in_byte":6663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"378021306","text":"#!/usr/bin/python3\n'''\ncontains a function that prints a text with 2 new lines\nafter each of these characters: ., ? and :\n'''\n\n\ndef text_indentation(text):\n '''\n function that prints a text with 2 new lines\n after each of these characters: ., ? and :\n '''\n\n if type(text) != str:\n raise TypeError('text must be a string')\n\n text = text.strip()\n new_text = ''\n\n for char in text:\n if char != '\\n':\n new_text += char\n if char == '?' or char == ':' or char == '.':\n new_text += '\\n'\n\n list_of_lines = new_text.split('\\n')\n\n new_list = []\n\n for line in list_of_lines:\n if line != '':\n item = line.strip(' ')\n print(item, end='')\n last_c = item[-1]\n if last_c == '?' 
or last_c == ':' or last_c == '.':\n                print('\\n')\n","sub_path":"0x07-python-test_driven_development/5-text_indentation.py","file_name":"5-text_indentation.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"163827971","text":"import constants\nfrom helper_methods import discretize\nfrom helper_methods import clear_out_folder\nfrom object_crop import ObjectCrop\nfrom classification import classify\nimport csv\nfrom sklearn.model_selection import ParameterGrid\n\ncrop_iters = {\n    'size': constants.SIZES,\n    'grid_size': constants.GRID_SIZES,\n    'padding': constants.PADDINGS,\n    'iou': constants.ious\n}\ncrop_grid = ParameterGrid(crop_iters)\n\nclassify_iters = {\n    'batch_size': constants.BATCHES,\n    'size': constants.RESIZE,\n    'interpolation': constants.INTERPOLATION\n}\nclassify_grid = ParameterGrid(classify_iters)\n\n\ndef run_analysis():\n    with open(constants.OUTPUT_FOLDER + 'results.csv', 'w', newline='') as file:\n        writer = csv.writer(file)\n        writer.writerow([\"obj_sizes\",\"grid_size\", \"padding\", \"iou\", \"batch_size\", \"resize\",\n                         \"interpolation\", \"loss_0\", \"accuracy_0\", \"loss_1\", \"accuracy_1\"])\n        file.flush()\n        for crop_params in crop_grid:\n            print(\"Clearing current directory...\")\n            clear_out_folder()\n            print(\"Completed\\n\")\n\n            print(\"Deconstructing images...\")\n            images = discretize()[crop_params['size']]\n            # print(images)\n            # exit(0)  # debugging leftover; disabled so the loop below runs\n            for image in images:\n                print(\"Processing image\" + image)\n                crop_obj = ObjectCrop(out_folder=constants.OUTPUT_FOLDER, img_label=image)\n                crop_obj.deconstruct(gh=crop_params['grid_size'][1], gw=crop_params['grid_size'][0],padding=crop_params['padding'], iou_thresh=crop_params['iou'], test_run=False)\n            print(\"Completed\\n\")\n\n            print(\"Running classifications...\")\n            for classify_params in classify_grid:\n                classify_results = classify(classify_params['batch_size'], classify_params['size'], classify_params['interpolation'])\n                writer.writerow([crop_params['size'],crop_params['grid_size'], crop_params['padding'],crop_params['iou'],\n                                 classify_params['batch_size'],classify_params['size'],classify_params['interpolation'],\n                                 classify_results[0], classify_results[1],classify_results[2], classify_results[3]])\n                file.flush()\n            print(\"Completed\")\n\nrun_analysis()\n\n","sub_path":"tir-vision/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":2294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"442004665","text":"import threading,socket\r\n\r\nHOST = 'localhost'\r\nPORT = 4000\r\n\r\ndef sendMsg(soc):\r\n    while True:\r\n        message = input(\"\")\r\n        soc.sendall(message.encode(encoding='utf-8'))\r\n        if message == 'quit':\r\n            break\r\n    print('클라이언트 메시지 입력 쓰레드 종료')\r\n\r\n\r\ndef recvMsg(soc):\r\n    while True:\r\n        data = soc.recv(1024)\r\n        msg = data.decode()\r\n        print(msg)\r\n        if msg == 'quit':\r\n            break\r\n    soc.close()\r\n    print('클라이언트 리시브 쓰레드 종료')\r\n\r\n\r\nclient_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\nclient_socket.connect((HOST, PORT))\r\n\r\nt = threading.Thread(target=sendMsg,args=(client_socket,))\r\nt.start()\r\nt2 = threading.Thread(target=recvMsg, args=(client_socket,))\r\nt2.start()\r\n\r\n","sub_path":"t_client.py","file_name":"t_client.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"11148438","text":"# 
https://www.geeksforgeeks.org/counting-sort/\n\ndef sort(arr):\n n = len(arr)\n result = [-1] * n\n counts = [0] * (max(arr) + 1)\n\n for el in arr:\n counts[el] += 1\n \n for i in range(1, len(counts)):\n counts[i] += counts[i-1]\n\n for i in range(n):\n result[counts[arr[i]] - 1] = arr[i]\n counts[arr[i]] -= 1\n\n return result\n\n\nif __name__ == '__main__':\n arr = [10, 7, 8, 9, 1, 5] \n assert [1, 5, 7, 8, 9, 10] == sort(arr)\n","sub_path":"sort/counting.py","file_name":"counting.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"541986808","text":"from summaryplot.statistics import compute_median\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom spt3g import core, calibration, dfmux, todfilter\nfrom spt3g.std_processing import obsid_to_g3time\nimport gc\nimport matplotlib.dates as mdates\nimport datetime\nfrom summaryplot.plot_util import plot_timeseries\n\n\ndef median_nei_01Hz_to_05Hz(frame, boloprops, selector_dict):\n if 'NEI_0.1Hz_to_0.5Hz' not in frame.keys():\n return None\n return compute_median(frame, 'NEI_0.1Hz_to_0.5Hz', boloprops, selector_dict) \n\n\ndef median_nei_1Hz_to_2Hz(frame, boloprops, selector_dict):\n if 'NEI_1.0Hz_to_2.0Hz' not in frame.keys():\n return None\n return compute_median(frame, 'NEI_1.0Hz_to_2.0Hz', boloprops, selector_dict) \n\n\ndef median_nei_3Hz_to_5Hz(frame, boloprops, selector_dict):\n if 'NEI_3.0Hz_to_5.0Hz' not in frame.keys():\n return None\n return compute_median(frame, 'NEI_3.0Hz_to_5.0Hz', boloprops, selector_dict) \n\n\ndef median_nei_10Hz_to_15Hz(frame, boloprops, selector_dict):\n if 'NEI_10.0Hz_to_15.0Hz' not in frame.keys():\n return None\n return compute_median(frame, 'NEI_10.0Hz_to_15.0Hz', boloprops, selector_dict) \n\n\ndef median_nep_01Hz_to_05Hz(frame, boloprops, selector_dict):\n if 'NEP_0.1Hz_to_0.5Hz' not in frame.keys():\n return None\n return compute_median(frame, 'NEP_0.1Hz_to_0.5Hz', boloprops, selector_dict) \n\n\ndef median_nep_1Hz_to_2Hz(frame, boloprops, selector_dict):\n if 'NEP_1.0Hz_to_2.0Hz' not in frame.keys():\n return None\n return compute_median(frame, 'NEP_1.0Hz_to_2.0Hz', boloprops, selector_dict) \n\n\ndef median_nep_3Hz_to_5Hz(frame, boloprops, selector_dict):\n if 'NEP_3.0Hz_to_5.0Hz' not in frame.keys():\n return None\n return compute_median(frame, 'NEP_3.0Hz_to_5.0Hz', boloprops, selector_dict) \n\n\ndef median_nep_10Hz_to_15Hz(frame, boloprops, selector_dict):\n if 'NEP_10.0Hz_to_15.0Hz' not in frame.keys():\n return None\n return compute_median(frame, 'NEP_10.0Hz_to_15.0Hz', boloprops, selector_dict) \n\n\ndef median_net_01Hz_to_05Hz(frame, boloprops, selector_dict):\n if 'NET_0.1Hz_to_0.5Hz' not in frame.keys():\n return None\n return compute_median(frame, 'NET_0.1Hz_to_0.5Hz', boloprops, selector_dict) \n\n\ndef median_net_1Hz_to_2Hz(frame, boloprops, selector_dict):\n if 'NET_1.0Hz_to_2.0Hz' not in frame.keys():\n return None\n return compute_median(frame, 'NET_1.0Hz_to_2.0Hz', boloprops, selector_dict)\n\n\ndef number_of_lines_in_median_psds(frame, boloprops, selector_dict):\n lines = {'all': {}}\n for wafer, band in selector_dict.keys():\n if wafer not in lines:\n lines[wafer] = {}\n if band not in lines[wafer]:\n lines[wafer][band] = 0\n if band not in lines['all']:\n lines['all'][band] = 0\n for k, v in frame['LineLocations'].items():\n band, wafer = k.split('_')\n band = int(float(band))\n num_lines = len(v)\n lines[wafer][band] += num_lines\n lines['all'][band] += num_lines\n return 
lines\n\n\ndef median_net_3Hz_to_5Hz(frame, boloprops, selector_dict):\n if 'NET_3.0Hz_to_5.0Hz' not in frame.keys():\n return None\n return compute_median(frame, 'NET_3.0Hz_to_5.0Hz', boloprops, selector_dict) \n\n\ndef median_net_10Hz_to_15Hz(frame, boloprops, selector_dict):\n if 'NET_10.0Hz_to_15.0Hz' not in frame.keys():\n return None\n return compute_median(frame, 'NET_10.0Hz_to_15.0Hz', boloprops, selector_dict) \n\n\ndef plot_median_noise(data, noise_type, wafers, outdir, xlims=None,\n ylims={'NEI': [0, 100], 'NET': [0, 5000], 'NEP': [0, 200]}):\n # min/max for plotting purposes\n lines = {}\n \n nex_name = noise_type.split('_')[0]\n labels = {'NEI': 'NEI [pA / sqrt(Hz)]',\n 'NET': 'NET [uK rtsec]',\n 'NEP': 'NEP [aW / sqrt(Hz)]'}\n units = {'NEI': core.G3Units.amp*1e-12 / np.sqrt(core.G3Units.Hz),\n 'NET': core.G3Units.microkelvin * np.sqrt(core.G3Units.sec),\n 'NEP': core.G3Units.attowatt / np.sqrt(core.G3Units.Hz)}\n\n for wafer in wafers: \n obsids = [obsid for obsid in data['noise']]\n f = plt.figure(figsize=(8,6))\n\n is_empty = True\n for band in [90, 150, 220]:\n noise = np.array([data['noise'][obsid][noise_type][wafer][band] / units[nex_name]\n for obsid in obsids\n if noise_type in data['noise'][obsid].keys()])\n timestamps = [obsid_to_g3time(int(obsid)).time / core.G3Units.seconds\n for obsid in obsids\n if noise_type in data['noise'][obsid].keys()]\n dts = np.array([datetime.datetime.utcfromtimestamp(ts) for ts in timestamps])\n plot_timeseries(dts, noise, band, xlims=xlims, ylims=ylims[nex_name])\n\n if len(noise)>0:\n is_empty = False\n\n if is_empty == False:\n xfmt = mdates.DateFormatter('%m-%d %H:%M')\n plt.gca().xaxis.set_major_formatter(xfmt)\n plt.xticks(rotation=25)\n\n plt.grid()\n plt.xlabel('observation time (UTC)')\n plt.ylabel(labels[nex_name])\n plt.title('{} ({})'.format(noise_type.replace('_', ' '), wafer))\n plt.legend()\n plt.tight_layout()\n plt.savefig('{}/median_{}_{}.png'.format(outdir, noise_type, wafer))\n plt.close()\n\n\n\ndef plot_number_of_lines(data, wafers, outdir,\n xlims=None, ylims=[-0.3, 10], ylims_all=[-0.9, 30]):\n for wafer in wafers:\n obsids = [obsid for obsid in data['noise']]\n f = plt.figure(figsize=(8,6))\n\n is_empty = True\n for band in [90, 150, 220]:\n number_lines = np.array([data['noise'][obsid]['NumberOfLinesInMedianPSDs'][wafer][band]\n for obsid in data['noise']])\n\n timestamps = [obsid_to_g3time(int(obsid)).time / core.G3Units.sec\n for obsid in obsids]\n dts = np.array([datetime.datetime.utcfromtimestamp(ts) for ts in timestamps])\n if wafer == 'all':\n plot_timeseries(dts, number_lines, band, xlims=xlims, ylims=ylims_all)\n else:\n plot_timeseries(dts, number_lines, band, xlims=xlims, ylims=ylims)\n\n if len(number_lines)>0:\n is_empty = False\n\n if is_empty == False:\n xfmt = mdates.DateFormatter('%m-%d %H:%M')\n plt.gca().xaxis.set_major_formatter(xfmt)\n plt.xticks(rotation=25)\n\n plt.grid()\n plt.xlabel('observation time (UTC)')\n if wafer == 'all':\n plt.ylabel('number of lines found (sum from all wafers)')\n else:\n plt.ylabel('number of lines found')\n plt.title('Number of lines found in median PSD ({})'.format(wafer))\n plt.legend()\n plt.tight_layout()\n plt.savefig('{}/number_of_lines_found_{}.png'.format(outdir, wafer))\n plt.close()\n","sub_path":"summaryplot/noise.py","file_name":"noise.py","file_ext":"py","file_size_in_byte":7030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"640816525","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 
-*-\nimport tkinter\nimport tkinter.messagebox\nimport random\nimport time\n\nHEIGHT = 18\nWIDTH = 10\n\nroot = tkinter.Tk()\nroot.title('Tetris')\nroot.resizable(False, False)\n\nclass App(tkinter.Frame):\n    def __init__(self, master):\n        tkinter.Frame.__init__(self)\n        # key bindings\n        master.bind('<Up>', self.Up)\n        master.bind('<Left>', self.Left)\n        master.bind('<Right>', self.Right)\n        master.bind('<Down>', self.Down)\n        master.bind('<space>', self.Space)\n        master.bind('<p>', self.Pause)\n        master.bind('<FocusOut>', self.FocusPause)\n\n\n        self.backg = \"#%02x%02x%02x\" % (120, 150, 30)\n        self.frontg = \"#%02x%02x%02x\" % (40, 120, 150)\n        self.nextg = \"#%02x%02x%02x\" % (150, 100, 100)\n        self.flashg = \"#%02x%02x%02x\" % (210, 130, 100)\n\n        self.LineDisplay = tkinter.Label(master, text='Lines: ', bg='black', fg='red')\n        self.Line = tkinter.Label(master, text='0', bg='black', fg='red')\n        self.ScoreDisplay = tkinter.Label(master, text='Score: ', bg='black', fg='red')\n        self.Score = tkinter.Label(master, text='0', bg='black', fg='red')\n\n        self.TimeSpentDisplay = tkinter.Label(master, text='time: ', bg='black', fg='red')\n        self.TimeSpent = tkinter.Label(master, text='0',bg='black', fg='red')\n        self.LineDisplay.grid(row=HEIGHT-2, column=WIDTH, columnspan=2)\n        self.Line.grid(row=HEIGHT-2, column=WIDTH+2, columnspan=3)\n        self.ScoreDisplay.grid(row=HEIGHT-1, column=WIDTH, columnspan=2)\n        self.Score.grid(row=HEIGHT-1, column=WIDTH+2, columnspan=3)\n        self.TimeSpentDisplay.grid(row=HEIGHT-4, column=WIDTH, columnspan=2)\n        self.TimeSpent.grid(row=HEIGHT-4, column=WIDTH+2, columnspan=3)\n        self.TotalTime = 0\n        self.TotalLine = 0\n        self.TotalScore = 0\n\n        self.isGameOver = False\n        self.isPause = False\n        self.isStart = False\n        self.NextList = []\n        self.NextRowList = []\n\n        r = 0\n        c = 0\n\n        for k in range(4*4):\n            LN = tkinter.Label(master, text=' ', bg=str(self.nextg), fg='white', relief=tkinter.FLAT, bd=4)\n            LN.grid(row=r, column=WIDTH+c, sticky=tkinter.N+tkinter.E+tkinter.S+tkinter.W)\n            self.NextRowList.append(LN)\n            c = c + 1\n            if c >= 4:\n                r = r + 1\n                c = 0\n                self.NextList.append(self.NextRowList)\n                self.NextRowList = []\n\n        self.blocks = [\n            #[][][][]\n            [[0,0],[0,1],[0,2],[0,3]],\n            #[][]\n            #[][]\n            [[0,1],[0,2],[1,1],[1,2]],\n            #   []\n            #[][][]\n            [[0,2],[1,1],[1,2],[1,3]],\n            #[]\n            #[][][]\n            [[0,1],[1,1],[1,2],[1,3]],\n            #     []\n            #[][][]\n            [[0,3],[1,1],[1,2],[1,3]],\n            #[][]\n            #  [][]\n            [[0,1],[0,2],[1,2],[1,3]],\n            #  [][]\n            #[][]\n            [[0,2],[0,3],[1,1],[1,2]],\n            #[][][][] This will make it *MY* tetris\n            #  []\n            [[0,0],[0,1],[0,2],[0,3],[1,1]],\n            #[][][][]\n            #    []\n            [[0,0],[0,1],[0,2],[0,3],[1,2]],\n        ]\n\n        self.BlockList = []\n        self.LabelList = []\n        self.BlockRowList = []\n        self.LabelRowList = []\n        row = 0\n        col = 0\n        for i in range(HEIGHT * WIDTH):\n            L = tkinter.Label(master, text=' ', bg=str(self.backg), fg='white',relief=tkinter.FLAT, bd=4)\n            L.grid(row=row, column=col, sticky=tkinter.N+tkinter.E+tkinter.S+tkinter.W)\n            L.row = row\n            L.col = col\n            L.isActive=False\n            self.BlockRowList.append(0)\n            self.LabelRowList.append(L)\n            col = col + 1\n\n            if col >= WIDTH:\n                row = row + 1\n                col = 0\n                self.BlockList.append(self.BlockRowList)\n                self.LabelList.append(self.LabelRowList)\n                self.BlockRowList = []\n                self.LabelRowList = []\n\n        self.time = 500\n\n        self.OnTimer()\n\n    def __del__(self):\n        pass\n\n    def GamePause(self):\n        if self.isStart:\n            self.isPause = not self.isPause\n            self.TimeSpent.config(text=('Pause'))\n            root.title('Press p to continue')\n\n    def GameResume(self):\n        if self.isStart:\n            self.isPause = not self.isPause\n            self.DisplayTime()\n
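            # DisplayTime() schedules itself again via after(), so calling it\n            # here restarts the on-screen clock after a pause.\n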
root.title('Tetris')\n\n def FocusPause(self, event):\n self.isPause = True\n root.title('Press p to continue')\n\n def Pause(self, event):\n if not self.isPause:\n self.GamePause()\n else:\n self.GameResume()\n\n def Up(self, event):\n BlockList = self.BlockList\n LabelList = self.LabelList\n Moveable = True\n xTotal = 0\n yTotal = 0\n count = 0\n for i in range(HEIGHT):\n for j in range(WIDTH):\n if LabelList[i][j].isActive:\n xTotal = xTotal + i\n yTotal = yTotal + j\n count = count + 1\n\n SourceList = []\n DestList = []\n\n for i in range(HEIGHT):\n for j in range(WIDTH):\n if LabelList[i][j].isActive:\n x0 = (xTotal + yTotal) // count\n y0 = (yTotal - xTotal) // count\n xr = (xTotal + yTotal) % count\n yr = (yTotal - xTotal) % count\n\n x = x0 - j\n y = y0 + i\n if xr >= count / 2:\n x = x + 1\n if yr >= count / 2:\n y = y + 1\n SourceList.append([i, j])\n DestList.append([x, y])\n\n if x < 0 or x >= HEIGHT or y < 0 or y >= WIDTH:\n Moveable = False\n if x >= 0 and x < HEIGHT and y >= 0 and y < WIDTH and BlockList[x][y] == 1 and not LabelList[x][y].isActive:\n Moveable = False\n\n if Moveable:\n for i in range(len(SourceList)):\n self.Empty(SourceList[i][0], SourceList[i][1])\n for i in range(len(DestList)):\n self.Fill(DestList[i][0], DestList[i][1])\n\n def Left(self, event):\n if self.isPause:\n return\n\n BlockList = self.BlockList\n LabelList = self.LabelList\n Moveable = True\n\n for i in range(HEIGHT):\n for j in range(WIDTH):\n if LabelList[i][j].isActive and j-1 < 0:\n Moveable = False\n if LabelList[i][j].isActive and j-1 >= 0 and BlockList[i][j-1] == 1 and not LabelList[i][j-1].isActive:\n Moveable = False\n\n if Moveable:\n for i in range(HEIGHT):\n for j in range(WIDTH):\n if j-1 >= 0 and LabelList[i][j].isActive and BlockList[i][j-1] == 0:\n self.Fill(i, j-1)\n self.Empty(i, j)\n\n def Right(self, event):\n if self.isPause:\n return\n\n BlockList = self.BlockList\n LabelList = self.LabelList\n Moveable = True\n\n for i in range(HEIGHT):\n for j in range(WIDTH):\n if LabelList[i][j].isActive and j + 1 == WIDTH:\n Moveable = False\n if LabelList[i][j].isActive and j+1 < WIDTH and BlockList[i][j+1] == 1 and not LabelList[i][j+1].isActive:\n Moveable = False\n\n if Moveable:\n for i in range(HEIGHT-1, -1, -1):\n for j in range(WIDTH-1, -1, -1):\n if j + 1 < WIDTH and LabelList[i][j].isActive and BlockList[i][j+1] == 0:\n self.Fill(i, j+1)\n self.Empty(i, j)\n\n def Space(self, event):\n if not self.isStart or self.isPause:\n return\n while self.Down(0):\n pass\n \n def DisplayTime(self):\n if self.isStart and not self.isPause:\n self.TimeSpent.config(text=str(self.TotalTime))\n self.TotalTime = self.TotalTime + 1\n self.after(1000, self.DisplayTime)\n\n def OnTimer(self):\n if not self.isPause:\n self.Down(0)\n\n if self.TotalScore > 400:\n self.time = 200\n elif self.TotalScore > 300:\n self.time = 300\n elif self.TotalScore > 200:\n self.time = 375\n elif self.TotalScore > 100:\n self.time = 450\n else:\n self.time = 500\n\n self.after(self.time, self.OnTimer)\n\n def Down(self, event):\n BlockList = self.BlockList\n LabelList = self.LabelList\n Moveable = True\n\n for i in range(HEIGHT):\n for j in range(WIDTH):\n if LabelList[i][j].isActive and i+1 == HEIGHT:\n Moveable = False\n if LabelList[i][j].isActive and i+1 < HEIGHT and BlockList[i+1][j] == 1 and not LabelList[i+1][j].isActive:\n Moveable = False\n\n if Moveable:\n for i in range(HEIGHT-1, -1, -1):\n for j in range(WIDTH-1, -1, -1):\n if i+1 < HEIGHT and LabelList[i][j].isActive and BlockList[i+1][j] == 0:\n 
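                        # Shift the active cell one row down: fill the cell below first, then clear the current one.\n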
self.Fill(i+1, j)\n self.Empty(i, j)\n if not Moveable:\n for i in range(HEIGHT):\n for j in range(WIDTH):\n LabelList[i][j].isActive = False\n self.JudgeLineFill()\n self.Start()\n if self.isGameOver:\n self.isStart = False\n self.ShowScore()\n self.Destroy()\n return False\n for i in range(4):\n for j in range(4):\n self.NextEmpty(i, j)\n self.Rnd()\n return Moveable\n\n #jugde if there are line(s) full\n def JudgeLineFill(self):\n BlockList = self.BlockList\n LabelList = self.LabelList\n count = 0\n LineList = []\n for i in range(WIDTH):\n LineList.append(1)\n for i in range(HEIGHT):\n if BlockList[i] == LineList:\n count = count + 1\n for k in range(WIDTH):\n LabelList[i][k].config(bg=str(self.flashg))\n LabelList[i][k].update()\n if count != 0:\n self.after(100)\n\n for i in range(HEIGHT):\n if BlockList[i] == LineList:\n for j in range(i, 0, -1):\n for k in range(WIDTH):\n BlockList[j][k] = BlockList[j-1][k]\n LabelList[j][k]['relief'] = LabelList[j-1][k].cget('relief')\n LabelList[j][k]['bg'] = LabelList[j-1][k].cget('bg')\n for l in range(WIDTH):\n BlockList[0][l] = 0\n LabelList[0][l].config(relief=tkinter.FLAT, bg=str(self.backg))\n self.TotalLine = self.TotalLine + count\n if count == 1:\n modify = 1\n elif count == 2:\n modify = 3\n elif count == 3:\n modify = 6\n elif count == 4:\n modify = 10\n else:\n modify = 0\n\n self.TotalScore = self.TotalScore + modify * WIDTH\n self.Line.config(text=str(self.TotalLine))\n self.Score.config(text=str(self.TotalScore))\n\n def Fill(self, i, j):\n if j < 0:\n return \n if self.BlockList[i][j] == 1:\n self.isGameOver = True\n self.BlockList[i][j] = 1\n self.LabelList[i][j].isActive = True\n self.LabelList[i][j].config(relief=tkinter.RAISED, bg=str(self.frontg))\n\n def Empty(self, i, j):\n self.BlockList[i][j] = 0\n self.LabelList[i][j].isActive = False\n self.LabelList[i][j].config(relief=tkinter.FLAT, bg=str(self.backg))\n\n def Play(self, event):\n pass\n\n def NextFill(self, i, j):\n self.NextList[i][j].config(relief=tkinter.RAISED, bg=str(self.frontg))\n\n def NextEmpty(self, i, j):\n self.NextList[i][j].config(relief=tkinter.FLAT, bg=str(self.nextg))\n\n def ShowScore(self):\n tkinter.messagebox.showinfo('GAME OVER.','Game Over\\nYour score: ' + str(self.TotalScore) + '.\\n Try harder next time.')\n\n def Destroy(self):\n for i in range(HEIGHT):\n for j in range(WIDTH):\n self.Empty(i,j)\n self.TotalLine = 0\n self.TotalScore = 0\n self.TotalTime = 0\n self.Line.config(text=str(self.TotalLine))\n self.Score.config(text=str(self.TotalScore))\n self.TimeSpent.config(text=str(self.TotalTime))\n self.isGameOver = False\n self.isStart = False\n self.time = 500\n root.title('Tetris')\n for i in range(4):\n for j in range(4):\n self.NextEmpty(i,j)\n\n def Start(self):\n self.Draw(self.blocks[self.x])\n self.isStart=True\n\n def Draw(self, blocks):\n for block in blocks:\n self.Fill(block[0], WIDTH // 2 - 2 + block[1])\n\n def DrawNext(self, blocks):\n for block in blocks:\n self.NextFill(block[0], block[1])\n\n def Rnd(self):\n self.x=random.randint(0, len(self.blocks)-1)\n self.DrawNext(self.blocks[self.x])\n\n def RndFirst(self):\n self.x=random.randint(0, len(self.blocks)-1)\n \n\ndef Start():\n if app.isStart:\n return\n app.RndFirst()\n app.Start()\n app.Rnd()\n app.DisplayTime()\n\ndef End():\n if not app.isPause:\n app.GamePause()\n root.title('Tetris')\n app.ShowScore()\n app.Destroy()\n\ndef Set():\n pass\ndef Show():\n app.Show()\n\n\nmainmenu=tkinter.Menu(root)\nroot['menu']=mainmenu\nmainmenu.add_command(label='Start', 
command=Start)\nmainmenu.add_command(label='End_Game', command=End)\nmainmenu.add_command(label='Exit', command=root.quit)\n\napp = App(root)\nroot.mainloop()","sub_path":"mytetris.py","file_name":"mytetris.py","file_ext":"py","file_size_in_byte":14009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"86287132","text":"import sys\nimport numpy as np\nfile = sys.argv[-1]\n\nwith open(file) as f:\n\tcnt = f.readlines()\n\ncount = []\ndistortion = []\ncalibration = []\nlinf = []\nfor line in cnt:\n\tif line.startswith('Adversarial Example Found Successfully:'):\n\t\tcount.append(int(line.split(' ')[-2]))\n\t\tdistortion.append(eval(line.split(' ')[-6]))\n\telif line.startswith('1 Predicted label'):\n\t\tcalibration.append(eval(line.split(' ')[-3]))\n\t\tlinf.append(eval(line.split(' ')[-2]))\nprint('len:', len(count))\nprint('count:', np.mean(count), np.median(count), np.min(count), np.max(count))\nprint('distortion:', np.mean(distortion), np.median(distortion), np.min(distortion), np.max(distortion))\nif len(linf) != 0:\n    print('calibration:', np.mean(calibration), np.median(calibration), np.min(calibration), np.max(calibration))\n    print('linf:', np.mean(linf), np.median(linf), np.min(linf), np.max(linf))\n","sub_path":"extract_info_opt_blackbox.py","file_name":"extract_info_opt_blackbox.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"275600039","text":"class Solution(object):\n    def lengthOfLongestSubstringTwoDistinct(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        maxlen = 0\n        left = 0\n        worddic = {}\n        for (i,word) in enumerate(s):\n            if word not in worddic and len(worddic)==2:\n                left = min(worddic.values())+1\n                for j in worddic:\n                    if worddic[j] == left-1:\n                        del worddic[j]\n                        break\n            worddic[word] = i\n            maxlen = max(maxlen, i - left + 1)\n        return maxlen\n","sub_path":"Longest_Substring_with_At_Most_Two_Distinct_Characters.py","file_name":"Longest_Substring_with_At_Most_Two_Distinct_Characters.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"403092684","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n# @Time : 2021/2/7\n# @Author : 郑煜辉\n# @File : Peashooter\n\nimport pygame\nimport random\n\n\nclass Zombie(pygame.sprite.Sprite):\n    def __init__(self):\n        super(Zombie, self).__init__()\n        self.images = [pygame.image.load('images/Zombie/Zombie_{:d}.png'.format(i)).convert_alpha() for i in\n                       range(22)]  # frames for the walking animation\n        self.rect = self.images[0].get_rect()\n        self.rect.left = 1000\n        self.rect.top = 25 + random.randrange(0,4)*125\n        self.speed = 1\n        self.blood = 25\n        self.is_live = True\n        self.stop = False\n        # self.final = False\n        self.kill = 1\n        self.old = 0\n\n    def move(self):\n        if not self.stop:\n            self.rect.left -= self.speed\n\n    def changimage(self):\n        if self.old == 0:\n            self.images = [pygame.image.load('images/Zombie/Zombie_{:d}.png'.format(i)).convert_alpha() for i in\n                           range(22)]  # frames for the walking animation\n        elif self.old == 1:\n            self.images = [pygame.image.load('images/ZombieAttack/ZombieAttack_{:d}.png'.format(i)).convert_alpha() for i in\n                           range(20)]  # frames for the attack animation\n\n    def attack(self,enemyList):\n        for enemy in enemyList:\n            enemy.rect.left,enemy.rect.top = enemy.zone\n            if pygame.sprite.collide_circle_ratio(0.5)(enemy,self):\n                #print('hit')\n                self.stop = True\n                self.old = 1\n                enemy.blood -= self.kill\n                if enemy.blood == 0:\n                    enemy.is_live = False\n                    self.stop = 
False\n self.old=0\n\n","sub_path":"zombie.py","file_name":"zombie.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"577421584","text":"def DNI(nombre):\n nombre=raw_input('Nombre DNI: ')\n intnombre=int(nombre)\n lletres={0:\"T\",1:\"R\",2:\"W\",3:\"A\",4:\"G\",5:\"M\",6:\"Y\",7:\"F\",8:\"P\",9:\"D\",10:\"X\",\n 11:\"B\",12:\"N\",13:\"J\",14:\"Z\",15:\"S\",16:\"Q\",17:\"V\",18:\"H\",19:\"L\",\n 20:\"C\",21:\"K\",22:\"E\"}\n residu=intnombre%23\n lletra=lletres[residu]\n \n print(\"La seva lletra del DNI ES: \",lletra,\"el seu nombre complet es: \",nombre,lletra)\n\nDNI(47858503)\n","sub_path":"1R S/INFORMÀTICA_1/python EMACS/lab_07/EOL3-1.py","file_name":"EOL3-1.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"292820070","text":"#!/usr/bin/env python\n\n\"\"\"\n Relax all servos by disabling the torque for each.\n\"\"\"\nimport roslib\nroslib.load_manifest('maggie_head_movement')\nimport rospy, time\nfrom dynamixel_controllers.srv import TorqueEnable, SetSpeed\n\nclass Relax():\n def __init__(self):\n rospy.init_node('relax_all_servos')\n \n dynamixel_namespace = rospy.get_namespace()\n if dynamixel_namespace == '/':\n dynamixel_namespace = rospy.get_param('~dynamixel_namespace', '/dynamixel_controller')\n dynamixels = rospy.get_param(dynamixel_namespace + '/dynamixels', dict())\n \n servo_torque_enable = list()\n servo_set_speed = list()\n \n for name in sorted(dynamixels):\n torque_enable_service = dynamixel_namespace + '/' + name + '_controller/torque_enable'\n rospy.wait_for_service(torque_enable_service) \n servo_torque_enable.append(rospy.ServiceProxy(torque_enable_service, TorqueEnable))\n \n set_speed_service = dynamixel_namespace + '/' + name + '_controller/set_speed'\n rospy.wait_for_service(set_speed_service)\n servo_set_speed.append(rospy.ServiceProxy(set_speed_service, SetSpeed))\n\n # Give the servos an intial speed that is relatively slow for safety\n for set_speed in servo_set_speed:\n set_speed(0.3)\n \n # Relax all servos to give them a rest.\n for torque_enable in servo_torque_enable:\n torque_enable(False)\n \nif __name__=='__main__':\n Relax()\n","sub_path":"maggie/component_ws/src/point_head/src/relax_all_servos.py","file_name":"relax_all_servos.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"580566834","text":"from openpyxl import load_workbook\nfrom dbconnect.dbconnect import *\n\n\nclass AddRatesToDB:\n\n def __init__(self, excel_file, sheet_name):\n\n try:\n self.wb = load_workbook(excel_file)\n sheet = self.wb[sheet_name]\n for row in sheet.rows:\n insert_rates(row[0].value, row[1].value, row[2].value, row[3].value, row[4].value, row[5].value)\n\n except Exception as e:\n print(e)\n\n\nif __name__ == '__main__':\n AddRatesToDB('rates.xlsx', 'Sheet')\n","sub_path":"AddRatesToDB.py","file_name":"AddRatesToDB.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"31391119","text":"#!/usr/bin/python\n\"\"\"\nTest for 'exclusive access' of the terminal by\nplaying with vim and nano.\n\nTests simply opening and typing,\nthen some background and job control with exclusive access\nto make sure you maintain proper terminal state.\n\"\"\"\n\nfrom testutil import *\nfrom tempfile 
import mkstemp\nsetup_tests()\n\n\ndef expect_stopped(command):\n run_builtin('jobs')\n\n job = parse_job_line()\n\n assert command in job.command, 'Job was not: {0}'.format(command)\n assert 'stopped' in job.status.lower(), 'Vim not stopped'\n expect_prompt()\n\nexpect_prompt()\n_, tmpfile = mkstemp()\n# Start nano\nsendline('nano {0}'.format(tmpfile))\nwait_for_fg_child()\n\n# Quit out to make sure nano can read keys\nsendline('test that it works')\ntime.sleep(0.5)\nsendcontrol('x')\ntime.sleep(0.5)\nsendline('y\\r')\nexpect_prompt()\n\nwith open(tmpfile) as fd:\n assert 'test that it works' in fd.read()\nos.unlink(tmpfile)\n\n\n# Assert that the shell has regained control\n\n\n# Try to start vim in the background\n# Should stop and wait for exclusive control\nsendline('vim -u NONE &')\nparse_bg_status()\nexpect_prompt()\ntime.sleep(0.5)\nexpect_stopped('vim')\n\n# Send vim that was stopped into the foreground\nrun_builtin('fg', '1')\nwait_for_fg_child()\n\n\n# Stop the currently running vim\nsendcontrol('z')\nexpect_prompt()\n\nexpect_stopped('vim')\n\n# Send it back into the foreground and try and quit it.\nrun_builtin('fg', '1')\n\nwait_for_fg_child()\n\nsendline(':q')\n\n# See if shell correctly regains control\nexpect_prompt()\n\ntest_success()\n\n","sub_path":"Systems/esh-spring-2015.git/eshtests/advanced/exclusive_access_test.py","file_name":"exclusive_access_test.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"} +{"seq_id":"59397551","text":"# -------------------------------------------------------------------------\n# The CodeChecker Infrastructure\n# This file is distributed under the University of Illinois Open Source\n# License. See LICENSE.TXT for details.\n# -------------------------------------------------------------------------\n\n\"\"\"\nSupported analyzer types.\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nimport io\nimport os\nimport platform\nimport re\nimport sys\n\nfrom libcodechecker.analyze import analyzer_env\nfrom libcodechecker.analyze import host_check\nfrom libcodechecker.analyze.analyzers import analyzer_clang_tidy\nfrom libcodechecker.analyze.analyzers import analyzer_clangsa\nfrom libcodechecker.analyze.analyzers import config_handler_clang_tidy\nfrom libcodechecker.analyze.analyzers import config_handler_clangsa\nfrom libcodechecker.logger import get_logger\n\nLOG = get_logger('analyzer')\n\nCLANG_SA = 'clangsa'\nCLANG_TIDY = 'clang-tidy'\n\nsupported_analyzers = {CLANG_SA: analyzer_clangsa.ClangSA,\n CLANG_TIDY: analyzer_clang_tidy.ClangTidy}\n\n\ndef check_supported_analyzers(analyzers, context):\n \"\"\"\n Checks the given analyzers in the current context for their executability\n and support in CodeChecker.\n\n This method also updates the given context.analyzer_binaries if the\n context's configuration is bogus but had been resolved.\n\n :return: (enabled, failed) where enabled is a list of analyzer names\n and failed is a list of (analyzer, reason) tuple.\n \"\"\"\n\n check_env = analyzer_env.get_check_env(context.path_env_extra,\n context.ld_lib_path_extra)\n\n analyzer_binaries = context.analyzer_binaries\n\n enabled_analyzers = set()\n failed_analyzers = set()\n\n for analyzer_name in analyzers:\n if analyzer_name not in supported_analyzers:\n failed_analyzers.add((analyzer_name,\n \"Analyzer unsupported by CodeChecker.\"))\n continue\n\n # Get the compiler binary to check if it can 
run.\n available_analyzer = True\n analyzer_bin = analyzer_binaries.get(analyzer_name)\n if not analyzer_bin:\n failed_analyzers.add((analyzer_name,\n \"Failed to detect analyzer binary.\"))\n available_analyzer = False\n elif not os.path.isabs(analyzer_bin):\n # If the analyzer is not in an absolute path, try to find it...\n found_bin = supported_analyzers[analyzer_name].\\\n resolve_missing_binary(analyzer_bin, check_env)\n\n # found_bin is an absolute path, an executable in one of the\n # PATH folders.\n # If found_bin is the same as the original binary, ie., normally\n # calling the binary without any search would have resulted in\n # the same binary being called, it's NOT a \"not found\".\n if found_bin and os.path.basename(found_bin) != analyzer_bin:\n LOG.debug(\"Configured binary '{0}' for analyzer '{1}' was \"\n \"not found, but environment PATH contains '{2}'.\"\n .format(analyzer_bin, analyzer_name, found_bin))\n context.analyzer_binaries[analyzer_name] = found_bin\n\n analyzer_bin = found_bin\n\n if not analyzer_bin or \\\n not host_check.check_clang(analyzer_bin, check_env):\n # Analyzers unavailable under absolute paths are deliberately a\n # configuration problem.\n failed_analyzers.add((analyzer_name,\n \"Cannot execute analyzer binary.\"))\n available_analyzer = False\n\n if available_analyzer:\n enabled_analyzers.add(analyzer_name)\n\n return enabled_analyzers, failed_analyzers\n\n\ndef construct_analyzer(buildaction,\n analyzer_config_map):\n \"\"\"\n Construct an analyzer.\n \"\"\"\n try:\n analyzer_type = buildaction.analyzer_type\n # Get the proper config handler for this analyzer type.\n config_handler = analyzer_config_map.get(analyzer_type)\n\n LOG.debug_analyzer('Constructing ' + analyzer_type + ' analyzer')\n if analyzer_type in supported_analyzers:\n analyzer = supported_analyzers[analyzer_type](config_handler,\n buildaction)\n else:\n analyzer = None\n LOG.error('Unsupported analyzer type: ' + analyzer_type)\n return analyzer\n\n except Exception as ex:\n LOG.debug_analyzer(ex)\n return None\n\n\ndef gen_name_variations(checkers):\n \"\"\"\n Generate all applicable name variations from the given checker list.\n \"\"\"\n checker_names = [name for name, _ in checkers]\n reserved_names = []\n\n for name in checker_names:\n delim = '.' if '.' 
in name else '-'\n parts = name.split(delim)\n # Creates a list of variations from a checker name, e.g.\n # ['security', 'security.insecureAPI', 'security.insecureAPI.gets']\n # from 'security.insecureAPI.gets' or\n # ['misc', 'misc-dangling', 'misc-dangling-handle']\n # from 'misc-dangling-handle'.\n variations = [delim.join(parts[:(i + 1)]) for i in range(len(parts))]\n reserved_names += variations\n\n return reserved_names\n\n\ndef initialize_checkers(config_handler,\n available_profiles,\n package_root,\n checkers,\n checker_config=None,\n cmdline_checkers=None,\n enable_all=False):\n \"\"\"\n Initializes the checker list for the specified config handler based\n on given checker profiles, commandline arguments and the analyzer-retrieved\n checker list.\n \"\"\"\n\n # By default disable all checkers.\n for checker_name, description in checkers:\n config_handler.add_checker(checker_name, False, description)\n\n # Set default enabled or disabled checkers, retrieved from the config file.\n if checker_config:\n # Check whether a default profile exists.\n profile_lists = checker_config.values()\n all_profiles = (\n profile for check_list in profile_lists for profile in check_list)\n if 'default' not in all_profiles:\n LOG.warning(\"No default profile found!\")\n else:\n # Turn default checkers on.\n for checker_name, profile_list in checker_config.items():\n if 'default' in profile_list:\n config_handler.set_checker_enabled(checker_name)\n\n # If enable_all is given, almost all checkers should be enabled.\n if enable_all:\n for checker_name, enabled in checkers:\n if not checker_name.startswith(\"alpha.\") and \\\n not checker_name.startswith(\"debug.\") and \\\n not checker_name.startswith(\"osx.\"):\n # There are a few exceptions, though, which still need to\n # be manually enabled by the user: alpha and debug.\n config_handler.set_checker_enabled(checker_name)\n\n if checker_name.startswith(\"osx.\") and \\\n platform.system() == 'Darwin':\n # OSX checkers are only enable-all'd if we are on OSX.\n config_handler.set_checker_enabled(checker_name)\n\n # Set user defined enabled or disabled checkers from the command line.\n if cmdline_checkers:\n\n # Construct a list of reserved checker names.\n # (It is used to check if a profile name is valid.)\n reserved_names = gen_name_variations(checkers)\n\n for identifier, enabled in cmdline_checkers:\n\n # The identifier is a profile name.\n if identifier in available_profiles:\n profile_name = identifier\n\n if profile_name == \"list\":\n LOG.error(\"'list' is a reserved profile keyword. 
\")\n LOG.error(\"Please choose another profile name in \"\n \"'{0}'/config/config.json and rebuild.\"\n .format(package_root))\n sys.exit(1)\n\n if profile_name in reserved_names:\n LOG.error(\"Profile name '\" + profile_name + \"' conflicts \"\n \"with a checker(-group) name.\")\n LOG.error(\"Please choose another profile name in \"\n \"'{0}'/config/config.json and rebuild.\"\n .format(package_root))\n sys.exit(1)\n\n profile_checkers = (name for name, profile_list\n in checker_config.items()\n if profile_name in profile_list)\n for checker_name in profile_checkers:\n config_handler.set_checker_enabled(checker_name, enabled)\n\n # The identifier is a checker(-group) name.\n else:\n checker_name = identifier\n config_handler.set_checker_enabled(checker_name, enabled)\n\n\ndef __replace_env_var(cfg_file):\n def replacer(matchobj):\n env_var = matchobj.group(1)\n if matchobj.group(1) not in os.environ:\n LOG.error(env_var + ' environment variable not set in ' + cfg_file)\n return ''\n return os.environ[env_var]\n\n return replacer\n\n\ndef __get_compiler_resource_dir(context, analyzer_binary):\n if context.compiler_resource_dir:\n resource_dir = context.compiler_resource_dir\n # If not set then ask the binary for the resource dir.\n else:\n # Can be None if Clang is too old.\n resource_dir = host_check.get_resource_dir(analyzer_binary)\n if resource_dir is None:\n resource_dir = \"\"\n return resource_dir\n\n\ndef __build_clangsa_config_handler(args, context):\n \"\"\"\n Build the config handler for clang static analyzer.\n Handle config options from the command line and config files.\n \"\"\"\n\n config_handler = config_handler_clangsa.ClangSAConfigHandler()\n config_handler.analyzer_plugins_dir = context.checker_plugin\n config_handler.analyzer_binary = context.analyzer_binaries.get(CLANG_SA)\n config_handler.compiler_resource_dir =\\\n __get_compiler_resource_dir(context, config_handler.analyzer_binary)\n\n check_env = analyzer_env.get_check_env(context.path_env_extra,\n context.ld_lib_path_extra)\n\n if 'ctu_phases' in args:\n config_handler.ctu_dir = os.path.join(args.output_path,\n args.ctu_dir)\n\n config_handler.ctu_has_analyzer_display_ctu_progress = \\\n host_check.has_analyzer_feature(\n context.analyzer_binaries.get(CLANG_SA),\n '-analyzer-display-ctu-progress',\n check_env)\n config_handler.log_file = args.logfile\n config_handler.path_env_extra = context.path_env_extra\n config_handler.ld_lib_path_extra = context.ld_lib_path_extra\n\n try:\n with open(args.clangsa_args_cfg_file, 'rb') as sa_cfg:\n config_handler.analyzer_extra_arguments = \\\n re.sub(r'\\$\\((.*?)\\)',\n __replace_env_var(args.clangsa_args_cfg_file),\n sa_cfg.read().strip())\n except IOError as ioerr:\n LOG.debug_analyzer(ioerr)\n except AttributeError as aerr:\n # No clangsa arguments file was given in the command line.\n LOG.debug_analyzer(aerr)\n\n analyzer = supported_analyzers[CLANG_SA](config_handler, None)\n\n checkers = analyzer.get_analyzer_checkers(config_handler, check_env)\n\n # Read clang-sa checkers from the config file.\n clang_sa_checkers = context.checker_config.get(CLANG_SA + '_checkers')\n\n try:\n cmdline_checkers = args.ordered_checkers\n except AttributeError:\n LOG.debug_analyzer('No checkers were defined in '\n 'the command line for ' + CLANG_SA)\n cmdline_checkers = None\n\n initialize_checkers(config_handler,\n context.available_profiles,\n context.package_root,\n checkers,\n clang_sa_checkers,\n cmdline_checkers,\n 'enable_all' in args and args.enable_all)\n\n return 
\n\n\ndef __build_clang_tidy_config_handler(args, context):\n    \"\"\"\n    Build the config handler for the clang-tidy analyzer.\n    Handle config options from the command line and from config files.\n    \"\"\"\n\n    config_handler = config_handler_clang_tidy.ClangTidyConfigHandler()\n    config_handler.analyzer_binary = context.analyzer_binaries.get(CLANG_TIDY)\n\n    # FIXME We cannot get the resource dir from the clang-tidy binary,\n    # therefore we use a clang binary which is a sibling of clang-tidy.\n    # TODO Support \"clang-tidy -print-resource-dir\".\n    check_env = analyzer_env.get_check_env(context.path_env_extra,\n                                           context.ld_lib_path_extra)\n    # Overwrite PATH to contain only the parent of the clang binary.\n    if os.path.isabs(config_handler.analyzer_binary):\n        check_env['PATH'] = os.path.dirname(config_handler.analyzer_binary)\n    clang_bin = analyzer_clangsa.ClangSA.resolve_missing_binary('clang',\n                                                                check_env)\n    config_handler.compiler_resource_dir =\\\n        __get_compiler_resource_dir(context, clang_bin)\n\n    try:\n        with open(args.tidy_args_cfg_file, 'rb') as tidy_cfg:\n            config_handler.analyzer_extra_arguments = \\\n                re.sub(r'\\$\\((.*?)\\)',\n                       __replace_env_var(args.tidy_args_cfg_file),\n                       tidy_cfg.read().strip())\n    except IOError as ioerr:\n        LOG.debug_analyzer(ioerr)\n    except AttributeError as aerr:\n        # No clang-tidy arguments file was given in the command line.\n        LOG.debug_analyzer(aerr)\n\n    try:\n        # The config file dumped by clang-tidy ends with a \"...\" line (the\n        # YAML end-of-document marker). It has to be omitted, otherwise the\n        # -config flag of clang-tidy cannot consume it.\n        with io.open(args.tidy_config, 'rb') as tidy_config:\n            lines = tidy_config.readlines()\n            lines = filter(lambda x: x != '...\n', lines)\n            config_handler.checker_config = ''.join(lines)\n    except IOError as ioerr:\n        LOG.debug_analyzer(ioerr)\n    except AttributeError as aerr:\n        # No clang-tidy config file was given in the command line.\n        LOG.debug_analyzer(aerr)\n\n    analyzer = supported_analyzers[CLANG_TIDY](config_handler, None)\n    # Rebuild a clean environment: the PATH overwrite above was only needed\n    # to locate the sibling clang binary.\n    check_env = analyzer_env.get_check_env(context.path_env_extra,\n                                           context.ld_lib_path_extra)\n\n    checkers = analyzer.get_analyzer_checkers(config_handler, check_env)\n\n    # Read clang-tidy checkers from the config file.\n    clang_tidy_checkers = context.checker_config.get(CLANG_TIDY + '_checkers')\n\n    try:\n        cmdline_checkers = args.ordered_checkers\n    except AttributeError:\n        LOG.debug_analyzer('No checkers were defined in '\n                           'the command line for ' +\n                           CLANG_TIDY)\n        cmdline_checkers = None\n\n    initialize_checkers(config_handler,\n                        context.available_profiles,\n                        context.package_root,\n                        checkers,\n                        clang_tidy_checkers,\n                        cmdline_checkers,\n                        'enable_all' in args and args.enable_all)\n\n    return config_handler\n\n\ndef build_config_handlers(args, context, enabled_analyzers):\n    \"\"\"\n    Handle config from the command line, or from config files if no\n    command-line config is given.\n\n    The supported command-line config format is JSON; clang-tidy also\n    supports YAML, but no YAML parser is available in the Python standard\n    library.\n    \"\"\"\n\n    analyzer_config_map = {}\n\n    for ea in enabled_analyzers:\n        if ea == CLANG_SA:\n            config_handler = __build_clangsa_config_handler(args, context)\n        elif ea == CLANG_TIDY:\n            config_handler = __build_clang_tidy_config_handler(args, context)\n        else:\n            config_handler = None\n            LOG.debug(\"Unhandled analyzer: \" + str(ea))\n        analyzer_config_map[ea] = config_handler\n\n    return analyzer_config_map
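\n\n\n# --- Illustrative sketch (not part of the original module) -----------------\n# A hypothetical demo of how build_config_handlers() is driven: one config\n# handler per enabled analyzer, keyed by analyzer name. 'args' and 'context'\n# stand in for the real command-line namespace and package context.\ndef _demo_build_config_handlers(args, context):\n    enabled = [CLANG_SA, CLANG_TIDY]\n    handlers = build_config_handlers(args, context, enabled)\n    # Every enabled analyzer gets an entry, even if its handler could not\n    # be built (in which case the value is None).\n    assert set(handlers) == set(enabled)\n    return handlers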
\n","sub_path":"libcodechecker/analyze/analyzers/analyzer_types.py","file_name":"analyzer_types.py","file_ext":"py","file_size_in_byte":16483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"17"}
{"seq_id":"75840485","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n##########################################\n######## initialization ########\n##########################################\nmapmap = np.load('map.npy')\nprint(mapmap.shape)\nstartPosition = (2, 0)  # initial point\ngoalPosition = (13, 11)  # end point\n# 4 straight and 4 diagonal neighbour offsets on the grid.\ndirection = [(1,0),(0,1),(0,-1),(-1,0),(1,1),(1,-1),(-1,1),(-1,-1)]\nmapRow, mapCol = mapmap.shape\n# Non-zero map cells are obstacles: abort if the start or the goal lies on\n# an occupied cell.\nif mapmap[startPosition[0], startPosition[1]]:\n    exit('Parameters Error! in startPosition')\nelif mapmap[goalPosition[0], goalPosition[1]]:\n    exit('Parameters Error! in goalPosition')\n\n##########################################\n######## heuristic function ########\n##########################################\ndef cost(startPosition, goalPosition):\n    # Octile distance: straight moves cost 10, diagonal moves cost 14,\n    # since 10*(dx+dy) - 6*min(dx,dy) == 14*min(dx,dy) + 10*(max(dx,dy)-min(dx,dy)).\n    dx, dy = abs(np.array(startPosition)-np.array(goalPosition))\n    distance = 10*(dx+dy) - 6*min(dx,dy)\n    #distance = dx+dy-0.6*min(dx,dy)\n    return distance\n\ndef g(startPosition, startCost, goalPosition):\n    # Accumulated path cost: the cost so far plus the step cost.\n    goalCost = startCost + cost(startPosition, goalPosition)\n    return goalCost\n\ndef heuristic(startPosition, goalPosition):\n    # Same octile distance as cost(); kept separate so h() can be tuned\n    # independently of the step cost g().\n    dx, dy = abs(np.array(startPosition)-np.array(goalPosition))\n    distance = 10*(dx+dy) - 6*min(dx,dy)\n    #distance = dx+dy-0.6*min(dx,dy)\n    return distance\n\n##########################################\n######## useful function ########\n##########################################\ndef isPositionValid(position):\n    return position[0]>=0 and position[1]>=0 and position[0]